From 882b70c83f464c0249a07cd8280a33d7342400c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 14:14:24 +0000 Subject: [PATCH 01/53] tools: bump cross-spawn from 7.0.3 to 7.0.5 in /tools/eslint Bumps [cross-spawn](https://github.com/moxystudio/node-cross-spawn) from 7.0.3 to 7.0.5. - [Changelog](https://github.com/moxystudio/node-cross-spawn/blob/master/CHANGELOG.md) - [Commits](https://github.com/moxystudio/node-cross-spawn/compare/v7.0.3...v7.0.5) --- updated-dependencies: - dependency-name: cross-spawn dependency-type: indirect ... Signed-off-by: dependabot[bot] PR-URL: https://github.com/nodejs/node/pull/55894 Reviewed-By: Rafael Gonzaga Reviewed-By: Marco Ippolito Reviewed-By: Yagiz Nizipli --- tools/eslint/package-lock.json | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/eslint/package-lock.json b/tools/eslint/package-lock.json index 58c6a04431f136..0f1ad7b669d20b 100644 --- a/tools/eslint/package-lock.json +++ b/tools/eslint/package-lock.json @@ -764,10 +764,9 @@ "license": "MIT" }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "license": "MIT", + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.5.tgz", + "integrity": "sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug==", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", From 72eb710f0f72585783bd094577c3620dce1146ca Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Mon, 18 Nov 2024 23:41:23 +0800 Subject: [PATCH 02/53] tools: fix riscv64 build failed PR-URL: https://github.com/nodejs/node/pull/52888 Reviewed-By: Stewart X Addison Reviewed-By: Richard Lau --- tools/v8_gypfiles/v8.gyp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/v8_gypfiles/v8.gyp b/tools/v8_gypfiles/v8.gyp index 226a3f6df54d07..f344406a9414e9 100644 --- a/tools/v8_gypfiles/v8.gyp +++ b/tools/v8_gypfiles/v8.gyp @@ -1176,6 +1176,23 @@ 'sources': [ ' Date: Mon, 18 Nov 2024 16:58:59 +0000 Subject: [PATCH 03/53] doc: add history entry for import assertion removal PR-URL: https://github.com/nodejs/node/pull/55883 Refs: https://github.com/nodejs/node/pull/52104 Reviewed-By: Chemi Atlow Reviewed-By: Geoffrey Booth Reviewed-By: Luigi Pinca Reviewed-By: Moshe Atlow --- doc/api/esm.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/api/esm.md b/doc/api/esm.md index 797e551d130452..64d2e537ecd9d8 100644 --- a/doc/api/esm.md +++ b/doc/api/esm.md @@ -10,6 +10,9 @@ changes: - version: v23.1.0 pr-url: https://github.com/nodejs/node/pull/55333 description: Import attributes are no longer experimental. + - version: v22.0.0 + pr-url: https://github.com/nodejs/node/pull/52104 + description: Drop support for import assertions. - version: - v21.0.0 - v20.10.0 From 83e02dc4823300f4efe3c359d34259fdbfbd5074 Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Sat, 16 Nov 2024 19:10:58 +0100 Subject: [PATCH 04/53] build: compile bundled ada conditionally The --shared-ada flag was introduced in #52924, but the implementation was incomplete. 
Resolves #52914 PR-URL: https://github.com/nodejs/node/pull/55886 Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca Reviewed-By: Richard Lau --- node.gyp | 5 ----- node.gypi | 4 ++++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/node.gyp b/node.gyp index f018e0a34d5782..4dab82602d658b 100644 --- a/node.gyp +++ b/node.gyp @@ -851,7 +851,6 @@ 'deps/histogram/histogram.gyp:histogram', 'deps/simdjson/simdjson.gyp:simdjson', 'deps/simdutf/simdutf.gyp:simdutf', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', 'node_js2c#host', ], @@ -1126,7 +1125,6 @@ 'deps/googletest/googletest.gyp:gtest_prod', 'deps/histogram/histogram.gyp:histogram', 'deps/uvwasi/uvwasi.gyp:uvwasi', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', ], 'includes': [ @@ -1175,7 +1173,6 @@ 'deps/histogram/histogram.gyp:histogram', 'deps/simdjson/simdjson.gyp:simdjson', 'deps/simdutf/simdutf.gyp:simdutf', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', ], @@ -1253,7 +1250,6 @@ 'dependencies': [ '<(node_lib_target_name)', 'deps/histogram/histogram.gyp:histogram', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', ], @@ -1367,7 +1363,6 @@ 'dependencies': [ '<(node_lib_target_name)', 'deps/histogram/histogram.gyp:histogram', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', 'deps/simdjson/simdjson.gyp:simdjson', 'deps/simdutf/simdutf.gyp:simdutf', diff --git a/node.gypi b/node.gypi index 9c989022a9ad36..3646945241c0d9 100644 --- a/node.gypi +++ b/node.gypi @@ -212,6 +212,10 @@ 'dependencies': [ 'deps/nghttp2/nghttp2.gyp:nghttp2' ], }], + [ 'node_shared_ada=="false"', { + 'dependencies': [ 'deps/ada/ada.gyp:ada' ], + }], + [ 'node_shared_brotli=="false"', { 'dependencies': [ 'deps/brotli/brotli.gyp:brotli' ], }], From d8eb83c5c5767623b129c79831d26433f55c6fef Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Sat, 16 Nov 2024 19:13:46 +0100 Subject: [PATCH 05/53] build: compile bundled simdjson conditionally The --shared-simdjson flag was introduced in #52924, but the implementation was incomplete. 
Resolves #52914 PR-URL: https://github.com/nodejs/node/pull/55886 Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca Reviewed-By: Richard Lau --- node.gyp | 3 --- node.gypi | 4 ++++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/node.gyp b/node.gyp index 4dab82602d658b..502083d3300040 100644 --- a/node.gyp +++ b/node.gyp @@ -849,7 +849,6 @@ 'dependencies': [ 'deps/googletest/googletest.gyp:gtest_prod', 'deps/histogram/histogram.gyp:histogram', - 'deps/simdjson/simdjson.gyp:simdjson', 'deps/simdutf/simdutf.gyp:simdutf', 'deps/nbytes/nbytes.gyp:nbytes', 'node_js2c#host', @@ -1171,7 +1170,6 @@ 'deps/googletest/googletest.gyp:gtest', 'deps/googletest/googletest.gyp:gtest_main', 'deps/histogram/histogram.gyp:histogram', - 'deps/simdjson/simdjson.gyp:simdjson', 'deps/simdutf/simdutf.gyp:simdutf', 'deps/nbytes/nbytes.gyp:nbytes', ], @@ -1364,7 +1362,6 @@ '<(node_lib_target_name)', 'deps/histogram/histogram.gyp:histogram', 'deps/nbytes/nbytes.gyp:nbytes', - 'deps/simdjson/simdjson.gyp:simdjson', 'deps/simdutf/simdutf.gyp:simdutf', ], diff --git a/node.gypi b/node.gypi index 3646945241c0d9..63710d07f552a1 100644 --- a/node.gypi +++ b/node.gypi @@ -216,6 +216,10 @@ 'dependencies': [ 'deps/ada/ada.gyp:ada' ], }], + [ 'node_shared_simdjson=="false"', { + 'dependencies': [ 'deps/simdjson/simdjson.gyp:simdjson' ], + }], + [ 'node_shared_brotli=="false"', { 'dependencies': [ 'deps/brotli/brotli.gyp:brotli' ], }], From fffabca6b8ea6baf834d04149cf53a6a4d01f568 Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Sat, 16 Nov 2024 19:17:17 +0100 Subject: [PATCH 06/53] build: compile bundled simdutf conditionally The --shared-simdutf flag was introduced in #52924, but the implementation was incomplete. Resolves #52914 PR-URL: https://github.com/nodejs/node/pull/55886 Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca Reviewed-By: Richard Lau --- node.gyp | 9 +++------ node.gypi | 4 ++++ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/node.gyp b/node.gyp index 502083d3300040..b8bfebb562f5f4 100644 --- a/node.gyp +++ b/node.gyp @@ -849,7 +849,6 @@ 'dependencies': [ 'deps/googletest/googletest.gyp:gtest_prod', 'deps/histogram/histogram.gyp:histogram', - 'deps/simdutf/simdutf.gyp:simdutf', 'deps/nbytes/nbytes.gyp:nbytes', 'node_js2c#host', ], @@ -1170,7 +1169,6 @@ 'deps/googletest/googletest.gyp:gtest', 'deps/googletest/googletest.gyp:gtest_main', 'deps/histogram/histogram.gyp:histogram', - 'deps/simdutf/simdutf.gyp:simdutf', 'deps/nbytes/nbytes.gyp:nbytes', ], @@ -1322,9 +1320,6 @@ 'target_name': 'node_js2c', 'type': 'executable', 'toolsets': ['host'], - 'dependencies': [ - 'deps/simdutf/simdutf.gyp:simdutf#host', - ], 'include_dirs': [ 'tools', 'src', @@ -1336,6 +1331,9 @@ 'src/embedded_data.cc', ], 'conditions': [ + [ 'node_shared_simdutf=="false"', { + 'dependencies': [ 'deps/simdutf/simdutf.gyp:simdutf#host' ], + }], [ 'node_shared_libuv=="false"', { 'dependencies': [ 'deps/uv/uv.gyp:libuv#host' ], }], @@ -1362,7 +1360,6 @@ '<(node_lib_target_name)', 'deps/histogram/histogram.gyp:histogram', 'deps/nbytes/nbytes.gyp:nbytes', - 'deps/simdutf/simdutf.gyp:simdutf', ], 'includes': [ diff --git a/node.gypi b/node.gypi index 63710d07f552a1..c61e9b170a05c9 100644 --- a/node.gypi +++ b/node.gypi @@ -220,6 +220,10 @@ 'dependencies': [ 'deps/simdjson/simdjson.gyp:simdjson' ], }], + [ 'node_shared_simdutf=="false"', { + 'dependencies': [ 'deps/simdutf/simdutf.gyp:simdutf' ], + }], + [ 'node_shared_brotli=="false"', { 'dependencies': [ 'deps/brotli/brotli.gyp:brotli' ], }], From 
9c6103833b3a47ba42e1dd3ef75b527a024ec9db Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 18 Nov 2024 20:01:06 -0500 Subject: [PATCH 07/53] deps: update simdutf to 5.6.2 PR-URL: https://github.com/nodejs/node/pull/55889 Reviewed-By: Yagiz Nizipli Reviewed-By: Marco Ippolito Reviewed-By: Luigi Pinca Reviewed-By: Rafael Gonzaga --- deps/simdutf/simdutf.cpp | 36 ++++++++++++++++++++++++++++++------ deps/simdutf/simdutf.h | 6 +++--- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index d5f6fbd9a4c413..2da5ca7284c1f0 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-11-12 20:00:19 -0500. Do not edit! */ +/* auto-generated on 2024-11-14 14:52:31 -0500. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" // We include base64_tables once. @@ -7229,6 +7229,11 @@ template bool is_ascii_white_space(char_type c) { return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f'; } +template bool is_ascii_white_space_or_padding(char_type c) { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || + c == '='; +} + template bool is_eight_byte(char_type c) { if (sizeof(char_type) == 1) { return true; @@ -9491,6 +9496,21 @@ simdutf_warn_unused result base64_to_binary_safe_impl( if (r.error != error_code::INVALID_BASE64_CHARACTER && r.error != error_code::BASE64_EXTRA_BITS) { outlen = r.output_count; + if (last_chunk_handling_options == stop_before_partial) { + if ((r.output_count % 3) != 0) { + bool empty_trail = true; + for (size_t i = r.input_count; i < length; i++) { + if (!scalar::base64::is_ascii_white_space_or_padding(input[i])) { + empty_trail = false; + break; + } + } + if (empty_trail) { + r.input_count = length; + } + } + return {r.error, r.input_count}; + } return {r.error, length}; } return r; @@ -9557,7 +9577,11 @@ simdutf_warn_unused result base64_to_binary_safe_impl( } if (rr.error == error_code::SUCCESS && last_chunk_handling_options == stop_before_partial) { - rr.count = tail_input - input; + if (tail_input > input + input_index) { + rr.count = tail_input - input; + } else if (r.input_count > 0) { + rr.count = r.input_count + rr.count; + } return rr; } rr.count += input_index; @@ -15891,9 +15915,9 @@ compress_decode_base64(char *dst, const char_type *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); @@ -23716,9 +23740,9 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); @@ -28552,9 +28576,9 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - 
srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); @@ -38307,9 +38331,9 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index a219e96ad9cd9f..a30e5f2cd228cd 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-11-12 20:00:19 -0500. Do not edit! */ +/* auto-generated on 2024-11-14 14:52:31 -0500. Do not edit! */ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -670,7 +670,7 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "5.6.1" +#define SIMDUTF_VERSION "5.6.2" namespace simdutf { enum { @@ -685,7 +685,7 @@ enum { /** * The revision (major.minor.REVISION) of simdutf being used. */ - SIMDUTF_VERSION_REVISION = 1 + SIMDUTF_VERSION_REVISION = 2 }; } // namespace simdutf From efb9f05f59e66cb917f71edea7c1333f42269651 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Mon, 18 Nov 2024 22:23:18 -0500 Subject: [PATCH 08/53] doc,lib,src,test: unflag sqlite module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit allows the node:sqlite module to be used without starting Node with a CLI flag. The module is still experimental. Fixes: https://github.com/nodejs/node/issues/55854 PR-URL: https://github.com/nodejs/node/pull/55890 Reviewed-By: Jake Yuesong Li Reviewed-By: Marco Ippolito Reviewed-By: Moshe Atlow Reviewed-By: Michaël Zasso Reviewed-By: Matteo Collina --- doc/api/cli.md | 22 +++++++++++-------- doc/api/sqlite.md | 4 +--- doc/node.1 | 6 ++--- lib/internal/process/pre_execution.js | 2 +- src/node_options.cc | 3 ++- src/node_options.h | 2 +- test/parallel/test-sqlite-data-types.js | 1 - test/parallel/test-sqlite-database-sync.js | 1 - test/parallel/test-sqlite-named-parameters.js | 1 - test/parallel/test-sqlite-statement-sync.js | 1 - test/parallel/test-sqlite-transactions.js | 1 - test/parallel/test-sqlite.js | 4 ++-- 12 files changed, 23 insertions(+), 25 deletions(-) diff --git a/doc/api/cli.md b/doc/api/cli.md index 3440fbc160b25b..7edc189d8ca8bd 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -1079,14 +1079,6 @@ added: Use this flag to enable [ShadowRealm][] support. -### `--experimental-sqlite` - - - -Enable the experimental [`node:sqlite`][] module. - ### `--experimental-strip-types` + +Disable the experimental [`node:sqlite`][] module. + ### `--no-experimental-websocket` -> Stability: 1.1 - Active development. Enable this API with the -> [`--experimental-sqlite`][] CLI flag. +> Stability: 1.1 - Active development. 
@@ -412,7 +411,6 @@ The following constants are meant for use with [`database.applyChangeset()`](#da [Changesets and Patchsets]: https://www.sqlite.org/sessionintro.html#changesets_and_patchsets [SQL injection]: https://en.wikipedia.org/wiki/SQL_injection -[`--experimental-sqlite`]: cli.md#--experimental-sqlite [`ATTACH DATABASE`]: https://www.sqlite.org/lang_attach.html [`PRAGMA foreign_keys`]: https://www.sqlite.org/pragma.html#pragma_foreign_keys [`sqlite3_changes64()`]: https://www.sqlite.org/c3ref/changes.html diff --git a/doc/node.1 b/doc/node.1 index 3d92fcd7858b98..02628f8e238724 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -182,9 +182,6 @@ Enable the experimental permission model. .It Fl -experimental-shadow-realm Use this flag to enable ShadowRealm support. . -.It Fl -experimental-sqlite -Enable the experimental node:sqlite module. -. .It Fl -experimental-test-coverage Enable code coverage in the test runner. . @@ -215,6 +212,9 @@ Enable experimental support for the Web Storage API. .It Fl -no-experimental-repl-await Disable top-level await keyword support in REPL. . +.It Fl -no-experimental-sqlite +Disable the experimental node:sqlite module. +. .It Fl -experimental-vm-modules Enable experimental ES module support in VM module. . diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js index 08945a62d4277b..41ebf85900b100 100644 --- a/lib/internal/process/pre_execution.js +++ b/lib/internal/process/pre_execution.js @@ -303,7 +303,7 @@ function setupNavigator() { } function setupSQLite() { - if (!getOptionValue('--experimental-sqlite')) { + if (getOptionValue('--no-experimental-sqlite')) { return; } diff --git a/src/node_options.cc b/src/node_options.cc index dbdec4781a86de..f5661cce536f67 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -427,7 +427,8 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() { AddOption("--experimental-sqlite", "experimental node:sqlite module", &EnvironmentOptions::experimental_sqlite, - kAllowedInEnvvar); + kAllowedInEnvvar, + true); AddOption("--experimental-webstorage", "experimental Web Storage API", &EnvironmentOptions::experimental_webstorage, diff --git a/src/node_options.h b/src/node_options.h index a3521e9aea88b1..aa7a2ce3eba57b 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -123,7 +123,7 @@ class EnvironmentOptions : public Options { bool experimental_eventsource = false; bool experimental_fetch = true; bool experimental_websocket = true; - bool experimental_sqlite = false; + bool experimental_sqlite = true; bool experimental_webstorage = false; std::string localstorage_file; bool experimental_global_navigator = true; diff --git a/test/parallel/test-sqlite-data-types.js b/test/parallel/test-sqlite-data-types.js index df77a90908eed5..75f1257f4d04b4 100644 --- a/test/parallel/test-sqlite-data-types.js +++ b/test/parallel/test-sqlite-data-types.js @@ -1,4 +1,3 @@ -// Flags: --experimental-sqlite 'use strict'; require('../common'); const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-sqlite-database-sync.js b/test/parallel/test-sqlite-database-sync.js index 0bf5b2c139ca03..b9ce0b8ad2aefc 100644 --- a/test/parallel/test-sqlite-database-sync.js +++ b/test/parallel/test-sqlite-database-sync.js @@ -1,4 +1,3 @@ -// Flags: --experimental-sqlite 'use strict'; require('../common'); const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-sqlite-named-parameters.js b/test/parallel/test-sqlite-named-parameters.js index 3060e252235401..d8091a0fd14b13 
100644 --- a/test/parallel/test-sqlite-named-parameters.js +++ b/test/parallel/test-sqlite-named-parameters.js @@ -1,4 +1,3 @@ -// Flags: --experimental-sqlite 'use strict'; require('../common'); const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-sqlite-statement-sync.js b/test/parallel/test-sqlite-statement-sync.js index 8cd3b64ab4bc66..1c2884013680df 100644 --- a/test/parallel/test-sqlite-statement-sync.js +++ b/test/parallel/test-sqlite-statement-sync.js @@ -1,4 +1,3 @@ -// Flags: --experimental-sqlite 'use strict'; require('../common'); const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-sqlite-transactions.js b/test/parallel/test-sqlite-transactions.js index b5ed187e067e6d..304d27d3f3c65f 100644 --- a/test/parallel/test-sqlite-transactions.js +++ b/test/parallel/test-sqlite-transactions.js @@ -1,4 +1,3 @@ -// Flags: --experimental-sqlite 'use strict'; require('../common'); const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-sqlite.js b/test/parallel/test-sqlite.js index f8b122131fe7a2..825e44fb2965f7 100644 --- a/test/parallel/test-sqlite.js +++ b/test/parallel/test-sqlite.js @@ -1,4 +1,3 @@ -// Flags: --experimental-sqlite 'use strict'; const { spawnPromisified } = require('../common'); const tmpdir = require('../common/tmpdir'); @@ -23,13 +22,14 @@ suite('accessing the node:sqlite module', () => { }); }); - test('cannot be accessed without --experimental-sqlite flag', async (t) => { + test('can be disabled with --no-experimental-sqlite flag', async (t) => { const { stdout, stderr, code, signal, } = await spawnPromisified(process.execPath, [ + '--no-experimental-sqlite', '-e', 'require("node:sqlite")', ]); From 8a5d8c766963a27929362859b30d39857d5f8a58 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Tue, 19 Nov 2024 10:01:30 -0500 Subject: [PATCH 09/53] test_runner: mark context.plan() as stable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function does not require a CLI flag, does not emit a warning, and is a fairly simple API that is already being used heavily in the ecosystem. This commit marks context.plan() as stable. PR-URL: https://github.com/nodejs/node/pull/55895 Reviewed-By: Yagiz Nizipli Reviewed-By: Moshe Atlow Reviewed-By: Pietro Marchini Reviewed-By: Matteo Collina Reviewed-By: Michaël Zasso Reviewed-By: Chemi Atlow --- doc/api/test.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/api/test.md b/doc/api/test.md index 2f4bb89ae5622a..e5dee4598579a8 100644 --- a/doc/api/test.md +++ b/doc/api/test.md @@ -3343,10 +3343,12 @@ The name of the test. added: - v22.2.0 - v20.15.0 +changes: + - version: REPLACEME + pr-url: https://github.com/nodejs/node/pull/55895 + description: This function is no longer experimental. --> -> Stability: 1 - Experimental - * `count` {number} The number of assertions and subtests that are expected to run. This function is used to set the number of assertions and subtests that are expected to run From 8197815fe8418c129b19a5a27de0d37ba4dd4879 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Tue, 19 Nov 2024 12:37:35 -0500 Subject: [PATCH 10/53] test_runner: mark snapshot testing as stable This commit marks the test runner's snapshot testing API as stable. 
PR-URL: https://github.com/nodejs/node/pull/55897 Reviewed-By: Moshe Atlow Reviewed-By: Pietro Marchini Reviewed-By: Matteo Collina Reviewed-By: Jacob Smith Reviewed-By: Chemi Atlow --- doc/api/cli.md | 18 ++------- doc/api/test.md | 4 +- doc/node.1 | 3 -- lib/internal/test_runner/test.js | 9 ++--- lib/test.js | 45 ++++++++++----------- src/node_options.cc | 4 +- src/node_options.h | 1 - test/parallel/test-runner-assert.js | 15 +++---- test/parallel/test-runner-snapshot-tests.js | 11 ++--- 9 files changed, 42 insertions(+), 68 deletions(-) diff --git a/doc/api/cli.md b/doc/api/cli.md index 7edc189d8ca8bd..ac6ad2a953e06c 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -1135,16 +1135,6 @@ added: Enable module mocking in the test runner. -### `--experimental-test-snapshots` - - - -> Stability: 1.0 - Early development - -Enable [snapshot testing][] in the test runner. - ### `--experimental-vm-modules` -> Stability: 1.0 - Early development - Regenerates the snapshot files used by the test runner for [snapshot testing][]. -Node.js must be started with the `--experimental-test-snapshots` flag in order -to use this functionality. ### `--throw-deprecation` diff --git a/doc/api/test.md b/doc/api/test.md index e5dee4598579a8..d01e04dbaf6505 100644 --- a/doc/api/test.md +++ b/doc/api/test.md @@ -937,8 +937,7 @@ compared against a set of known good values. The known good values are known as snapshots, and are stored in a snapshot file. Snapshot files are managed by the test runner, but are designed to be human readable to aid in debugging. Best practice is for snapshot files to be checked into source control along with your -test files. In order to enable snapshot testing, Node.js must be started with -the [`--experimental-test-snapshots`][] command-line flag. +test files. Snapshot files are generated by starting Node.js with the [`--test-update-snapshots`][] command-line flag. A separate snapshot file is @@ -3591,7 +3590,6 @@ Can be used to abort test subtasks when the test has been aborted. [`--experimental-strip-types`]: cli.md#--experimental-strip-types [`--experimental-test-coverage`]: cli.md#--experimental-test-coverage [`--experimental-test-module-mocks`]: cli.md#--experimental-test-module-mocks -[`--experimental-test-snapshots`]: cli.md#--experimental-test-snapshots [`--import`]: cli.md#--importmodule [`--test-concurrency`]: cli.md#--test-concurrency [`--test-coverage-include`]: cli.md#--test-coverage-include diff --git a/doc/node.1 b/doc/node.1 index 02628f8e238724..ac55410f500b41 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -191,9 +191,6 @@ Configures the type of test isolation used in the test runner. .It Fl -experimental-test-module-mocks Enable module mocking in the test runner. . -.It Fl -experimental-test-snapshots -Enable snapshot testing in the test runner. -. .It Fl -experimental-strip-types Enable experimental type-stripping for TypeScript files. . 
diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index b679410c5561e5..abb8ed276dfacc 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -104,6 +104,7 @@ function lazyAssertObject(harness) { if (assertObj === undefined) { assertObj = new SafeMap(); const assert = require('assert'); + const { SnapshotManager } = require('internal/test_runner/snapshot'); const methodsToCopy = [ 'deepEqual', 'deepStrictEqual', @@ -126,12 +127,8 @@ function lazyAssertObject(harness) { assertObj.set(methodsToCopy[i], assert[methodsToCopy[i]]); } - const { getOptionValue } = require('internal/options'); - if (getOptionValue('--experimental-test-snapshots')) { - const { SnapshotManager } = require('internal/test_runner/snapshot'); - harness.snapshotManager = new SnapshotManager(harness.config.updateSnapshots); - assertObj.set('snapshot', harness.snapshotManager.createAssert()); - } + harness.snapshotManager = new SnapshotManager(harness.config.updateSnapshots); + assertObj.set('snapshot', harness.snapshotManager.createAssert()); } return assertObj; } diff --git a/lib/test.js b/lib/test.js index ba72d6e4ecbdbf..8e7c6295a37225 100644 --- a/lib/test.js +++ b/lib/test.js @@ -7,7 +7,6 @@ const { const { test, suite, before, after, beforeEach, afterEach } = require('internal/test_runner/harness'); const { run } = require('internal/test_runner/runner'); -const { getOptionValue } = require('internal/options'); module.exports = test; ObjectAssign(module.exports, { @@ -39,28 +38,26 @@ ObjectDefineProperty(module.exports, 'mock', { }, }); -if (getOptionValue('--experimental-test-snapshots')) { - let lazySnapshot; +let lazySnapshot; - ObjectDefineProperty(module.exports, 'snapshot', { - __proto__: null, - configurable: true, - enumerable: true, - get() { - if (lazySnapshot === undefined) { - const { - setDefaultSnapshotSerializers, - setResolveSnapshotPath, - } = require('internal/test_runner/snapshot'); - - lazySnapshot = { - __proto__: null, - setDefaultSnapshotSerializers, - setResolveSnapshotPath, - }; - } +ObjectDefineProperty(module.exports, 'snapshot', { + __proto__: null, + configurable: true, + enumerable: true, + get() { + if (lazySnapshot === undefined) { + const { + setDefaultSnapshotSerializers, + setResolveSnapshotPath, + } = require('internal/test_runner/snapshot'); + + lazySnapshot = { + __proto__: null, + setDefaultSnapshotSerializers, + setResolveSnapshotPath, + }; + } - return lazySnapshot; - }, - }); -} + return lazySnapshot; + }, +}); diff --git a/src/node_options.cc b/src/node_options.cc index f5661cce536f67..452d98db716afa 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -691,9 +691,7 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() { AddOption("--experimental-test-module-mocks", "enable module mocking in the test runner", &EnvironmentOptions::test_runner_module_mocks); - AddOption("--experimental-test-snapshots", - "enable snapshot testing in the test runner", - &EnvironmentOptions::test_runner_snapshots); + AddOption("--experimental-test-snapshots", "", NoOp{}); AddOption("--test-name-pattern", "run tests whose name matches this regular expression", &EnvironmentOptions::test_name_pattern, diff --git a/src/node_options.h b/src/node_options.h index aa7a2ce3eba57b..5446174545afc4 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -189,7 +189,6 @@ class EnvironmentOptions : public Options { uint64_t test_coverage_functions = 0; uint64_t test_coverage_lines = 0; bool test_runner_module_mocks = false; - bool 
test_runner_snapshots = false; bool test_runner_update_snapshots = false; std::vector test_name_pattern; std::vector test_reporter; diff --git a/test/parallel/test-runner-assert.js b/test/parallel/test-runner-assert.js index 8052a0fd84ac32..73d3b107251b50 100644 --- a/test/parallel/test-runner-assert.js +++ b/test/parallel/test-runner-assert.js @@ -3,13 +3,14 @@ require('../common'); const assert = require('node:assert'); const test = require('node:test'); -const uncopiedKeys = [ - 'AssertionError', - 'CallTracker', - 'strict', -]; -test('only methods from node:assert are on t.assert', (t) => { - const expectedKeys = Object.keys(assert).filter((key) => !uncopiedKeys.includes(key)).sort(); +test('expected methods are on t.assert', (t) => { + const uncopiedKeys = [ + 'AssertionError', + 'CallTracker', + 'strict', + ]; + const assertKeys = Object.keys(assert).filter((key) => !uncopiedKeys.includes(key)); + const expectedKeys = ['snapshot'].concat(assertKeys).sort(); assert.deepStrictEqual(Object.keys(t.assert).sort(), expectedKeys); }); diff --git a/test/parallel/test-runner-snapshot-tests.js b/test/parallel/test-runner-snapshot-tests.js index bad3645f5fa183..eea78ff9aa596c 100644 --- a/test/parallel/test-runner-snapshot-tests.js +++ b/test/parallel/test-runner-snapshot-tests.js @@ -1,4 +1,4 @@ -// Flags: --expose-internals --experimental-test-snapshots +// Flags: --expose-internals /* eslint-disable no-template-curly-in-string */ 'use strict'; const common = require('../common'); @@ -299,7 +299,7 @@ test('t.assert.snapshot()', async (t) => { await t.test('fails prior to snapshot generation', async (t) => { const child = await common.spawnPromisified( process.execPath, - ['--experimental-test-snapshots', fixture], + [fixture], { cwd: tmpdir.path }, ); @@ -314,7 +314,7 @@ test('t.assert.snapshot()', async (t) => { await t.test('passes when regenerating snapshots', async (t) => { const child = await common.spawnPromisified( process.execPath, - ['--test-update-snapshots', '--experimental-test-snapshots', fixture], + ['--test-update-snapshots', fixture], { cwd: tmpdir.path }, ); @@ -328,7 +328,7 @@ test('t.assert.snapshot()', async (t) => { await t.test('passes when snapshots exist', async (t) => { const child = await common.spawnPromisified( process.execPath, - ['--experimental-test-snapshots', fixture], + [fixture], { cwd: tmpdir.path }, ); @@ -350,7 +350,6 @@ test('snapshots from multiple files (isolation=none)', async (t) => { const args = [ '--test', '--experimental-test-isolation=none', - '--experimental-test-snapshots', fixture, fixture2, ]; @@ -372,7 +371,6 @@ test('snapshots from multiple files (isolation=none)', async (t) => { const args = [ '--test', '--experimental-test-isolation=none', - '--experimental-test-snapshots', '--test-update-snapshots', fixture, fixture2, @@ -394,7 +392,6 @@ test('snapshots from multiple files (isolation=none)', async (t) => { const args = [ '--test', '--experimental-test-isolation=none', - '--experimental-test-snapshots', fixture, fixture2, ]; From 219f5f26276aece77bcaea02146c935d62569405 Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Tue, 19 Nov 2024 15:15:34 -0300 Subject: [PATCH 11/53] doc: include git node release --promote to steps Refs: https://github.com/nodejs/node-core-utils/pull/835 PR-URL: https://github.com/nodejs/node/pull/55835 Reviewed-By: Antoine du Hamel Reviewed-By: Joyee Cheung --- doc/contributing/releases.md | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/doc/contributing/releases.md 
b/doc/contributing/releases.md index 82eec2833998ad..3ae682169873df 100644 --- a/doc/contributing/releases.md +++ b/doc/contributing/releases.md @@ -707,12 +707,23 @@ the build before moving forward. Use the following list as a baseline: ### 11. Tag and sign the release commit -Once you have produced builds that you're happy with, create a new tag. By -waiting until this stage to create tags, you can discard a proposed release if -something goes wrong or additional commits are required. Once you have created a -tag and pushed it to GitHub, you _**must not**_ delete and re-tag. If you make -a mistake after tagging then you'll have to version-bump and start again and -count that tag/version as lost. +Once you have produced builds that you're happy with you can either run +`git node release --promote` + +```bash +git node release -S --promote https://github.com/nodejs/node/pull/XXXX +``` + +to automate the remaining steps until step 16 or you can perform it manually +following the below steps. + +*** + +Create a new tag: By waiting until this stage to create tags, you can discard +a proposed release if something goes wrong or additional commits are required. +Once you have created a tag and pushed it to GitHub, you _**must not**_ delete +and re-tag. If you make a mistake after tagging then you'll have to version-bump +and start again and count that tag/version as lost. Tag summaries have a predictable format. Look at a recent tag to see: From f711a48e15c8647b2125a088f4b5aa6b99368f6f Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Tue, 19 Nov 2024 15:31:29 -0300 Subject: [PATCH 12/53] doc: fix relative path mention in --allow-fs PR-URL: https://github.com/nodejs/node/pull/55791 Reviewed-By: Luigi Pinca Reviewed-By: Joyee Cheung Reviewed-By: Chemi Atlow --- doc/api/cli.md | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/doc/api/cli.md b/doc/api/cli.md index ac6ad2a953e06c..c41fcb075db645 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -208,23 +208,18 @@ The valid arguments for the `--allow-fs-read` flag are: * Multiple paths can be allowed using multiple `--allow-fs-read` flags. Example `--allow-fs-read=/folder1/ --allow-fs-read=/folder1/` -Paths delimited by comma (`,`) are no longer allowed. -When passing a single flag with a comma a warning will be displayed. - Examples can be found in the [File System Permissions][] documentation. -Relative paths are NOT yet supported by the CLI flag. - The initializer module also needs to be allowed. Consider the following example: ```console -$ node --experimental-permission t.js +$ node --experimental-permission index.js Error: Access to this API has been restricted at node:internal/main/run_main_module:23:47 { code: 'ERR_ACCESS_DENIED', permission: 'FileSystemRead', - resource: '/Users/rafaelgss/repos/os/node/t.js' + resource: '/Users/rafaelgss/repos/os/node/index.js' } ``` @@ -260,8 +255,6 @@ When passing a single flag with a comma a warning will be displayed. Examples can be found in the [File System Permissions][] documentation. -Relative paths are NOT supported through the CLI flag. 
- ### `--allow-wasi` ```js - const fs = require('fs/promises'); + const fs = require('node:fs/promises'); (async () => { let data; diff --git a/doc/api/esm.md b/doc/api/esm.md index 64d2e537ecd9d8..8a80e0b1fe7202 100644 --- a/doc/api/esm.md +++ b/doc/api/esm.md @@ -509,7 +509,7 @@ module default import or its corresponding sugar syntax: ```js import { default as cjs } from 'cjs'; -// identical to the above +// Identical to the above import cjsSugar from 'cjs'; console.log(cjs); diff --git a/doc/api/inspector.md b/doc/api/inspector.md index 64db50b8fc1da4..2892779403d256 100644 --- a/doc/api/inspector.md +++ b/doc/api/inspector.md @@ -505,7 +505,7 @@ inspector.Network.requestWillBeSent({ request: { url: 'https://nodejs.org/en', method: 'GET', - } + }, }); ``` diff --git a/doc/api/modules.md b/doc/api/modules.md index 6d62b85bd2f12f..720a47205edb2a 100644 --- a/doc/api/modules.md +++ b/doc/api/modules.md @@ -890,7 +890,7 @@ built-in modules and if a name matching a built-in module is added to the cache, only `node:`-prefixed require calls are going to receive the built-in module. Use with care! - + ```js const assert = require('node:assert'); diff --git a/doc/api/perf_hooks.md b/doc/api/perf_hooks.md index 7c3144939244b6..fe345287198297 100644 --- a/doc/api/perf_hooks.md +++ b/doc/api/perf_hooks.md @@ -28,7 +28,7 @@ performance.measure('Start to Now'); performance.mark('A'); (async function doSomeLongRunningProcess() { - await new Promise(r => setTimeout(r, 5000)); + await new Promise((r) => setTimeout(r, 5000)); performance.measure('A to Now', 'A'); performance.mark('B'); diff --git a/doc/api/process.md b/doc/api/process.md index 1a2638b8752f75..01083c74c95089 100644 --- a/doc/api/process.md +++ b/doc/api/process.md @@ -2107,10 +2107,10 @@ class Test { constructor() { finalization.register(this, (ref) => ref.dispose()); - // even something like this is highly discouraged + // Even something like this is highly discouraged // finalization.register(this, () => this.dispose()); - } - dispose() {} + } + dispose() {} } ``` diff --git a/doc/api/punycode.md b/doc/api/punycode.md index 6aa01a8348797b..f2e9bc09fbbb00 100644 --- a/doc/api/punycode.md +++ b/doc/api/punycode.md @@ -21,7 +21,7 @@ The `punycode` module is a bundled version of the [Punycode.js][] module. It can be accessed using: ```js -const punycode = require('punycode'); +const punycode = require('node:punycode'); ``` [Punycode][] is a character encoding scheme defined by RFC 3492 that is diff --git a/doc/api/stream.md b/doc/api/stream.md index 8dc5173500e623..f890f4fd529b01 100644 --- a/doc/api/stream.md +++ b/doc/api/stream.md @@ -315,7 +315,7 @@ events (due to incorrect stream implementations) do not cause unexpected crashes. 
If this is unwanted behavior then `options.cleanup` should be set to `true`: -```js +```mjs await finished(rs, { cleanup: true }); ``` @@ -3926,7 +3926,7 @@ const { StringDecoder } = require('node:string_decoder'); class StringWritable extends Writable { constructor(options) { super(options); - this._decoder = new StringDecoder(options && options.defaultEncoding); + this._decoder = new StringDecoder(options?.defaultEncoding); this.data = ''; } _write(chunk, encoding, callback) { diff --git a/doc/api/test.md b/doc/api/test.md index d01e04dbaf6505..2ca8c0e6877124 100644 --- a/doc/api/test.md +++ b/doc/api/test.md @@ -3281,7 +3281,7 @@ test('snapshot test with default serialization', (t) => { test('snapshot test with custom serialization', (t) => { t.assert.snapshot({ value3: 3, value4: 4 }, { - serializers: [(value) => JSON.stringify(value)] + serializers: [(value) => JSON.stringify(value)], }); }); ``` diff --git a/doc/api/tls.md b/doc/api/tls.md index f19a75370b1cf5..3436ba6697950f 100644 --- a/doc/api/tls.md +++ b/doc/api/tls.md @@ -465,13 +465,13 @@ to set the security level to 0 while using the default OpenSSL cipher list, you const tls = require('node:tls'); const port = 443; -tls.createServer({ciphers: 'DEFAULT@SECLEVEL=0', minVersion: 'TLSv1'}, function (socket) { +tls.createServer({ ciphers: 'DEFAULT@SECLEVEL=0', minVersion: 'TLSv1' }, function(socket) { console.log('Client connected with protocol:', socket.getProtocol()); socket.end(); this.close(); -}). -listen(port, () => { - tls.connect(port, {ciphers: 'DEFAULT@SECLEVEL=0', maxVersion: 'TLSv1'}); +}) +.listen(port, () => { + tls.connect(port, { ciphers: 'DEFAULT@SECLEVEL=0', maxVersion: 'TLSv1' }); }); ``` diff --git a/doc/api/tracing.md b/doc/api/tracing.md index e1a72a708863a5..ddd1bae52a62c4 100644 --- a/doc/api/tracing.md +++ b/doc/api/tracing.md @@ -245,7 +245,7 @@ console.log(trace_events.getEnabledCategories()); ```js 'use strict'; -const { Session } = require('inspector'); +const { Session } = require('node:inspector'); const session = new Session(); session.connect(); diff --git a/doc/api/util.md b/doc/api/util.md index dc8e3c38ddfca6..cf193528368188 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -513,7 +513,7 @@ The mapping between error codes and string messages is platform-dependent. ```js fs.access('file/that/does/not/exist', (err) => { const name = util.getSystemErrorMessage(err.errno); - console.error(name); // no such file or directory + console.error(name); // No such file or directory }); ``` diff --git a/doc/api/v8.md b/doc/api/v8.md index 4df6e98a48fe78..670283e17f5d80 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -1296,7 +1296,7 @@ is as follows. Here's an example. ```js -const { GCProfiler } = require('v8'); +const { GCProfiler } = require('node:v8'); const profiler = new GCProfiler(); profiler.start(); setTimeout(() => { diff --git a/doc/api/vm.md b/doc/api/vm.md index 01317b5843ad34..a5dd038070a498 100644 --- a/doc/api/vm.md +++ b/doc/api/vm.md @@ -1622,12 +1622,12 @@ in the outer context. const vm = require('node:vm'); // An undefined `contextObject` option makes the global object contextified. -let context = vm.createContext(); +const context = vm.createContext(); console.log(vm.runInContext('globalThis', context) === context); // false // A contextified global object cannot be frozen. 
try { vm.runInContext('Object.freeze(globalThis);', context); -} catch(e) { +} catch (e) { console.log(e); // TypeError: Cannot freeze } console.log(vm.runInContext('globalThis.foo = 1; foo;', context)); // 1 @@ -1652,7 +1652,7 @@ const context = vm.createContext(vm.constants.DONT_CONTEXTIFY); vm.runInContext('Object.freeze(globalThis);', context); try { vm.runInContext('bar = 1; bar;', context); -} catch(e) { +} catch (e) { console.log(e); // Uncaught ReferenceError: bar is not defined } ``` @@ -1681,7 +1681,7 @@ console.log(vm.runInContext('bar;', context)); // 1 Object.freeze(context); try { vm.runInContext('baz = 1; baz;', context); -} catch(e) { +} catch (e) { console.log(e); // Uncaught ReferenceError: baz is not defined } ``` diff --git a/doc/api/worker_threads.md b/doc/api/worker_threads.md index f1b5c5b652f4f1..afecd991da9dd3 100644 --- a/doc/api/worker_threads.md +++ b/doc/api/worker_threads.md @@ -218,7 +218,7 @@ markAsUncloneable(anyObject); const { port1 } = new MessageChannel(); try { // This will throw an error, because anyObject is not cloneable. - port1.postMessage(anyObject) + port1.postMessage(anyObject); } catch (error) { // error.name === 'DataCloneError' } @@ -908,6 +908,8 @@ not preserved. In particular, [`Buffer`][] objects will be read as plain [`Uint8Array`][]s on the receiving side, and instances of JavaScript classes will be cloned as plain JavaScript objects. + + ```js const b = Symbol('b'); diff --git a/doc/api/zlib.md b/doc/api/zlib.md index 36977eae86af6f..c79c19b697e066 100644 --- a/doc/api/zlib.md +++ b/doc/api/zlib.md @@ -190,10 +190,7 @@ http.createServer((request, response) => { const raw = fs.createReadStream('index.html'); // Store both a compressed and an uncompressed version of the resource. response.setHeader('Vary', 'Accept-Encoding'); - let acceptEncoding = request.headers['accept-encoding']; - if (!acceptEncoding) { - acceptEncoding = ''; - } + const acceptEncoding = request.headers['accept-encoding'] || ''; const onError = (err) => { if (err) { diff --git a/doc/changelogs/CHANGELOG_V23.md b/doc/changelogs/CHANGELOG_V23.md index 47b30d19b91bae..b18ffc81ff8ea6 100644 --- a/doc/changelogs/CHANGELOG_V23.md +++ b/doc/changelogs/CHANGELOG_V23.md @@ -246,11 +246,11 @@ will now correctly change as the underlying `ArrayBuffer` size is changed. ```js const ab = new ArrayBuffer(10, { maxByteLength: 20 }); const buffer = Buffer.from(ab); -console.log(buffer.byteLength); 10 +console.log(buffer.byteLength); // 10 ab.resize(15); -console.log(buffer.byteLength); 15 +console.log(buffer.byteLength); // 15 ab.resize(5); -console.log(buffer.byteLength); 5 +console.log(buffer.byteLength); // 5 ``` Contributed by James M Snell in [#55377](https://github.com/nodejs/node/pull/55377). 
diff --git a/doc/contributing/writing-and-running-benchmarks.md b/doc/contributing/writing-and-running-benchmarks.md index 3d16c7d14fc033..63fbf75c798833 100644 --- a/doc/contributing/writing-and-running-benchmarks.md +++ b/doc/contributing/writing-and-running-benchmarks.md @@ -538,7 +538,7 @@ The arguments of `createBenchmark` are: source: ['buffer', 'string'], len: [2048], n: [50, 2048], - } + }, }, { byGroups: true }); ``` diff --git a/eslint.config.mjs b/eslint.config.mjs index f37774bace871d..8e574a312d4a3e 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -43,9 +43,7 @@ export default [ '**/node_modules/**', 'benchmark/fixtures/**', 'benchmark/tmp/**', - 'doc/**/*.js', 'doc/changelogs/CHANGELOG_V1*.md', - '!doc/api_assets/*.js', '!doc/changelogs/CHANGELOG_V18.md', 'lib/punycode.js', 'test/.tmp.*/**', From 497a9aea1c00b10661776c0587b8c68a694c4bd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=BCseyin=20A=C3=A7acak?= <110401522+huseyinacacak-janea@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:14:20 +0300 Subject: [PATCH 14/53] src: fix kill signal on Windows Fixes: https://github.com/nodejs/node/issues/42923 PR-URL: https://github.com/nodejs/node/pull/55514 Reviewed-By: Luigi Pinca Reviewed-By: Stefan Stojanovic --- doc/api/child_process.md | 4 ++-- src/process_wrap.cc | 6 ++++++ test/parallel/test-child-process-kill.js | 19 +++++++++++++++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/doc/api/child_process.md b/doc/api/child_process.md index 0c0c80b35dc40c..7a9345416e0545 100644 --- a/doc/api/child_process.md +++ b/doc/api/child_process.md @@ -1700,8 +1700,8 @@ may not actually terminate the process. See kill(2) for reference. On Windows, where POSIX signals do not exist, the `signal` argument will be -ignored, and the process will be killed forcefully and abruptly (similar to -`'SIGKILL'`). +ignored except for `'SIGKILL'`, `'SIGTERM'`, `'SIGINT'` and `'SIGQUIT'`, and the +process will always be killed forcefully and abruptly (similar to `'SIGKILL'`). See [Signal Events][] for more details. On Linux, child processes of child processes will not be terminated diff --git a/src/process_wrap.cc b/src/process_wrap.cc index 14c9e99934e27b..27a294eb384960 100644 --- a/src/process_wrap.cc +++ b/src/process_wrap.cc @@ -314,6 +314,12 @@ class ProcessWrap : public HandleWrap { ProcessWrap* wrap; ASSIGN_OR_RETURN_UNWRAP(&wrap, args.This()); int signal = args[0]->Int32Value(env->context()).FromJust(); +#ifdef _WIN32 + if (signal != SIGKILL && signal != SIGTERM && signal != SIGINT && + signal != SIGQUIT) { + signal = SIGKILL; + } +#endif int err = uv_process_kill(&wrap->process_, signal); args.GetReturnValue().Set(err); } diff --git a/test/parallel/test-child-process-kill.js b/test/parallel/test-child-process-kill.js index 1025c69ba1ac28..26bdc029c04fe8 100644 --- a/test/parallel/test-child-process-kill.js +++ b/test/parallel/test-child-process-kill.js @@ -39,3 +39,22 @@ assert.strictEqual(cat.signalCode, null); assert.strictEqual(cat.killed, false); cat.kill(); assert.strictEqual(cat.killed, true); + +// Test different types of kill signals on Windows. 
+if (common.isWindows) { + for (const sendSignal of ['SIGTERM', 'SIGKILL', 'SIGQUIT', 'SIGINT']) { + const process = spawn('cmd'); + process.on('exit', (code, signal) => { + assert.strictEqual(code, null); + assert.strictEqual(signal, sendSignal); + }); + process.kill(sendSignal); + } + + const process = spawn('cmd'); + process.on('exit', (code, signal) => { + assert.strictEqual(code, null); + assert.strictEqual(signal, 'SIGKILL'); + }); + process.kill('SIGHUP'); +} From 493e16c85272f14ce7eb6f9638cd6642523b53a2 Mon Sep 17 00:00:00 2001 From: Livia Medeiros Date: Thu, 21 Nov 2024 03:25:05 +0900 Subject: [PATCH 15/53] test: fix determining lower priority PR-URL: https://github.com/nodejs/node/pull/55908 Fixes: https://github.com/NixOS/nixpkgs/issues/355919 Reviewed-By: Luigi Pinca Reviewed-By: Antoine du Hamel --- test/parallel/test-os.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/parallel/test-os.js b/test/parallel/test-os.js index efaec2b3ead385..3d9fe5c1a60e2e 100644 --- a/test/parallel/test-os.js +++ b/test/parallel/test-os.js @@ -84,7 +84,8 @@ assert.ok(hostname.length > 0); // IBMi process priority is different. if (!common.isIBMi) { const { PRIORITY_BELOW_NORMAL, PRIORITY_LOW } = os.constants.priority; - const LOWER_PRIORITY = os.getPriority() > PRIORITY_BELOW_NORMAL ? PRIORITY_BELOW_NORMAL : PRIORITY_LOW; + // Priority means niceness: higher numeric value <=> lower priority + const LOWER_PRIORITY = os.getPriority() < PRIORITY_BELOW_NORMAL ? PRIORITY_BELOW_NORMAL : PRIORITY_LOW; os.setPriority(LOWER_PRIORITY); const priority = os.getPriority(); is.number(priority); From 5a2a75711937202d59382906f7fea3c36739e79b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alfredo=20Gonz=C3=A1lez?= <12631491+mfdebian@users.noreply.github.com> Date: Wed, 20 Nov 2024 20:26:30 -0300 Subject: [PATCH 16/53] doc: add esm examples to node:timers PR-URL: https://github.com/nodejs/node/pull/55857 Reviewed-By: Luigi Pinca Reviewed-By: Jason Zhang Reviewed-By: Antoine du Hamel --- doc/api/timers.md | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/doc/api/timers.md b/doc/api/timers.md index 83edc1d73b0e77..9c4ca2ea17d263 100644 --- a/doc/api/timers.md +++ b/doc/api/timers.md @@ -292,7 +292,24 @@ returned Promises will be rejected with an `'AbortError'`. For `setImmediate()`: -```js +```mjs +import { setImmediate as setImmediatePromise } from 'node:timers/promises'; + +const ac = new AbortController(); +const signal = ac.signal; + +// We do not `await` the promise so `ac.abort()` is called concurrently. +setImmediatePromise('foobar', { signal }) + .then(console.log) + .catch((err) => { + if (err.name === 'AbortError') + console.error('The immediate was aborted'); + }); + +ac.abort(); +``` + +```cjs const { setImmediate: setImmediatePromise } = require('node:timers/promises'); const ac = new AbortController(); @@ -310,7 +327,24 @@ ac.abort(); For `setTimeout()`: -```js +```mjs +import { setTimeout as setTimeoutPromise } from 'node:timers/promises'; + +const ac = new AbortController(); +const signal = ac.signal; + +// We do not `await` the promise so `ac.abort()` is called concurrently. 
+setTimeoutPromise(1000, 'foobar', { signal }) + .then(console.log) + .catch((err) => { + if (err.name === 'AbortError') + console.error('The timeout was aborted'); + }); + +ac.abort(); +``` + +```cjs const { setTimeout: setTimeoutPromise } = require('node:timers/promises'); const ac = new AbortController(); From f48e289580e1974a718ef1433fafd046db7578df Mon Sep 17 00:00:00 2001 From: Cheng Date: Thu, 21 Nov 2024 09:33:35 +0900 Subject: [PATCH 17/53] build: fix GN build for sqlite MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/55912 Reviewed-By: Colin Ihrig Reviewed-By: Juan José Arboleda Reviewed-By: Yagiz Nizipli Reviewed-By: Marco Ippolito Reviewed-By: Luigi Pinca --- deps/sqlite/unofficial.gni | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deps/sqlite/unofficial.gni b/deps/sqlite/unofficial.gni index 6eda916ad23e07..b26e1335eac339 100644 --- a/deps/sqlite/unofficial.gni +++ b/deps/sqlite/unofficial.gni @@ -7,6 +7,10 @@ template("sqlite_gn_build") { config("sqlite_config") { include_dirs = [ "." ] + defines = [ + "SQLITE_ENABLE_SESSION", + "SQLITE_ENABLE_PREUPDATE_HOOK", + ] } gypi_values = exec_script("../../tools/gypi_to_gn.py", From 7768b3d054971b22f22a8385f3cebba7e4db71f9 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Thu, 21 Nov 2024 08:21:41 -0500 Subject: [PATCH 18/53] deps: update simdjson to 3.10.1 PR-URL: https://github.com/nodejs/node/pull/54678 Reviewed-By: Yagiz Nizipli Reviewed-By: Marco Ippolito --- deps/simdjson/simdjson.cpp | 6 +- deps/simdjson/simdjson.h | 418 +++++++++++++++++++------------------ 2 files changed, 220 insertions(+), 204 deletions(-) diff --git a/deps/simdjson/simdjson.cpp b/deps/simdjson/simdjson.cpp index b5dd7243853217..d79fc703a815ca 100644 --- a/deps/simdjson/simdjson.cpp +++ b/deps/simdjson/simdjson.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-08-01 09:31:50 -0400. Do not edit! */ +/* auto-generated on 2024-08-26 09:37:03 -0400. Do not edit! */ /* including simdjson.cpp: */ /* begin file simdjson.cpp */ #define SIMDJSON_SRC_SIMDJSON_CPP @@ -332,6 +332,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_ISALIGNED_N(ptr, n) (((uintptr_t)(ptr) & ((n)-1)) == 0) #if SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __declspec(deprecated) #define simdjson_really_inline __forceinline #define simdjson_never_inline __declspec(noinline) @@ -370,6 +372,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_POP_DISABLE_UNUSED_WARNINGS #else // SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __attribute__((deprecated)) #define simdjson_really_inline inline __attribute__((always_inline)) #define simdjson_never_inline inline __attribute__((noinline)) diff --git a/deps/simdjson/simdjson.h b/deps/simdjson/simdjson.h index ddb6f2e4e0a6ed..f21cd9381eef59 100644 --- a/deps/simdjson/simdjson.h +++ b/deps/simdjson/simdjson.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-08-01 09:31:50 -0400. Do not edit! */ +/* auto-generated on 2024-08-26 09:37:03 -0400. Do not edit! 
*/ /* including simdjson.h: */ /* begin file simdjson.h */ #ifndef SIMDJSON_H @@ -352,6 +352,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_ISALIGNED_N(ptr, n) (((uintptr_t)(ptr) & ((n)-1)) == 0) #if SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __declspec(deprecated) #define simdjson_really_inline __forceinline #define simdjson_never_inline __declspec(noinline) @@ -390,6 +392,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_POP_DISABLE_UNUSED_WARNINGS #else // SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __attribute__((deprecated)) #define simdjson_really_inline inline __attribute__((always_inline)) #define simdjson_never_inline inline __attribute__((noinline)) @@ -2366,7 +2370,7 @@ namespace std { #define SIMDJSON_SIMDJSON_VERSION_H /** The version of simdjson being used (major.minor.revision) */ -#define SIMDJSON_VERSION "3.10.0" +#define SIMDJSON_VERSION "3.10.1" namespace simdjson { enum { @@ -2381,7 +2385,7 @@ enum { /** * The revision (major.minor.REVISION) of simdjson being used. */ - SIMDJSON_VERSION_REVISION = 0 + SIMDJSON_VERSION_REVISION = 1 }; } // namespace simdjson @@ -34014,7 +34018,7 @@ class parser { private: /** @private [for benchmarking access] The implementation to use */ - std::unique_ptr implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -34557,7 +34561,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -34580,7 +34584,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -34593,7 +34597,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. 
* @@ -35159,7 +35166,7 @@ struct simdjson_result : public arm64::implementation simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -35599,7 +35606,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -36561,16 +36568,14 @@ simdjson_inline simdjson_result &simdjson_resul /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -36725,24 +36730,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return 
get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -37029,7 +37036,7 @@ simdjson_inline simdjson_result simdjson_result::g return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -37045,7 +37052,7 @@ simdjson_inline error_code simdjson_result::get(T &ou } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -40014,8 +40021,6 @@ simdjson_inline simdjson_result::simdjson_resul /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -40561,9 +40566,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; 
size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -45023,7 +45028,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -45046,7 +45051,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -45059,7 +45064,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. * @@ -45625,7 +45633,7 @@ struct simdjson_result : public fallback::implemen simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -46065,7 +46073,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -47027,16 +47035,14 @@ simdjson_inline simdjson_result &simdjson_re /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include 
"simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -47191,24 +47197,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -47495,7 +47503,7 @@ simdjson_inline simdjson_result simdjson_result return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -47511,7 +47519,7 @@ simdjson_inline error_code simdjson_result::get(T } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> 
simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -50480,8 +50488,6 @@ simdjson_inline simdjson_result::simdjson_re /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -51027,9 +51033,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -55981,7 +55987,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -56004,7 +56010,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -56017,7 +56023,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. 
* @@ -56583,7 +56592,7 @@ struct simdjson_result : public haswell::implementa simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -57023,7 +57032,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -57985,16 +57994,14 @@ simdjson_inline simdjson_result &simdjson_res /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -58149,24 +58156,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return 
get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -58453,7 +58462,7 @@ simdjson_inline simdjson_result simdjson_result: return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -58469,7 +58478,7 @@ simdjson_inline error_code simdjson_result::get(T & } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -61438,8 +61447,6 @@ simdjson_inline simdjson_result::simdjson_res /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -61985,9 +61992,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t 
_max_depth{DEFAULT_MAX_DEPTH}; @@ -66938,7 +66945,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -66961,7 +66968,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -66974,7 +66981,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. * @@ -67540,7 +67550,7 @@ struct simdjson_result : public icelake::implementa simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -67980,7 +67990,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -68942,16 +68952,14 @@ simdjson_inline simdjson_result &simdjson_res /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include 
"simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -69106,24 +69114,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -69410,7 +69420,7 @@ simdjson_inline simdjson_result simdjson_result: return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -69426,7 +69436,7 @@ simdjson_inline error_code simdjson_result::get(T & } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> 
simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -72395,8 +72405,6 @@ simdjson_inline simdjson_result::simdjson_res /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -72942,9 +72950,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -78010,7 +78018,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -78033,7 +78041,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -78046,7 +78054,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. 
* @@ -78612,7 +78623,7 @@ struct simdjson_result : public ppc64::implementation simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -79052,7 +79063,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -80014,16 +80025,14 @@ simdjson_inline simdjson_result &simdjson_resul /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -80178,24 +80187,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return 
get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -80482,7 +80493,7 @@ simdjson_inline simdjson_result simdjson_result::g return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -80498,7 +80509,7 @@ simdjson_inline error_code simdjson_result::get(T &ou } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -83467,8 +83478,6 @@ simdjson_inline simdjson_result::simdjson_resul /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -84014,9 +84023,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; 
size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -89405,7 +89414,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -89428,7 +89437,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -89441,7 +89450,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. * @@ -90007,7 +90019,7 @@ struct simdjson_result : public westmere::implemen simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -90447,7 +90459,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -91409,16 +91421,14 @@ simdjson_inline simdjson_result &simdjson_re /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include 
"simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -91573,24 +91583,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -91877,7 +91889,7 @@ simdjson_inline simdjson_result simdjson_result return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -91893,7 +91905,7 @@ simdjson_inline error_code simdjson_result::get(T } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> 
simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -94862,8 +94874,6 @@ simdjson_inline simdjson_result::simdjson_re /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -95409,9 +95419,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -100271,7 +100281,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -100294,7 +100304,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -100307,7 +100317,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. 
* @@ -100873,7 +100886,7 @@ struct simdjson_result : public lsx::implementation_sim simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -101313,7 +101326,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -102275,16 +102288,14 @@ simdjson_inline simdjson_result &simdjson_result< /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -102439,24 +102450,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept 
{ return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -102743,7 +102756,7 @@ simdjson_inline simdjson_result simdjson_result::get return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -102759,7 +102772,7 @@ simdjson_inline error_code simdjson_result::get(T &out) } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -105728,8 +105741,6 @@ simdjson_inline simdjson_result::simdjson_result( /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -106275,9 +106286,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; 
size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -111150,7 +111161,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -111173,7 +111184,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -111186,7 +111197,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. * @@ -111752,7 +111766,7 @@ struct simdjson_result : public lasx::implementation_s simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -112192,7 +112206,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -113154,16 +113168,14 @@ simdjson_inline simdjson_result &simdjson_result /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation 
skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -113318,24 +113330,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -113622,7 +113636,7 @@ simdjson_inline simdjson_result simdjson_result::ge return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -113638,7 +113652,7 @@ simdjson_inline error_code simdjson_result::get(T &out } template<> simdjson_inline simdjson_result 
simdjson_result::get() & noexcept = delete; -template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -116607,8 +116621,6 @@ simdjson_inline simdjson_result::simdjson_result /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -117154,9 +117166,9 @@ simdjson_inline simdjson_result simdjson_result Date: Thu, 21 Nov 2024 18:37:22 -0500 Subject: [PATCH 19/53] doc: remove RedYetiDev from triagers team PR-URL: https://github.com/nodejs/node/pull/55947 Reviewed-By: Luigi Pinca Reviewed-By: Michael Dawson Reviewed-By: Jake Yuesong Li Reviewed-By: Jason Zhang Reviewed-By: Marco Ippolito --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 69777ccb2dad40..de169983d787c9 100644 --- a/README.md +++ b/README.md @@ -757,8 +757,6 @@ maintaining the Node.js project. **Mert Can Altin** <> * [preveen-stack](https://github.com/preveen-stack) - **Preveen Padmanabhan** <> (he/him) -* [RedYetiDev](https://github.com/RedYetiDev) - - **Aviv Keller** <> (they/them) * [VoltrexKeyva](https://github.com/VoltrexKeyva) - **Mohammed Keyvanzadeh** <> (he/him) From d777d4a52db19f009e635a8a9482e2939642f30c Mon Sep 17 00:00:00 2001 From: tpoisseau <22891227+tpoisseau@users.noreply.github.com> Date: Thu, 21 Nov 2024 09:56:04 +0100 Subject: [PATCH 20/53] sqlite: add `StatementSync.prototype.iterate` method PR-URL: https://github.com/nodejs/node/pull/54213 Reviewed-By: Zeyu "Alex" Yang Reviewed-By: Colin Ihrig --- doc/api/sqlite.md | 19 +++ src/env_properties.h | 6 + src/node_sqlite.cc | 176 ++++++++++++++++++++ src/node_sqlite.h | 6 + test/parallel/test-sqlite-statement-sync.js | 36 ++++ 5 files changed, 243 insertions(+) diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 048ba67eeac7f7..4e27d0d8bc5f26 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -287,6 +287,25 @@ object. If the prepared statement does not return any results, this method returns `undefined`. The prepared statement [parameters are bound][] using the values in `namedParameters` and `anonymousParameters`. +### `statement.iterate([namedParameters][, ...anonymousParameters])` + + + +* `namedParameters` {Object} An optional object used to bind named parameters. + The keys of this object are used to configure the mapping. +* `...anonymousParameters` {null|number|bigint|string|Buffer|Uint8Array} Zero or + more values to bind to anonymous parameters. +* Returns: {Iterator} An iterable iterator of objects. Each object corresponds to a row + returned by executing the prepared statement. The keys and values of each + object correspond to the column names and values of the row. 
+ +This method executes a prepared statement and returns an iterator of +objects. If the prepared statement does not return any results, this method +returns an empty iterator. The prepared statement [parameters are bound][] using +the values in `namedParameters` and `anonymousParameters`. + ### `statement.run([namedParameters][, ...anonymousParameters])` + +* `otherKeyObject`: {KeyObject} A `KeyObject` with which to + compare `keyObject`. +* Returns: {boolean} + +Returns `true` or `false` depending on whether the keys have exactly the same +type, value, and parameters. This method is not +[constant time](https://en.wikipedia.org/wiki/Timing_attack). + ### `keyObject.export([options])` - -* `otherKeyObject`: {KeyObject} A `KeyObject` with which to - compare `keyObject`. -* Returns: {boolean} - -Returns `true` or `false` depending on whether the keys have exactly the same -type, value, and parameters. This method is not -[constant time](https://en.wikipedia.org/wiki/Timing_attack). - ### `keyObject.symmetricKeySize` + +* Type: {string\[]} + +An array detailing the key extended usages for this certificate. + ### `x509.fingerprint` - -* Type: {string\[]} - -An array detailing the key extended usages for this certificate. - ### `x509.publicKey` - -* {Object} - -An object containing commonly used constants for crypto and security related -operations. The specific constants currently defined are described in -[Crypto constants][]. - -### `crypto.fips` - - - -> Stability: 0 - Deprecated - -Property for checking and controlling whether a FIPS compliant crypto provider -is currently in use. Setting to true requires a FIPS build of Node.js. - -This property is deprecated. Please use `crypto.setFips()` and -`crypto.getFips()` instead. - ### `crypto.checkPrime(candidate[, options], callback)` + +* {Object} + +An object containing commonly used constants for crypto and security related +operations. The specific constants currently defined are described in +[Crypto constants][]. + ### `crypto.createCipheriv(algorithm, key, iv[, options])` -> Stability: 1.2 - Release candidate - -* `algorithm` {string|undefined} -* `data` {string|Buffer|TypedArray|DataView} When `data` is a - string, it will be encoded as UTF-8 before being hashed. If a different - input encoding is desired for a string input, user could encode the string - into a `TypedArray` using either `TextEncoder` or `Buffer.from()` and passing - the encoded `TypedArray` into this API instead. -* `outputEncoding` {string|undefined} [Encoding][encoding] used to encode the - returned digest. **Default:** `'hex'`. -* Returns: {string|Buffer} - -A utility for creating one-shot hash digests of data. It can be faster than -the object-based `crypto.createHash()` when hashing a smaller amount of data -(<= 5MB) that's readily available. If the data can be big or if it is streamed, -it's still recommended to use `crypto.createHash()` instead. - -The `algorithm` is dependent on the available algorithms supported by the -version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc. -On recent releases of OpenSSL, `openssl list -digest-algorithms` will -display the available digest algorithms. - -Example: - -```cjs -const crypto = require('node:crypto'); -const { Buffer } = require('node:buffer'); - -// Hashing a string and return the result as a hex-encoded string. 
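
(The `doc/api/sqlite.md` hunk above documents `StatementSync.prototype.iterate()` without a usage sample. The sketch below is illustrative only and is not taken from the patch itself: it assumes a Node.js build that ships the experimental `node:sqlite` module, which may still require the `--experimental-sqlite` flag depending on the release, and the in-memory `users` table is made up for the example.)

```cjs
'use strict';
const { DatabaseSync } = require('node:sqlite');

// Illustrative in-memory database and table.
const db = new DatabaseSync(':memory:');
db.exec('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)');
const insert = db.prepare('INSERT INTO users (name) VALUES (?)');
insert.run('Ada');
insert.run('Grace');

// iterate() returns an iterable iterator; each yielded object maps
// column names to the values of one row.
const query = db.prepare('SELECT id, name FROM users ORDER BY id');
for (const row of query.iterate()) {
  console.log(`${row.id}: ${row.name}`);
}
```
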
-const string = 'Node.js'; -// 10b3493287f831e81a438811a1ffba01f8cec4b7 -console.log(crypto.hash('sha1', string)); - -// Encode a base64-encoded string into a Buffer, hash it and return -// the result as a buffer. -const base64 = 'Tm9kZS5qcw=='; -// -console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); -``` - -```mjs -import crypto from 'node:crypto'; -import { Buffer } from 'node:buffer'; +> Stability: 0 - Deprecated -// Hashing a string and return the result as a hex-encoded string. -const string = 'Node.js'; -// 10b3493287f831e81a438811a1ffba01f8cec4b7 -console.log(crypto.hash('sha1', string)); +Property for checking and controlling whether a FIPS compliant crypto provider +is currently in use. Setting to true requires a FIPS build of Node.js. -// Encode a base64-encoded string into a Buffer, hash it and return -// the result as a buffer. -const base64 = 'Tm9kZS5qcw=='; -// -console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); -``` +This property is deprecated. Please use `crypto.setFips()` and +`crypto.getFips()` instead. ### `crypto.generateKey(type, options, callback)` @@ -4222,6 +4158,70 @@ A convenient alias for [`crypto.webcrypto.getRandomValues()`][]. This implementation is not compliant with the Web Crypto spec, to write web-compatible code use [`crypto.webcrypto.getRandomValues()`][] instead. +### `crypto.hash(algorithm, data[, outputEncoding])` + + + +> Stability: 1.2 - Release candidate + +* `algorithm` {string|undefined} +* `data` {string|Buffer|TypedArray|DataView} When `data` is a + string, it will be encoded as UTF-8 before being hashed. If a different + input encoding is desired for a string input, user could encode the string + into a `TypedArray` using either `TextEncoder` or `Buffer.from()` and passing + the encoded `TypedArray` into this API instead. +* `outputEncoding` {string|undefined} [Encoding][encoding] used to encode the + returned digest. **Default:** `'hex'`. +* Returns: {string|Buffer} + +A utility for creating one-shot hash digests of data. It can be faster than +the object-based `crypto.createHash()` when hashing a smaller amount of data +(<= 5MB) that's readily available. If the data can be big or if it is streamed, +it's still recommended to use `crypto.createHash()` instead. + +The `algorithm` is dependent on the available algorithms supported by the +version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc. +On recent releases of OpenSSL, `openssl list -digest-algorithms` will +display the available digest algorithms. + +Example: + +```cjs +const crypto = require('node:crypto'); +const { Buffer } = require('node:buffer'); + +// Hashing a string and return the result as a hex-encoded string. +const string = 'Node.js'; +// 10b3493287f831e81a438811a1ffba01f8cec4b7 +console.log(crypto.hash('sha1', string)); + +// Encode a base64-encoded string into a Buffer, hash it and return +// the result as a buffer. +const base64 = 'Tm9kZS5qcw=='; +// +console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); +``` + +```mjs +import crypto from 'node:crypto'; +import { Buffer } from 'node:buffer'; + +// Hashing a string and return the result as a hex-encoded string. +const string = 'Node.js'; +// 10b3493287f831e81a438811a1ffba01f8cec4b7 +console.log(crypto.hash('sha1', string)); + +// Encode a base64-encoded string into a Buffer, hash it and return +// the result as a buffer. 
+const base64 = 'Tm9kZS5qcw=='; +// +console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); +``` + ### `crypto.hkdf(digest, ikm, salt, info, keylen, callback)` - -* `buffer` {ArrayBuffer|Buffer|TypedArray|DataView} Must be supplied. The - size of the provided `buffer` must not be larger than `2**31 - 1`. -* `offset` {number} **Default:** `0` -* `size` {number} **Default:** `buffer.length - offset`. The `size` must - not be larger than `2**31 - 1`. -* Returns: {ArrayBuffer|Buffer|TypedArray|DataView} The object passed as - `buffer` argument. - -Synchronous version of [`crypto.randomFill()`][]. - -```mjs -import { Buffer } from 'node:buffer'; -const { randomFillSync } = await import('node:crypto'); - -const buf = Buffer.alloc(10); -console.log(randomFillSync(buf).toString('hex')); - -randomFillSync(buf, 5); -console.log(buf.toString('hex')); - -// The above is equivalent to the following: -randomFillSync(buf, 5, 5); -console.log(buf.toString('hex')); -``` - -```cjs -const { randomFillSync } = require('node:crypto'); -const { Buffer } = require('node:buffer'); - -const buf = Buffer.alloc(10); -console.log(randomFillSync(buf).toString('hex')); - -randomFillSync(buf, 5); -console.log(buf.toString('hex')); - -// The above is equivalent to the following: -randomFillSync(buf, 5, 5); -console.log(buf.toString('hex')); -``` - -Any `ArrayBuffer`, `TypedArray` or `DataView` instance may be passed as -`buffer`. - -```mjs -import { Buffer } from 'node:buffer'; -const { randomFillSync } = await import('node:crypto'); - -const a = new Uint32Array(10); -console.log(Buffer.from(randomFillSync(a).buffer, - a.byteOffset, a.byteLength).toString('hex')); - -const b = new DataView(new ArrayBuffer(10)); -console.log(Buffer.from(randomFillSync(b).buffer, - b.byteOffset, b.byteLength).toString('hex')); - -const c = new ArrayBuffer(10); -console.log(Buffer.from(randomFillSync(c)).toString('hex')); -``` - -```cjs -const { randomFillSync } = require('node:crypto'); -const { Buffer } = require('node:buffer'); - -const a = new Uint32Array(10); -console.log(Buffer.from(randomFillSync(a).buffer, - a.byteOffset, a.byteLength).toString('hex')); - -const b = new DataView(new ArrayBuffer(10)); -console.log(Buffer.from(randomFillSync(b).buffer, - b.byteOffset, b.byteLength).toString('hex')); - -const c = new ArrayBuffer(10); -console.log(Buffer.from(randomFillSync(c)).toString('hex')); -``` - ### `crypto.randomFill(buffer[, offset][, size], callback)` + +* `buffer` {ArrayBuffer|Buffer|TypedArray|DataView} Must be supplied. The + size of the provided `buffer` must not be larger than `2**31 - 1`. +* `offset` {number} **Default:** `0` +* `size` {number} **Default:** `buffer.length - offset`. The `size` must + not be larger than `2**31 - 1`. +* Returns: {ArrayBuffer|Buffer|TypedArray|DataView} The object passed as + `buffer` argument. + +Synchronous version of [`crypto.randomFill()`][]. 
+ +```mjs +import { Buffer } from 'node:buffer'; +const { randomFillSync } = await import('node:crypto'); + +const buf = Buffer.alloc(10); +console.log(randomFillSync(buf).toString('hex')); + +randomFillSync(buf, 5); +console.log(buf.toString('hex')); + +// The above is equivalent to the following: +randomFillSync(buf, 5, 5); +console.log(buf.toString('hex')); +``` + +```cjs +const { randomFillSync } = require('node:crypto'); +const { Buffer } = require('node:buffer'); + +const buf = Buffer.alloc(10); +console.log(randomFillSync(buf).toString('hex')); + +randomFillSync(buf, 5); +console.log(buf.toString('hex')); + +// The above is equivalent to the following: +randomFillSync(buf, 5, 5); +console.log(buf.toString('hex')); +``` + +Any `ArrayBuffer`, `TypedArray` or `DataView` instance may be passed as +`buffer`. + +```mjs +import { Buffer } from 'node:buffer'; +const { randomFillSync } = await import('node:crypto'); + +const a = new Uint32Array(10); +console.log(Buffer.from(randomFillSync(a).buffer, + a.byteOffset, a.byteLength).toString('hex')); + +const b = new DataView(new ArrayBuffer(10)); +console.log(Buffer.from(randomFillSync(b).buffer, + b.byteOffset, b.byteLength).toString('hex')); + +const c = new ArrayBuffer(10); +console.log(Buffer.from(randomFillSync(c)).toString('hex')); +``` + +```cjs +const { randomFillSync } = require('node:crypto'); +const { Buffer } = require('node:buffer'); + +const a = new Uint32Array(10); +console.log(Buffer.from(randomFillSync(a).buffer, + a.byteOffset, a.byteLength).toString('hex')); + +const b = new DataView(new ArrayBuffer(10)); +console.log(Buffer.from(randomFillSync(b).buffer, + b.byteOffset, b.byteLength).toString('hex')); + +const c = new ArrayBuffer(10); +console.log(Buffer.from(randomFillSync(c)).toString('hex')); +``` + ### `crypto.randomInt([min, ]max[, callback])` + +> Stability: 1.0 - Early development + +* `actual` {any} +* `expected` {any} +* `message` {string|Error} + +[`assert.partialDeepStrictEqual()`][] Asserts the equivalence between the `actual` and `expected` parameters through a +deep comparison, ensuring that all properties in the `expected` parameter are +present in the `actual` parameter with equivalent values, not allowing type coercion. +The main difference with [`assert.deepStrictEqual()`][] is that [`assert.partialDeepStrictEqual()`][] does not require +all properties in the `actual` parameter to be present in the `expected` parameter. +This method should always pass the same test cases as [`assert.deepStrictEqual()`][], behaving as a super set of it. 
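
In addition to the plain `assert` examples shown below, the patch registers the new method with the test runner's lazily-built assertion object (see the `lib/internal/test_runner/test.js` hunk further down), so it becomes reachable as `t.assert.partialDeepStrictEqual` inside `node:test` tests. The following is a minimal sketch of that usage, assuming a Node.js build that includes this change; the test name and values are illustrative:

```cjs
'use strict';
const { test } = require('node:test');

test('loaded config contains the expected subset', (t) => {
  const loaded = { host: 'localhost', port: 8080, tls: { enabled: false }, extra: 'ok' };

  // Only the properties listed in the expected object must match; the
  // additional `extra` property on the actual value is not an error.
  t.assert.partialDeepStrictEqual(loaded, { port: 8080, tls: { enabled: false } });
});
```
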
+ +```mjs +import assert from 'node:assert'; + +assert.partialDeepStrictEqual({ a: 1, b: 2 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual({ a: { b: { c: 1 } } }, { a: { b: { c: 1 } } }); +// OK + +assert.partialDeepStrictEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual(new Set(['value1', 'value2']), new Set(['value1', 'value2'])); +// OK + +assert.partialDeepStrictEqual(new Map([['key1', 'value1']]), new Map([['key1', 'value1']])); +// OK + +assert.partialDeepStrictEqual(new Uint8Array([1, 2, 3]), new Uint8Array([1, 2, 3])); +// OK + +assert.partialDeepStrictEqual(/abc/, /abc/); +// OK + +assert.partialDeepStrictEqual([{ a: 5 }, { b: 5 }], [{ a: 5 }]); +// OK + +assert.partialDeepStrictEqual(new Set([{ a: 1 }, { b: 1 }]), new Set([{ a: 1 }])); +// OK + +assert.partialDeepStrictEqual(new Date(0), new Date(0)); +// OK + +assert.partialDeepStrictEqual({ a: 1 }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: 1, b: '2' }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: { b: 2 } }, { a: { b: '2' } }); +// AssertionError +``` + +```cjs +const assert = require('node:assert'); + +assert.partialDeepStrictEqual({ a: 1, b: 2 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual({ a: { b: { c: 1 } } }, { a: { b: { c: 1 } } }); +// OK + +assert.partialDeepStrictEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual([{ a: 5 }, { b: 5 }], [{ a: 5 }]); +// OK + +assert.partialDeepStrictEqual(new Set([{ a: 1 }, { b: 1 }]), new Set([{ a: 1 }])); +// OK + +assert.partialDeepStrictEqual({ a: 1 }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: 1, b: '2' }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: { b: 2 } }, { a: { b: '2' } }); +// AssertionError +``` + [Object wrappers]: https://developer.mozilla.org/en-US/docs/Glossary/Primitive#Primitive_wrapper_objects_in_JavaScript [Object.prototype.toString()]: https://tc39.github.io/ecma262/#sec-object.prototype.tostring [`!=` operator]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Inequality @@ -2576,6 +2666,7 @@ argument. 
[`assert.notEqual()`]: #assertnotequalactual-expected-message [`assert.notStrictEqual()`]: #assertnotstrictequalactual-expected-message [`assert.ok()`]: #assertokvalue-message +[`assert.partialDeepStrictEqual()`]: #assertpartialdeepstrictequalactual-expected-message [`assert.strictEqual()`]: #assertstrictequalactual-expected-message [`assert.throws()`]: #assertthrowsfn-error-message [`getColorDepth()`]: tty.md#writestreamgetcolordepthenv diff --git a/lib/assert.js b/lib/assert.js index 2a65457b39387e..cef7c16f8095e3 100644 --- a/lib/assert.js +++ b/lib/assert.js @@ -21,22 +21,35 @@ 'use strict'; const { + ArrayFrom, + ArrayIsArray, ArrayPrototypeIndexOf, ArrayPrototypeJoin, ArrayPrototypePush, ArrayPrototypeSlice, Error, + FunctionPrototypeCall, + MapPrototypeDelete, + MapPrototypeGet, + MapPrototypeHas, + MapPrototypeSet, NumberIsNaN, ObjectAssign, ObjectIs, ObjectKeys, ObjectPrototypeIsPrototypeOf, ReflectApply, + ReflectHas, + ReflectOwnKeys, RegExpPrototypeExec, + SafeMap, + SafeSet, + SafeWeakSet, String, StringPrototypeIndexOf, StringPrototypeSlice, StringPrototypeSplit, + SymbolIterator, } = primordials; const { @@ -50,8 +63,18 @@ const { } = require('internal/errors'); const AssertionError = require('internal/assert/assertion_error'); const { inspect } = require('internal/util/inspect'); -const { isPromise, isRegExp } = require('internal/util/types'); -const { isError, deprecate } = require('internal/util'); +const { Buffer } = require('buffer'); +const { + isKeyObject, + isPromise, + isRegExp, + isMap, + isSet, + isDate, + isWeakSet, + isWeakMap, +} = require('internal/util/types'); +const { isError, deprecate, emitExperimentalWarning } = require('internal/util'); const { innerOk } = require('internal/assert/utils'); const CallTracker = require('internal/assert/calltracker'); @@ -341,6 +364,191 @@ assert.notStrictEqual = function notStrictEqual(actual, expected, message) { } }; +function isSpecial(obj) { + return obj == null || typeof obj !== 'object' || isError(obj) || isRegExp(obj) || isDate(obj); +} + +const typesToCallDeepStrictEqualWith = [ + isKeyObject, isWeakSet, isWeakMap, Buffer.isBuffer, +]; + +/** + * Compares two objects or values recursively to check if they are equal. + * @param {any} actual - The actual value to compare. + * @param {any} expected - The expected value to compare. + * @param {Set} [comparedObjects=new Set()] - Set to track compared objects for handling circular references. + * @returns {boolean} - Returns `true` if the actual value matches the expected value, otherwise `false`. 
+ * @example + * compareBranch({a: 1, b: 2, c: 3}, {a: 1, b: 2}); // true + */ +function compareBranch( + actual, + expected, + comparedObjects, +) { + // Check for Map object equality + if (isMap(actual) && isMap(expected)) { + if (actual.size !== expected.size) { + return false; + } + const safeIterator = FunctionPrototypeCall(SafeMap.prototype[SymbolIterator], actual); + + comparedObjects ??= new SafeWeakSet(); + + for (const { 0: key, 1: val } of safeIterator) { + if (!MapPrototypeHas(expected, key)) { + return false; + } + if (!compareBranch(val, MapPrototypeGet(expected, key), comparedObjects)) { + return false; + } + } + return true; + } + + for (const type of typesToCallDeepStrictEqualWith) { + if (type(actual) || type(expected)) { + if (isDeepStrictEqual === undefined) lazyLoadComparison(); + return isDeepStrictEqual(actual, expected); + } + } + + // Check for Set object equality + // TODO(aduh95): switch to `SetPrototypeIsSubsetOf` when it's available + if (isSet(actual) && isSet(expected)) { + if (expected.size > actual.size) { + return false; // `expected` can't be a subset if it has more elements + } + + if (isDeepEqual === undefined) lazyLoadComparison(); + + const actualArray = ArrayFrom(actual); + const expectedArray = ArrayFrom(expected); + const usedIndices = new SafeSet(); + + for (let expectedIdx = 0; expectedIdx < expectedArray.length; expectedIdx++) { + const expectedItem = expectedArray[expectedIdx]; + let found = false; + + for (let actualIdx = 0; actualIdx < actualArray.length; actualIdx++) { + if (!usedIndices.has(actualIdx) && isDeepStrictEqual(actualArray[actualIdx], expectedItem)) { + usedIndices.add(actualIdx); + found = true; + break; + } + } + + if (!found) { + return false; + } + } + + return true; + } + + // Check if expected array is a subset of actual array + if (ArrayIsArray(actual) && ArrayIsArray(expected)) { + if (expected.length > actual.length) { + return false; + } + + if (isDeepEqual === undefined) lazyLoadComparison(); + + // Create a map to count occurrences of each element in the expected array + const expectedCounts = new SafeMap(); + for (const expectedItem of expected) { + let found = false; + for (const { 0: key, 1: count } of expectedCounts) { + if (isDeepStrictEqual(key, expectedItem)) { + MapPrototypeSet(expectedCounts, key, count + 1); + found = true; + break; + } + } + if (!found) { + MapPrototypeSet(expectedCounts, expectedItem, 1); + } + } + + // Create a map to count occurrences of relevant elements in the actual array + for (const actualItem of actual) { + for (const { 0: key, 1: count } of expectedCounts) { + if (isDeepStrictEqual(key, actualItem)) { + if (count === 1) { + MapPrototypeDelete(expectedCounts, key); + } else { + MapPrototypeSet(expectedCounts, key, count - 1); + } + break; + } + } + } + + return !expectedCounts.size; + } + + // Comparison done when at least one of the values is not an object + if (isSpecial(actual) || isSpecial(expected)) { + if (isDeepEqual === undefined) { + lazyLoadComparison(); + } + return isDeepStrictEqual(actual, expected); + } + + // Use Reflect.ownKeys() instead of Object.keys() to include symbol properties + const keysExpected = ReflectOwnKeys(expected); + + comparedObjects ??= new SafeWeakSet(); + + // Handle circular references + if (comparedObjects.has(actual)) { + return true; + } + comparedObjects.add(actual); + + // Check if all expected keys and values match + for (let i = 0; i < keysExpected.length; i++) { + const key = keysExpected[i]; + assert( + ReflectHas(actual, key), + new 
AssertionError({ message: `Expected key ${String(key)} not found in actual object` }), + ); + if (!compareBranch(actual[key], expected[key], comparedObjects)) { + return false; + } + } + + return true; +} + +/** + * The strict equivalence assertion test between two objects + * @param {any} actual + * @param {any} expected + * @param {string | Error} [message] + * @returns {void} + */ +assert.partialDeepStrictEqual = function partialDeepStrictEqual( + actual, + expected, + message, +) { + emitExperimentalWarning('assert.partialDeepStrictEqual'); + if (arguments.length < 2) { + throw new ERR_MISSING_ARGS('actual', 'expected'); + } + + if (!compareBranch(actual, expected)) { + innerFail({ + actual, + expected, + message, + operator: 'partialDeepStrictEqual', + stackStartFn: partialDeepStrictEqual, + }); + } +}; + class Comparison { constructor(obj, keys, actual) { for (const key of keys) { diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index abb8ed276dfacc..34e1900979960f 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -119,6 +119,7 @@ function lazyAssertObject(harness) { 'notDeepStrictEqual', 'notEqual', 'notStrictEqual', + 'partialDeepStrictEqual', 'rejects', 'strictEqual', 'throws', diff --git a/test/parallel/test-assert-objects.js b/test/parallel/test-assert-objects.js new file mode 100644 index 00000000000000..d1c8bb854babb0 --- /dev/null +++ b/test/parallel/test-assert-objects.js @@ -0,0 +1,501 @@ +'use strict'; + +const common = require('../common'); +const vm = require('node:vm'); +const assert = require('node:assert'); +const { describe, it } = require('node:test'); + +function createCircularObject() { + const obj = {}; + obj.self = obj; + return obj; +} + +function createDeepNestedObject() { + return { level1: { level2: { level3: 'deepValue' } } }; +} + +async function generateCryptoKey() { + const { KeyObject } = require('node:crypto'); + const { subtle } = globalThis.crypto; + + const cryptoKey = await subtle.generateKey( + { + name: 'HMAC', + hash: 'SHA-256', + length: 256, + }, + true, + ['sign', 'verify'] + ); + + const keyObject = KeyObject.from(cryptoKey); + + return { cryptoKey, keyObject }; +} + +describe('Object Comparison Tests', () => { + describe('partialDeepStrictEqual', () => { + describe('throws an error', () => { + const tests = [ + { + description: 'throws when only one argument is provided', + actual: { a: 1 }, + expected: undefined, + }, + { + description: 'throws when expected has more properties than actual', + actual: [1, 'two'], + expected: [1, 'two', true], + }, + { + description: 'throws because expected has seven 2 while actual has six one', + actual: [1, 2, 2, 2, 2, 2, 2, 3], + expected: [1, 2, 2, 2, 2, 2, 2, 2], + }, + { + description: 'throws when comparing two different sets with objects', + actual: new Set([{ a: 1 }]), + expected: new Set([{ a: 1 }, { b: 1 }]), + }, + + { + description: 'throws when comparing two WeakSet objects', + actual: new WeakSet(), + expected: new WeakSet(), + }, + { + description: 'throws when comparing two WeakMap objects', + actual: new WeakMap(), + expected: new WeakMap(), + }, + { + description: 'throws when comparing two different objects', + actual: { a: 1, b: 'string' }, + expected: { a: 2, b: 'string' }, + }, + { + description: + 'throws when comparing two objects with different nested objects', + actual: createDeepNestedObject(), + expected: { level1: { level2: { level3: 'differentValue' } } }, + }, + { + description: + 'throws when comparing 
two objects with different RegExp properties', + actual: { pattern: /abc/ }, + expected: { pattern: /def/ }, + }, + { + description: + 'throws when comparing two arrays with different elements', + actual: [1, 'two', true], + expected: [1, 'two', false], + }, + { + description: + 'throws when comparing two Date objects with different times', + actual: new Date(0), + expected: new Date(1), + }, + { + description: + 'throws when comparing two objects with different large number of properties', + actual: Object.fromEntries( + Array.from({ length: 100 }, (_, i) => [`key${i}`, i]) + ), + expected: Object.fromEntries( + Array.from({ length: 100 }, (_, i) => [`key${i}`, i + 1]) + ), + }, + { + description: + 'throws when comparing two objects with different Symbols', + actual: { [Symbol('test')]: 'symbol' }, + expected: { [Symbol('test')]: 'symbol' }, + }, + { + description: + 'throws when comparing two objects with different array properties', + actual: { a: [1, 2, 3] }, + expected: { a: [1, 2, 4] }, + }, + { + description: + 'throws when comparing two objects with different function properties', + actual: { fn: () => {} }, + expected: { fn: () => {} }, + }, + { + description: + 'throws when comparing two objects with different Error instances', + actual: { error: new Error('Test error 1') }, + expected: { error: new Error('Test error 2') }, + }, + { + description: + 'throws when comparing two objects with different TypedArray instances and content', + actual: { typedArray: new Uint8Array([1, 2, 3]) }, + expected: { typedArray: new Uint8Array([4, 5, 6]) }, + }, + { + description: + 'throws when comparing two Map objects with different entries', + actual: new Map([ + ['key1', 'value1'], + ['key2', 'value2'], + ]), + expected: new Map([ + ['key1', 'value1'], + ['key3', 'value3'], + ]), + }, + { + description: + 'throws when comparing two Map objects with different keys', + actual: new Map([ + ['key1', 'value1'], + ['key2', 'value2'], + ]), + expected: new Map([ + ['key1', 'value1'], + ['key3', 'value2'], + ]), + }, + { + description: + 'throws when comparing two Map objects with different length', + actual: new Map([ + ['key1', 'value1'], + ['key2', 'value2'], + ]), + expected: new Map([['key1', 'value1']]), + }, + { + description: + 'throws when comparing two TypedArray instances with different content', + actual: new Uint8Array(10), + expected: () => { + const typedArray2 = new Int8Array(10); + Object.defineProperty(typedArray2, Symbol.toStringTag, { + value: 'Uint8Array' + }); + Object.setPrototypeOf(typedArray2, Uint8Array.prototype); + + return typedArray2; + }, + }, + { + description: + 'throws when comparing two Set objects from different realms with different values', + actual: new vm.runInNewContext('new Set(["value1", "value2"])'), + expected: new Set(['value1', 'value3']), + }, + { + description: + 'throws when comparing two Set objects with different values', + actual: new Set(['value1', 'value2']), + expected: new Set(['value1', 'value3']), + }, + { + description: 'throws when comparing one subset object with another', + actual: { a: 1, b: 2, c: 3 }, + expected: { b: '2' }, + }, + { + description: 'throws when comparing one subset array with another', + actual: [1, 2, 3], + expected: ['2'], + }, + ]; + + if (common.hasCrypto) { + tests.push({ + description: + 'throws when comparing two objects with different CryptoKey instances objects', + actual: async () => { + return generateCryptoKey(); + }, + expected: async () => { + return generateCryptoKey(); + }, + }); + + const { 
createSecretKey } = require('node:crypto'); + + tests.push({ + description: + 'throws when comparing two objects with different KeyObject instances objects', + actual: createSecretKey(Buffer.alloc(1, 0)), + expected: createSecretKey(Buffer.alloc(1, 1)), + }); + } + + tests.forEach(({ description, actual, expected }) => { + it(description, () => { + assert.throws(() => assert.partialDeepStrictEqual(actual, expected), Error); + }); + }); + }); + }); + + describe('does not throw an error', () => { + const sym = Symbol('test'); + const func = () => {}; + + [ + { + description: 'compares two identical simple objects', + actual: { a: 1, b: 'string' }, + expected: { a: 1, b: 'string' }, + }, + { + description: 'compares two objects with different property order', + actual: { a: 1, b: 'string' }, + expected: { b: 'string', a: 1 }, + }, + { + description: 'compares two deeply nested objects with partial equality', + actual: { a: { nested: { property: true, some: 'other' } } }, + expected: { a: { nested: { property: true } } }, + }, + { + description: + 'compares plain objects from different realms', + actual: vm.runInNewContext(`({ + a: 1, + b: 2n, + c: "3", + d: /4/, + e: new Set([5]), + f: [6], + g: new Uint8Array() + })`), + expected: { b: 2n, e: new Set([5]), f: [6], g: new Uint8Array() }, + }, + { + description: 'compares two integers', + actual: 1, + expected: 1, + }, + { + description: 'compares two strings', + actual: '1', + expected: '1', + }, + { + description: 'compares two objects with nested objects', + actual: createDeepNestedObject(), + expected: createDeepNestedObject(), + }, + { + description: 'compares two objects with circular references', + actual: createCircularObject(), + expected: createCircularObject(), + }, + { + description: 'compares two arrays with identical elements', + actual: [1, 'two', true], + expected: [1, 'two', true], + }, + { + description: 'compares two Date objects with the same time', + actual: new Date(0), + expected: new Date(0), + }, + { + description: 'compares two objects with large number of properties', + actual: Object.fromEntries( + Array.from({ length: 100 }, (_, i) => [`key${i}`, i]) + ), + expected: Object.fromEntries( + Array.from({ length: 100 }, (_, i) => [`key${i}`, i]) + ), + }, + { + description: 'compares two objects with Symbol properties', + actual: { [sym]: 'symbol' }, + expected: { [sym]: 'symbol' }, + }, + { + description: 'compares two objects with RegExp properties', + actual: { pattern: /abc/ }, + expected: { pattern: /abc/ }, + }, + { + description: 'compares two objects with identical function properties', + actual: { fn: func }, + expected: { fn: func }, + }, + { + description: 'compares two objects with mixed types of properties', + actual: { num: 1, str: 'test', bool: true, sym }, + expected: { num: 1, str: 'test', bool: true, sym }, + }, + { + description: 'compares two objects with Buffers', + actual: { buf: Buffer.from('Node.js') }, + expected: { buf: Buffer.from('Node.js') }, + }, + { + description: 'compares two objects with identical Error properties', + actual: { error: new Error('Test error') }, + expected: { error: new Error('Test error') }, + }, + { + description: 'compares two objects with TypedArray instances with the same content', + actual: { typedArray: new Uint8Array([1, 2, 3]) }, + expected: { typedArray: new Uint8Array([1, 2, 3]) }, + }, + { + description: 'compares two Map objects with identical entries', + actual: new Map([ + ['key1', 'value1'], + ['key2', 'value2'], + ]), + expected: new Map([ + ['key1', 
'value1'], + ['key2', 'value2'], + ]), + }, + { + describe: 'compares two array of objects', + actual: [{ a: 5 }], + expected: [{ a: 5 }], + }, + { + describe: 'compares two array of objects where expected is a subset of actual', + actual: [{ a: 5 }, { b: 5 }], + expected: [{ a: 5 }], + }, + { + description: 'compares two Set objects with identical objects', + actual: new Set([{ a: 1 }]), + expected: new Set([{ a: 1 }]), + }, + { + description: 'compares two Set objects where expected is a subset of actual', + actual: new Set([{ a: 1 }, { b: 1 }]), + expected: new Set([{ a: 1 }]), + }, + { + description: 'compares two Set objects with identical arrays', + actual: new Set(['value1', 'value2']), + expected: new Set(['value1', 'value2']), + }, + { + description: 'compares two Set objects', + actual: new Set(['value1', 'value2', 'value3']), + expected: new Set(['value1', 'value2']), + }, + { + description: + 'compares two Map objects from different realms with identical entries', + actual: new vm.runInNewContext( + 'new Map([["key1", "value1"], ["key2", "value2"]])' + ), + expected: new Map([ + ['key1', 'value1'], + ['key2', 'value2'], + ]), + }, + { + description: + 'compares two objects with identical getter/setter properties', + actual: (() => { + let value = 'test'; + return Object.defineProperty({}, 'prop', { + get: () => value, + set: (newValue) => { + value = newValue; + }, + enumerable: true, + configurable: true, + }); + })(), + expected: (() => { + let value = 'test'; + return Object.defineProperty({}, 'prop', { + get: () => value, + set: (newValue) => { + value = newValue; + }, + enumerable: true, + configurable: true, + }); + })(), + }, + { + description: 'compares two objects with no prototype', + actual: { __proto__: null, prop: 'value' }, + expected: { __proto__: null, prop: 'value' }, + }, + { + description: + 'compares two objects with identical non-enumerable properties', + actual: (() => { + const obj = {}; + Object.defineProperty(obj, 'hidden', { + value: 'secret', + enumerable: false, + }); + return obj; + })(), + expected: (() => { + const obj = {}; + Object.defineProperty(obj, 'hidden', { + value: 'secret', + enumerable: false, + }); + return obj; + })(), + }, + { + description: 'compares two identical primitives, string', + actual: 'foo', + expected: 'foo', + }, + { + description: 'compares two identical primitives, number', + actual: 1, + expected: 1, + }, + { + description: 'compares two identical primitives, boolean', + actual: false, + expected: false, + }, + { + description: 'compares two identical primitives, null', + actual: null, + expected: null, + }, + { + description: 'compares two identical primitives, undefined', + actual: undefined, + expected: undefined, + }, + { + description: 'compares two identical primitives, Symbol', + actual: sym, + expected: sym, + }, + { + description: + 'compares one subset object with another', + actual: { a: 1, b: 2, c: 3 }, + expected: { b: 2 }, + }, + { + description: + 'compares one subset array with another', + actual: [1, 2, 3], + expected: [2], + }, + ].forEach(({ description, actual, expected }) => { + it(description, () => { + assert.partialDeepStrictEqual(actual, expected); + }); + }); + }); +}); From c157e026fcbe7eee5ea11b520541542f1fdce673 Mon Sep 17 00:00:00 2001 From: Thomas Chetwin Date: Sat, 23 Nov 2024 21:23:53 +0000 Subject: [PATCH 34/53] test: convert readdir test to use test runner Signed-off-by: tchetwin PR-URL: https://github.com/nodejs/node/pull/55750 Reviewed-By: Pietro Marchini Reviewed-By: James M Snell 
--- test/parallel/test-fs-readdir-recursive.js | 24 +++++++++++++--------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/test/parallel/test-fs-readdir-recursive.js b/test/parallel/test-fs-readdir-recursive.js index f32e600d2a3660..7cfc0903faa0b4 100644 --- a/test/parallel/test-fs-readdir-recursive.js +++ b/test/parallel/test-fs-readdir-recursive.js @@ -1,14 +1,18 @@ 'use strict'; -const common = require('../common'); -const fs = require('fs'); -const net = require('net'); +const { PIPE, mustCall } = require('../common'); const tmpdir = require('../common/tmpdir'); -tmpdir.refresh(); +const { test } = require('node:test'); +const fs = require('node:fs'); +const net = require('node:net'); -const server = net.createServer().listen(common.PIPE, common.mustCall(() => { - // The process should not crash - // See https://github.com/nodejs/node/issues/52159 - fs.readdirSync(tmpdir.path, { recursive: true }); - server.close(); -})); +test('readdir should not recurse into Unix domain sockets', (t, done) => { + tmpdir.refresh(); + const server = net.createServer().listen(PIPE, mustCall(() => { + // The process should not crash + // See https://github.com/nodejs/node/issues/52159 + fs.readdirSync(tmpdir.path, { recursive: true }); + server.close(); + done(); + })); +}); From 288416a76463891b911fcdd97ca21a7986c370a5 Mon Sep 17 00:00:00 2001 From: npm CLI robot Date: Sat, 23 Nov 2024 23:55:06 -0800 Subject: [PATCH 35/53] deps: upgrade npm to 10.9.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/55951 Reviewed-By: Luigi Pinca Reviewed-By: Michaël Zasso --- deps/npm/docs/content/commands/npm-ls.md | 2 +- deps/npm/docs/content/commands/npm.md | 2 +- deps/npm/docs/output/commands/npm-access.html | 4 +- .../npm/docs/output/commands/npm-adduser.html | 4 +- deps/npm/docs/output/commands/npm-audit.html | 4 +- deps/npm/docs/output/commands/npm-bugs.html | 4 +- deps/npm/docs/output/commands/npm-cache.html | 4 +- deps/npm/docs/output/commands/npm-ci.html | 4 +- .../docs/output/commands/npm-completion.html | 4 +- deps/npm/docs/output/commands/npm-config.html | 4 +- deps/npm/docs/output/commands/npm-dedupe.html | 4 +- .../docs/output/commands/npm-deprecate.html | 4 +- deps/npm/docs/output/commands/npm-diff.html | 4 +- .../docs/output/commands/npm-dist-tag.html | 4 +- deps/npm/docs/output/commands/npm-docs.html | 4 +- deps/npm/docs/output/commands/npm-doctor.html | 4 +- deps/npm/docs/output/commands/npm-edit.html | 4 +- deps/npm/docs/output/commands/npm-exec.html | 4 +- .../npm/docs/output/commands/npm-explain.html | 4 +- .../npm/docs/output/commands/npm-explore.html | 4 +- .../docs/output/commands/npm-find-dupes.html | 4 +- deps/npm/docs/output/commands/npm-fund.html | 4 +- .../docs/output/commands/npm-help-search.html | 4 +- deps/npm/docs/output/commands/npm-help.html | 4 +- deps/npm/docs/output/commands/npm-hook.html | 4 +- deps/npm/docs/output/commands/npm-init.html | 4 +- .../output/commands/npm-install-ci-test.html | 4 +- .../output/commands/npm-install-test.html | 4 +- .../npm/docs/output/commands/npm-install.html | 4 +- deps/npm/docs/output/commands/npm-link.html | 4 +- deps/npm/docs/output/commands/npm-login.html | 4 +- deps/npm/docs/output/commands/npm-logout.html | 4 +- deps/npm/docs/output/commands/npm-ls.html | 6 +- deps/npm/docs/output/commands/npm-org.html | 4 +- .../docs/output/commands/npm-outdated.html | 4 +- deps/npm/docs/output/commands/npm-owner.html | 4 +- 
deps/npm/docs/output/commands/npm-pack.html | 4 +- deps/npm/docs/output/commands/npm-ping.html | 4 +- deps/npm/docs/output/commands/npm-pkg.html | 4 +- deps/npm/docs/output/commands/npm-prefix.html | 4 +- .../npm/docs/output/commands/npm-profile.html | 4 +- deps/npm/docs/output/commands/npm-prune.html | 4 +- .../npm/docs/output/commands/npm-publish.html | 4 +- deps/npm/docs/output/commands/npm-query.html | 4 +- .../npm/docs/output/commands/npm-rebuild.html | 4 +- deps/npm/docs/output/commands/npm-repo.html | 4 +- .../npm/docs/output/commands/npm-restart.html | 4 +- deps/npm/docs/output/commands/npm-root.html | 4 +- .../docs/output/commands/npm-run-script.html | 4 +- deps/npm/docs/output/commands/npm-sbom.html | 4 +- deps/npm/docs/output/commands/npm-search.html | 4 +- .../docs/output/commands/npm-shrinkwrap.html | 4 +- deps/npm/docs/output/commands/npm-star.html | 4 +- deps/npm/docs/output/commands/npm-stars.html | 4 +- deps/npm/docs/output/commands/npm-start.html | 4 +- deps/npm/docs/output/commands/npm-stop.html | 4 +- deps/npm/docs/output/commands/npm-team.html | 4 +- deps/npm/docs/output/commands/npm-test.html | 4 +- deps/npm/docs/output/commands/npm-token.html | 4 +- .../docs/output/commands/npm-uninstall.html | 4 +- .../docs/output/commands/npm-unpublish.html | 4 +- deps/npm/docs/output/commands/npm-unstar.html | 4 +- deps/npm/docs/output/commands/npm-update.html | 4 +- .../npm/docs/output/commands/npm-version.html | 4 +- deps/npm/docs/output/commands/npm-view.html | 4 +- deps/npm/docs/output/commands/npm-whoami.html | 4 +- deps/npm/docs/output/commands/npm.html | 6 +- deps/npm/docs/output/commands/npx.html | 4 +- .../docs/output/configuring-npm/folders.html | 4 +- .../docs/output/configuring-npm/install.html | 4 +- .../output/configuring-npm/npm-global.html | 4 +- .../docs/output/configuring-npm/npm-json.html | 4 +- .../configuring-npm/npm-shrinkwrap-json.html | 4 +- .../docs/output/configuring-npm/npmrc.html | 4 +- .../output/configuring-npm/package-json.html | 4 +- .../configuring-npm/package-lock-json.html | 4 +- deps/npm/docs/output/using-npm/config.html | 4 +- .../using-npm/dependency-selectors.html | 4 +- .../npm/docs/output/using-npm/developers.html | 4 +- deps/npm/docs/output/using-npm/logging.html | 4 +- deps/npm/docs/output/using-npm/orgs.html | 4 +- .../docs/output/using-npm/package-spec.html | 4 +- deps/npm/docs/output/using-npm/registry.html | 4 +- deps/npm/docs/output/using-npm/removal.html | 4 +- deps/npm/docs/output/using-npm/scope.html | 4 +- deps/npm/docs/output/using-npm/scripts.html | 4 +- .../npm/docs/output/using-npm/workspaces.html | 4 +- deps/npm/lib/cli.js | 8 + deps/npm/man/man1/npm-access.1 | 2 +- deps/npm/man/man1/npm-adduser.1 | 2 +- deps/npm/man/man1/npm-audit.1 | 2 +- deps/npm/man/man1/npm-bugs.1 | 2 +- deps/npm/man/man1/npm-cache.1 | 2 +- deps/npm/man/man1/npm-ci.1 | 2 +- deps/npm/man/man1/npm-completion.1 | 2 +- deps/npm/man/man1/npm-config.1 | 2 +- deps/npm/man/man1/npm-dedupe.1 | 2 +- deps/npm/man/man1/npm-deprecate.1 | 2 +- deps/npm/man/man1/npm-diff.1 | 2 +- deps/npm/man/man1/npm-dist-tag.1 | 2 +- deps/npm/man/man1/npm-docs.1 | 2 +- deps/npm/man/man1/npm-doctor.1 | 2 +- deps/npm/man/man1/npm-edit.1 | 2 +- deps/npm/man/man1/npm-exec.1 | 2 +- deps/npm/man/man1/npm-explain.1 | 2 +- deps/npm/man/man1/npm-explore.1 | 2 +- deps/npm/man/man1/npm-find-dupes.1 | 2 +- deps/npm/man/man1/npm-fund.1 | 2 +- deps/npm/man/man1/npm-help-search.1 | 2 +- deps/npm/man/man1/npm-help.1 | 2 +- deps/npm/man/man1/npm-hook.1 | 2 +- deps/npm/man/man1/npm-init.1 | 2 +- 
deps/npm/man/man1/npm-install-ci-test.1 | 2 +- deps/npm/man/man1/npm-install-test.1 | 2 +- deps/npm/man/man1/npm-install.1 | 2 +- deps/npm/man/man1/npm-link.1 | 2 +- deps/npm/man/man1/npm-login.1 | 2 +- deps/npm/man/man1/npm-logout.1 | 2 +- deps/npm/man/man1/npm-ls.1 | 4 +- deps/npm/man/man1/npm-org.1 | 2 +- deps/npm/man/man1/npm-outdated.1 | 2 +- deps/npm/man/man1/npm-owner.1 | 2 +- deps/npm/man/man1/npm-pack.1 | 2 +- deps/npm/man/man1/npm-ping.1 | 2 +- deps/npm/man/man1/npm-pkg.1 | 2 +- deps/npm/man/man1/npm-prefix.1 | 2 +- deps/npm/man/man1/npm-profile.1 | 2 +- deps/npm/man/man1/npm-prune.1 | 2 +- deps/npm/man/man1/npm-publish.1 | 2 +- deps/npm/man/man1/npm-query.1 | 2 +- deps/npm/man/man1/npm-rebuild.1 | 2 +- deps/npm/man/man1/npm-repo.1 | 2 +- deps/npm/man/man1/npm-restart.1 | 2 +- deps/npm/man/man1/npm-root.1 | 2 +- deps/npm/man/man1/npm-run-script.1 | 2 +- deps/npm/man/man1/npm-sbom.1 | 2 +- deps/npm/man/man1/npm-search.1 | 2 +- deps/npm/man/man1/npm-shrinkwrap.1 | 2 +- deps/npm/man/man1/npm-star.1 | 2 +- deps/npm/man/man1/npm-stars.1 | 2 +- deps/npm/man/man1/npm-start.1 | 2 +- deps/npm/man/man1/npm-stop.1 | 2 +- deps/npm/man/man1/npm-team.1 | 2 +- deps/npm/man/man1/npm-test.1 | 2 +- deps/npm/man/man1/npm-token.1 | 2 +- deps/npm/man/man1/npm-uninstall.1 | 2 +- deps/npm/man/man1/npm-unpublish.1 | 2 +- deps/npm/man/man1/npm-unstar.1 | 2 +- deps/npm/man/man1/npm-update.1 | 2 +- deps/npm/man/man1/npm-version.1 | 2 +- deps/npm/man/man1/npm-view.1 | 2 +- deps/npm/man/man1/npm-whoami.1 | 2 +- deps/npm/man/man1/npm.1 | 4 +- deps/npm/man/man1/npx.1 | 2 +- deps/npm/man/man5/folders.5 | 2 +- deps/npm/man/man5/install.5 | 2 +- deps/npm/man/man5/npm-global.5 | 2 +- deps/npm/man/man5/npm-json.5 | 2 +- deps/npm/man/man5/npm-shrinkwrap-json.5 | 2 +- deps/npm/man/man5/npmrc.5 | 2 +- deps/npm/man/man5/package-json.5 | 2 +- deps/npm/man/man5/package-lock-json.5 | 2 +- deps/npm/man/man7/config.7 | 2 +- deps/npm/man/man7/dependency-selectors.7 | 2 +- deps/npm/man/man7/developers.7 | 2 +- deps/npm/man/man7/logging.7 | 2 +- deps/npm/man/man7/orgs.7 | 2 +- deps/npm/man/man7/package-spec.7 | 2 +- deps/npm/man/man7/registry.7 | 2 +- deps/npm/man/man7/removal.7 | 2 +- deps/npm/man/man7/scope.7 | 2 +- deps/npm/man/man7/scripts.7 | 2 +- deps/npm/man/man7/workspaces.7 | 2 +- .../cliui/node_modules/ansi-regex/index.js | 6 +- .../node_modules/ansi-regex/package.json | 9 +- .../node_modules/pacote}/LICENSE | 2 +- .../node_modules/pacote/README.md | 283 +++++++++ .../node_modules/pacote/bin/index.js | 158 +++++ .../node_modules/pacote/lib/dir.js | 105 ++++ .../node_modules/pacote/lib/fetcher.js | 497 +++++++++++++++ .../node_modules/pacote/lib/file.js | 94 +++ .../node_modules/pacote/lib/git.js | 317 ++++++++++ .../node_modules/pacote/lib/index.js | 23 + .../node_modules/pacote/lib/registry.js | 369 +++++++++++ .../node_modules/pacote/lib/remote.js | 89 +++ .../pacote/lib/util/add-git-sha.js | 15 + .../node_modules/pacote/lib/util/cache-dir.js | 15 + .../pacote/lib/util/is-package-bin.js | 25 + .../node_modules/pacote/lib/util/npm.js | 14 + .../node_modules/pacote/lib/util/protected.js | 5 + .../pacote/lib/util/tar-create-options.js | 31 + .../pacote/lib/util/trailing-slashes.js | 10 + .../node_modules/pacote/package.json | 79 +++ .../@npmcli/metavuln-calculator/package.json | 4 +- .../@npmcli/promise-spawn/lib/index.js | 16 +- .../@npmcli/promise-spawn/package.json | 6 +- .../node_modules/@npmcli/agent/lib/agents.js | 206 ------- .../node_modules/@npmcli/agent/lib/dns.js | 53 -- 
.../node_modules/@npmcli/agent/lib/errors.js | 61 -- .../node_modules/@npmcli/agent/lib/index.js | 56 -- .../node_modules/@npmcli/agent/lib/options.js | 86 --- .../node_modules/@npmcli/agent/lib/proxy.js | 88 --- .../node_modules/@npmcli/agent/package.json | 60 -- .../sign/node_modules/@npmcli/fs/LICENSE.md | 20 - .../@npmcli/fs/lib/common/get-options.js | 20 - .../@npmcli/fs/lib/common/node.js | 9 - .../node_modules/@npmcli/fs/lib/cp/LICENSE | 15 - .../node_modules/@npmcli/fs/lib/cp/errors.js | 129 ---- .../node_modules/@npmcli/fs/lib/cp/index.js | 22 - .../@npmcli/fs/lib/cp/polyfill.js | 428 ------------- .../sign/node_modules/@npmcli/fs/lib/index.js | 13 - .../node_modules/@npmcli/fs/lib/move-file.js | 78 --- .../@npmcli/fs/lib/readdir-scoped.js | 20 - .../@npmcli/fs/lib/with-temp-dir.js | 39 -- .../sign/node_modules/@npmcli/fs/package.json | 52 -- .../sign/node_modules/cacache/LICENSE.md | 16 - .../node_modules/cacache/lib/content/path.js | 29 - .../node_modules/cacache/lib/content/read.js | 165 ----- .../node_modules/cacache/lib/content/rm.js | 18 - .../node_modules/cacache/lib/content/write.js | 206 ------- .../node_modules/cacache/lib/entry-index.js | 336 ---------- .../sign/node_modules/cacache/lib/get.js | 170 ----- .../sign/node_modules/cacache/lib/index.js | 42 -- .../node_modules/cacache/lib/memoization.js | 72 --- .../sign/node_modules/cacache/lib/put.js | 80 --- .../sign/node_modules/cacache/lib/rm.js | 31 - .../node_modules/cacache/lib/util/glob.js | 7 - .../cacache/lib/util/hash-to-segments.js | 7 - .../sign/node_modules/cacache/lib/util/tmp.js | 26 - .../sign/node_modules/cacache/lib/verify.js | 257 -------- .../sign/node_modules/cacache/package.json | 82 --- .../node_modules/make-fetch-happen/LICENSE | 16 - .../make-fetch-happen/lib/cache/entry.js | 471 -------------- .../make-fetch-happen/lib/cache/errors.js | 11 - .../make-fetch-happen/lib/cache/index.js | 49 -- .../make-fetch-happen/lib/cache/key.js | 17 - .../make-fetch-happen/lib/cache/policy.js | 161 ----- .../make-fetch-happen/lib/fetch.js | 118 ---- .../make-fetch-happen/lib/index.js | 41 -- .../make-fetch-happen/lib/options.js | 54 -- .../make-fetch-happen/lib/pipeline.js | 41 -- .../make-fetch-happen/lib/remote.js | 131 ---- .../make-fetch-happen/package.json | 75 --- .../sign/node_modules/minipass-fetch/LICENSE | 28 - .../minipass-fetch/lib/abort-error.js | 17 - .../node_modules/minipass-fetch/lib/blob.js | 97 --- .../node_modules/minipass-fetch/lib/body.js | 350 ----------- .../minipass-fetch/lib/fetch-error.js | 32 - .../minipass-fetch/lib/headers.js | 267 -------- .../node_modules/minipass-fetch/lib/index.js | 377 ------------ .../minipass-fetch/lib/request.js | 282 --------- .../minipass-fetch/lib/response.js | 90 --- .../node_modules/minipass-fetch/package.json | 69 --- .../sign/node_modules/proc-log/LICENSE | 15 - .../sign/node_modules/proc-log/lib/index.js | 153 ----- .../sign/node_modules/proc-log/package.json | 45 -- .../sign/node_modules/ssri/LICENSE.md | 16 - .../sign/node_modules/ssri/lib/index.js | 580 ------------------ .../sign/node_modules/ssri/package.json | 65 -- .../sign/node_modules/unique-filename/LICENSE | 5 - .../node_modules/unique-filename/lib/index.js | 7 - .../node_modules/unique-filename/package.json | 51 -- .../sign/node_modules/unique-slug/LICENSE | 15 - .../node_modules/unique-slug/lib/index.js | 11 - .../node_modules/unique-slug/package.json | 47 -- .../@sigstore/tuf/dist/appdata.js | 3 +- .../node_modules/@sigstore/tuf/dist/client.js | 1 - 
.../node_modules/@sigstore/tuf/dist/index.js | 6 +- .../node_modules/@sigstore/tuf/dist/target.js | 3 +- .../node_modules/@sigstore/tuf/package.json | 8 +- .../npm/node_modules/@sigstore/tuf/seeds.json | 2 +- deps/npm/node_modules/ci-info/index.js | 80 ++- deps/npm/node_modules/ci-info/package.json | 28 +- deps/npm/node_modules/ci-info/vendors.json | 16 +- .../node_modules/cross-spawn/lib/enoent.js | 2 +- .../cross-spawn/lib/util/escape.js | 6 +- .../npm/node_modules/cross-spawn/package.json | 4 +- .../debug/node_modules/ms/index.js | 162 ----- .../debug/node_modules/ms/license.md | 21 - .../debug/node_modules/ms/package.json | 37 -- deps/npm/node_modules/debug/package.json | 4 +- .../node_modules/hosted-git-info/lib/hosts.js | 6 +- .../node_modules/hosted-git-info/package.json | 6 +- .../node_modules/libnpmpublish/lib/publish.js | 2 +- .../node_modules/libnpmpublish/package.json | 4 +- .../make-fetch-happen/lib/options.js | 7 +- .../make-fetch-happen/lib/remote.js | 3 +- .../node_modules/negotiator/HISTORY.md | 114 ++++ .../node_modules/negotiator/LICENSE | 24 + .../node_modules/negotiator/index.js | 83 +++ .../node_modules/negotiator/lib/charset.js | 169 +++++ .../node_modules/negotiator/lib/encoding.js | 205 +++++++ .../node_modules/negotiator/lib/language.js | 179 ++++++ .../node_modules/negotiator/lib/mediaType.js | 294 +++++++++ .../node_modules/negotiator/package.json | 43 ++ .../make-fetch-happen/package.json | 8 +- deps/npm/node_modules/negotiator/HISTORY.md | 5 + deps/npm/node_modules/negotiator/index.js | 8 +- .../node_modules/negotiator/lib/encoding.js | 31 +- .../node_modules/negotiator/lib/mediaType.js | 6 +- deps/npm/node_modules/negotiator/package.json | 2 +- .../npm-install-checks/lib/current-env.js | 36 +- .../npm-install-checks/package.json | 6 +- .../npm-registry-fetch/lib/check-response.js | 16 +- .../npm-registry-fetch/package.json | 8 +- .../dist/commonjs/index.js | 12 +- .../package-json-from-dist/dist/esm/index.js | 12 +- .../package-json-from-dist/package.json | 4 +- deps/npm/node_modules/pacote/lib/dir.js | 2 + deps/npm/node_modules/pacote/lib/fetcher.js | 10 +- deps/npm/node_modules/pacote/package.json | 4 +- .../promise-call-limit/dist/commonjs/index.js | 4 +- .../promise-call-limit/dist/esm/index.js | 4 +- .../promise-call-limit/package.json | 18 +- deps/npm/node_modules/sigstore/dist/config.js | 14 +- .../node_modules/sigstore/dist/sigstore.js | 9 +- .../node_modules}/@sigstore/bundle/LICENSE | 0 .../@sigstore/bundle/dist/build.js | 31 +- .../@sigstore/bundle/dist/bundle.js | 10 +- .../@sigstore/bundle/dist/error.js | 0 .../@sigstore/bundle/dist/index.js | 0 .../@sigstore/bundle/dist/serialized.js | 0 .../@sigstore/bundle/dist/utility.js | 0 .../@sigstore/bundle/dist/validate.js | 12 +- .../@sigstore/bundle/package.json | 4 +- .../node_modules}/@sigstore/core/LICENSE | 0 .../@sigstore/core/dist/asn1/error.js | 0 .../@sigstore/core/dist/asn1/index.js | 0 .../@sigstore/core/dist/asn1/length.js | 5 +- .../@sigstore/core/dist/asn1/obj.js | 0 .../@sigstore/core/dist/asn1/parse.js | 13 +- .../@sigstore/core/dist/asn1/tag.js | 0 .../@sigstore/core/dist/crypto.js | 19 +- .../node_modules}/@sigstore/core/dist/dsse.js | 3 +- .../@sigstore/core/dist/encoding.js | 5 +- .../@sigstore/core/dist/index.js | 0 .../node_modules}/@sigstore/core/dist/json.js | 3 +- .../node_modules}/@sigstore/core/dist/oid.js | 0 .../node_modules}/@sigstore/core/dist/pem.js | 5 +- .../@sigstore/core/dist/rfc3161/error.js | 0 .../@sigstore/core/dist/rfc3161/index.js | 0 
.../@sigstore/core/dist/rfc3161/timestamp.js | 0 .../@sigstore/core/dist/rfc3161/tstinfo.js | 0 .../@sigstore/core/dist/stream.js | 0 .../@sigstore/core/dist/x509/cert.js | 10 +- .../@sigstore/core/dist/x509/ext.js | 0 .../@sigstore/core/dist/x509/index.js | 0 .../@sigstore/core/dist/x509/sct.js | 0 .../node_modules}/@sigstore/core/package.json | 4 +- .../node_modules}/@sigstore/sign/LICENSE | 0 .../@sigstore/sign/dist/bundler/base.js | 0 .../@sigstore/sign/dist/bundler/bundle.js | 12 +- .../@sigstore/sign/dist/bundler/dsse.js | 4 +- .../@sigstore/sign/dist/bundler/index.js | 0 .../@sigstore/sign/dist/bundler/message.js | 0 .../@sigstore/sign/dist/error.js | 4 +- .../@sigstore/sign/dist/external/error.js | 0 .../@sigstore/sign/dist/external/fetch.js | 7 +- .../@sigstore/sign/dist/external/fulcio.js | 0 .../@sigstore/sign/dist/external/rekor.js | 0 .../@sigstore/sign/dist/external/tsa.js | 0 .../@sigstore/sign/dist/identity/ci.js | 0 .../@sigstore/sign/dist/identity/index.js | 0 .../@sigstore/sign/dist/identity/provider.js | 0 .../@sigstore/sign/dist/index.js | 0 .../@sigstore/sign/dist/signer/fulcio/ca.js | 1 - .../sign/dist/signer/fulcio/ephemeral.js | 0 .../sign/dist/signer/fulcio/index.js | 0 .../@sigstore/sign/dist/signer/index.js | 0 .../@sigstore/sign/dist/signer/signer.js | 0 .../@sigstore/sign/dist/types/fetch.js | 0 .../@sigstore/sign/dist/util/index.js | 0 .../@sigstore/sign/dist/util/oidc.js | 3 +- .../@sigstore/sign/dist/util/ua.js | 1 - .../@sigstore/sign/dist/witness/index.js | 0 .../sign/dist/witness/tlog/client.js | 0 .../@sigstore/sign/dist/witness/tlog/entry.js | 28 +- .../@sigstore/sign/dist/witness/tlog/index.js | 0 .../@sigstore/sign/dist/witness/tsa/client.js | 7 +- .../@sigstore/sign/dist/witness/tsa/index.js | 0 .../@sigstore/sign/dist/witness/witness.js | 0 .../node_modules}/@sigstore/sign/package.json | 16 +- .../@sigstore/verify/dist/bundle/dsse.js | 2 +- .../@sigstore/verify/dist/bundle/index.js | 5 +- .../@sigstore/verify/dist/bundle/message.js | 0 .../@sigstore/verify/dist/error.js | 0 .../@sigstore/verify/dist/index.js | 0 .../@sigstore/verify/dist/key/certificate.js | 4 +- .../@sigstore/verify/dist/key/index.js | 6 +- .../@sigstore/verify/dist/key/sct.js | 5 +- .../@sigstore/verify/dist/policy.js | 5 +- .../@sigstore/verify/dist/shared.types.js | 0 .../verify/dist/timestamp/checkpoint.js | 3 +- .../@sigstore/verify/dist/timestamp/index.js | 5 +- .../@sigstore/verify/dist/timestamp/merkle.js | 7 +- .../@sigstore/verify/dist/timestamp/set.js | 3 +- .../@sigstore/verify/dist/timestamp/tsa.js | 3 +- .../@sigstore/verify/dist/tlog/dsse.js | 3 +- .../verify/dist/tlog/hashedrekord.js | 3 +- .../@sigstore/verify/dist/tlog/index.js | 3 +- .../@sigstore/verify/dist/tlog/intoto.js | 3 +- .../@sigstore/verify/dist/trust/filter.js | 5 +- .../@sigstore/verify/dist/trust/index.js | 6 +- .../verify/dist/trust/trust.types.js | 0 .../@sigstore/verify/dist/verifier.js | 0 .../@sigstore/verify/package.json | 8 +- deps/npm/node_modules/sigstore/package.json | 20 +- .../spdx-license-ids/deprecated.json | 1 + .../node_modules/spdx-license-ids/index.json | 8 +- .../spdx-license-ids/package.json | 2 +- deps/npm/node_modules/tuf-js/dist/config.js | 2 +- deps/npm/node_modules/tuf-js/dist/updater.js | 19 +- .../npm/node_modules/tuf-js/dist/utils/url.js | 3 +- .../node_modules/@npmcli/agent/lib/agents.js | 206 ------- .../node_modules/@npmcli/agent/lib/dns.js | 53 -- .../node_modules/@npmcli/agent/lib/errors.js | 61 -- .../node_modules/@npmcli/agent/lib/index.js | 56 -- 
.../node_modules/@npmcli/agent/lib/options.js | 86 --- .../node_modules/@npmcli/agent/lib/proxy.js | 88 --- .../node_modules/@npmcli/agent/package.json | 60 -- .../tuf-js/node_modules/@npmcli/fs/LICENSE.md | 20 - .../@npmcli/fs/lib/common/get-options.js | 20 - .../@npmcli/fs/lib/common/node.js | 9 - .../node_modules/@npmcli/fs/lib/cp/LICENSE | 15 - .../node_modules/@npmcli/fs/lib/cp/errors.js | 129 ---- .../node_modules/@npmcli/fs/lib/cp/index.js | 22 - .../@npmcli/fs/lib/cp/polyfill.js | 428 ------------- .../node_modules/@npmcli/fs/lib/index.js | 13 - .../node_modules/@npmcli/fs/lib/move-file.js | 78 --- .../@npmcli/fs/lib/readdir-scoped.js | 20 - .../@npmcli/fs/lib/with-temp-dir.js | 39 -- .../node_modules/@npmcli/fs/package.json | 52 -- .../node_modules}/@tufjs/models/LICENSE | 0 .../node_modules}/@tufjs/models/dist/base.js | 23 +- .../@tufjs/models/dist/delegations.js | 0 .../node_modules}/@tufjs/models/dist/error.js | 0 .../node_modules}/@tufjs/models/dist/file.js | 0 .../node_modules}/@tufjs/models/dist/index.js | 0 .../node_modules}/@tufjs/models/dist/key.js | 0 .../@tufjs/models/dist/metadata.js | 22 +- .../node_modules}/@tufjs/models/dist/role.js | 0 .../node_modules}/@tufjs/models/dist/root.js | 0 .../@tufjs/models/dist/signature.js | 0 .../@tufjs/models/dist/snapshot.js | 0 .../@tufjs/models/dist/targets.js | 0 .../@tufjs/models/dist/timestamp.js | 0 .../@tufjs/models/dist/utils/guard.js | 13 +- .../@tufjs/models/dist/utils/index.js | 0 .../@tufjs/models/dist/utils/key.js | 3 +- .../@tufjs/models/dist/utils/oid.js | 3 +- .../@tufjs/models/dist/utils/types.js | 0 .../@tufjs/models/dist/utils/verify.js | 0 .../node_modules}/@tufjs/models/package.json | 6 +- .../tuf-js/node_modules/cacache/LICENSE.md | 16 - .../node_modules/cacache/lib/content/path.js | 29 - .../node_modules/cacache/lib/content/read.js | 165 ----- .../node_modules/cacache/lib/content/rm.js | 18 - .../node_modules/cacache/lib/content/write.js | 206 ------- .../node_modules/cacache/lib/entry-index.js | 336 ---------- .../tuf-js/node_modules/cacache/lib/get.js | 170 ----- .../tuf-js/node_modules/cacache/lib/index.js | 42 -- .../node_modules/cacache/lib/memoization.js | 72 --- .../tuf-js/node_modules/cacache/lib/put.js | 80 --- .../tuf-js/node_modules/cacache/lib/rm.js | 31 - .../node_modules/cacache/lib/util/glob.js | 7 - .../cacache/lib/util/hash-to-segments.js | 7 - .../node_modules/cacache/lib/util/tmp.js | 26 - .../tuf-js/node_modules/cacache/lib/verify.js | 257 -------- .../tuf-js/node_modules/cacache/package.json | 82 --- .../node_modules/make-fetch-happen/LICENSE | 16 - .../make-fetch-happen/lib/cache/entry.js | 471 -------------- .../make-fetch-happen/lib/cache/errors.js | 11 - .../make-fetch-happen/lib/cache/index.js | 49 -- .../make-fetch-happen/lib/cache/key.js | 17 - .../make-fetch-happen/lib/cache/policy.js | 161 ----- .../make-fetch-happen/lib/fetch.js | 118 ---- .../make-fetch-happen/lib/index.js | 41 -- .../make-fetch-happen/lib/options.js | 54 -- .../make-fetch-happen/lib/pipeline.js | 41 -- .../make-fetch-happen/lib/remote.js | 131 ---- .../make-fetch-happen/package.json | 75 --- .../node_modules/minipass-fetch/LICENSE | 28 - .../minipass-fetch/lib/abort-error.js | 17 - .../node_modules/minipass-fetch/lib/blob.js | 97 --- .../node_modules/minipass-fetch/lib/body.js | 350 ----------- .../minipass-fetch/lib/fetch-error.js | 32 - .../minipass-fetch/lib/headers.js | 267 -------- .../node_modules/minipass-fetch/lib/index.js | 377 ------------ .../minipass-fetch/lib/request.js | 282 --------- 
.../minipass-fetch/lib/response.js | 90 --- .../node_modules/minipass-fetch/package.json | 69 --- .../tuf-js/node_modules/proc-log/lib/index.js | 153 ----- .../tuf-js/node_modules/proc-log/package.json | 45 -- .../tuf-js/node_modules/ssri/LICENSE.md | 16 - .../tuf-js/node_modules/ssri/lib/index.js | 580 ------------------ .../tuf-js/node_modules/ssri/package.json | 65 -- .../node_modules/unique-filename/LICENSE | 5 - .../node_modules/unique-filename/lib/index.js | 7 - .../node_modules/unique-filename/package.json | 51 -- .../tuf-js/node_modules/unique-slug/LICENSE | 15 - .../node_modules/unique-slug/lib/index.js | 11 - .../node_modules/unique-slug/package.json | 47 -- deps/npm/node_modules/tuf-js/package.json | 14 +- .../node_modules/ansi-regex/index.js | 6 +- .../node_modules/ansi-regex/package.json | 9 +- deps/npm/package.json | 20 +- 506 files changed, 4044 insertions(+), 14450 deletions(-) rename deps/npm/node_modules/{tuf-js/node_modules/proc-log => @npmcli/metavuln-calculator/node_modules/pacote}/LICENSE (90%) create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md create mode 100755 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js create mode 100644 deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json delete mode 100644 
deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js delete mode 100644 
deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/fetch-error.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js delete mode 100644 deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json delete mode 100644 deps/npm/node_modules/debug/node_modules/ms/index.js delete mode 100644 deps/npm/node_modules/debug/node_modules/ms/license.md delete mode 100644 deps/npm/node_modules/debug/node_modules/ms/package.json create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/LICENSE create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/charset.js create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/language.js create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js create mode 100644 deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/LICENSE (100%) rename 
deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/build.js (87%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/bundle.js (79%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/error.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/serialized.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/utility.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/dist/validate.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/bundle/package.json (92%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/LICENSE (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/asn1/error.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/asn1/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/asn1/length.js (97%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/asn1/obj.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/asn1/parse.js (96%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/asn1/tag.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/crypto.js (83%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/dsse.js (96%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/encoding.js (94%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/json.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/oid.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/pem.js (97%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/rfc3161/error.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/rfc3161/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/rfc3161/timestamp.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/rfc3161/tstinfo.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/stream.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/x509/cert.js (96%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/x509/ext.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/x509/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/dist/x509/sct.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/core/package.json (92%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/LICENSE (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/bundler/base.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/bundler/bundle.js (93%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/bundler/dsse.js (93%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/bundler/index.js (100%) rename 
deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/bundler/message.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/error.js (95%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/external/error.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/external/fetch.js (95%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/external/fulcio.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/external/rekor.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/external/tsa.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/identity/ci.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/identity/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/identity/provider.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/signer/fulcio/ca.js (96%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/signer/fulcio/ephemeral.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/signer/fulcio/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/signer/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/signer/signer.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/types/fetch.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/util/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/util/oidc.js (96%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/util/ua.js (95%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/tlog/client.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/tlog/entry.js (87%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/tlog/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/tsa/client.js (86%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/tsa/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/dist/witness/witness.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/sign/package.json (78%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/bundle/dsse.js (93%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/bundle/index.js (97%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/bundle/message.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/error.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/index.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/key/certificate.js (99%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/key/index.js (97%) rename deps/npm/node_modules/{ 
=> sigstore/node_modules}/@sigstore/verify/dist/key/sct.js (97%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/policy.js (93%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/shared.types.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/timestamp/checkpoint.js (99%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/timestamp/index.js (96%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/timestamp/merkle.js (95%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/timestamp/set.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/timestamp/tsa.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/tlog/dsse.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/tlog/hashedrekord.js (97%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/tlog/index.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/tlog/intoto.js (98%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/trust/filter.js (93%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/trust/index.js (95%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/trust/trust.types.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/dist/verifier.js (100%) rename deps/npm/node_modules/{ => sigstore/node_modules}/@sigstore/verify/package.json (86%) delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/LICENSE (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/base.js (80%) 
rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/delegations.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/error.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/file.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/index.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/key.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/metadata.js (91%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/role.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/root.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/signature.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/snapshot.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/targets.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/timestamp.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/utils/guard.js (88%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/utils/index.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/utils/key.js (99%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/utils/oid.js (96%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/utils/types.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/dist/utils/verify.js (100%) rename deps/npm/node_modules/{ => tuf-js/node_modules}/@tufjs/models/package.json (90%) delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/cacache/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js delete mode 100644 
deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/fetch-error.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/ssri/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js delete mode 100644 deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json diff --git a/deps/npm/docs/content/commands/npm-ls.md b/deps/npm/docs/content/commands/npm-ls.md index 3abedf9cd8e4e6..2dbb408ac77e45 100644 --- a/deps/npm/docs/content/commands/npm-ls.md +++ b/deps/npm/docs/content/commands/npm-ls.md @@ -27,7 +27,7 @@ packages will *also* show the paths to the specified packages. For example, running `npm ls promzard` in npm's source tree will show: ```bash -npm@10.9.0 /path/to/npm +npm@10.9.1 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 ``` diff --git a/deps/npm/docs/content/commands/npm.md b/deps/npm/docs/content/commands/npm.md index d92892ec544eb9..d5a92b439c6020 100644 --- a/deps/npm/docs/content/commands/npm.md +++ b/deps/npm/docs/content/commands/npm.md @@ -14,7 +14,7 @@ Note: This command is unaware of workspaces. 
 
 ### Version
 
-10.9.0
+10.9.1
 
 ### Description
 
diff --git a/deps/npm/docs/output/commands/npm-access.html b/deps/npm/docs/output/commands/npm-access.html index 0a839663fcb016..c965b4703b57c8 100644 --- a/deps/npm/docs/output/commands/npm-access.html +++ b/deps/npm/docs/output/commands/npm-access.html @@ -141,9 +141,9 @@
-

+

npm-access - @10.9.0 + @10.9.1

Set access level on published packages
diff --git a/deps/npm/docs/output/commands/npm-adduser.html b/deps/npm/docs/output/commands/npm-adduser.html index 2ac615ad13978d..425b0d5d23a3f5 100644 --- a/deps/npm/docs/output/commands/npm-adduser.html +++ b/deps/npm/docs/output/commands/npm-adduser.html @@ -141,9 +141,9 @@
-

+

npm-adduser - @10.9.0 + @10.9.1

Add a registry user account
diff --git a/deps/npm/docs/output/commands/npm-audit.html b/deps/npm/docs/output/commands/npm-audit.html index d7374ccfa66089..be436a29cbc817 100644 --- a/deps/npm/docs/output/commands/npm-audit.html +++ b/deps/npm/docs/output/commands/npm-audit.html @@ -141,9 +141,9 @@
-

+

npm-audit - @10.9.0 + @10.9.1

Run a security audit
diff --git a/deps/npm/docs/output/commands/npm-bugs.html b/deps/npm/docs/output/commands/npm-bugs.html index 9186bc100ae8f6..48d109440deff5 100644 --- a/deps/npm/docs/output/commands/npm-bugs.html +++ b/deps/npm/docs/output/commands/npm-bugs.html @@ -141,9 +141,9 @@
-

+

npm-bugs - @10.9.0 + @10.9.1

Report bugs for a package in a web browser
diff --git a/deps/npm/docs/output/commands/npm-cache.html b/deps/npm/docs/output/commands/npm-cache.html index 55cec217dbdfb7..a82e9a8e8a0d63 100644 --- a/deps/npm/docs/output/commands/npm-cache.html +++ b/deps/npm/docs/output/commands/npm-cache.html @@ -141,9 +141,9 @@
-

+

npm-cache - @10.9.0 + @10.9.1

Manipulates packages cache
diff --git a/deps/npm/docs/output/commands/npm-ci.html b/deps/npm/docs/output/commands/npm-ci.html index efe858777565a9..64d5d130273724 100644 --- a/deps/npm/docs/output/commands/npm-ci.html +++ b/deps/npm/docs/output/commands/npm-ci.html @@ -141,9 +141,9 @@
-

+

npm-ci - @10.9.0 + @10.9.1

Clean install a project
diff --git a/deps/npm/docs/output/commands/npm-completion.html b/deps/npm/docs/output/commands/npm-completion.html index 4a91a94498f33b..776720e250b2be 100644 --- a/deps/npm/docs/output/commands/npm-completion.html +++ b/deps/npm/docs/output/commands/npm-completion.html @@ -141,9 +141,9 @@
-

+

npm-completion - @10.9.0 + @10.9.1

Tab Completion for npm
diff --git a/deps/npm/docs/output/commands/npm-config.html b/deps/npm/docs/output/commands/npm-config.html index d18998fea8471d..074a2eb2c8606b 100644 --- a/deps/npm/docs/output/commands/npm-config.html +++ b/deps/npm/docs/output/commands/npm-config.html @@ -141,9 +141,9 @@
-

+

npm-config - @10.9.0 + @10.9.1

Manage the npm configuration files
diff --git a/deps/npm/docs/output/commands/npm-dedupe.html b/deps/npm/docs/output/commands/npm-dedupe.html index 194ea085383df3..a21182385e4e27 100644 --- a/deps/npm/docs/output/commands/npm-dedupe.html +++ b/deps/npm/docs/output/commands/npm-dedupe.html @@ -141,9 +141,9 @@
-

+

npm-dedupe - @10.9.0 + @10.9.1

Reduce duplication in the package tree
diff --git a/deps/npm/docs/output/commands/npm-deprecate.html b/deps/npm/docs/output/commands/npm-deprecate.html index ae40adfbbab051..f690a38413e51c 100644 --- a/deps/npm/docs/output/commands/npm-deprecate.html +++ b/deps/npm/docs/output/commands/npm-deprecate.html @@ -141,9 +141,9 @@
-

+

npm-deprecate - @10.9.0 + @10.9.1

Deprecate a version of a package
diff --git a/deps/npm/docs/output/commands/npm-diff.html b/deps/npm/docs/output/commands/npm-diff.html index 257b1c21572083..8f3de9df3769b2 100644 --- a/deps/npm/docs/output/commands/npm-diff.html +++ b/deps/npm/docs/output/commands/npm-diff.html @@ -141,9 +141,9 @@
-

+

npm-diff - @10.9.0 + @10.9.1

The registry diff command
diff --git a/deps/npm/docs/output/commands/npm-dist-tag.html b/deps/npm/docs/output/commands/npm-dist-tag.html index 08e7770a0c1745..8c9cc759da6cea 100644 --- a/deps/npm/docs/output/commands/npm-dist-tag.html +++ b/deps/npm/docs/output/commands/npm-dist-tag.html @@ -141,9 +141,9 @@
-

+

npm-dist-tag - @10.9.0 + @10.9.1

Modify package distribution tags
diff --git a/deps/npm/docs/output/commands/npm-docs.html b/deps/npm/docs/output/commands/npm-docs.html index 8b647251f36740..f187ddef642bcb 100644 --- a/deps/npm/docs/output/commands/npm-docs.html +++ b/deps/npm/docs/output/commands/npm-docs.html @@ -141,9 +141,9 @@
-

+

npm-docs - @10.9.0 + @10.9.1

Open documentation for a package in a web browser
diff --git a/deps/npm/docs/output/commands/npm-doctor.html b/deps/npm/docs/output/commands/npm-doctor.html index 110ec2f9b20bf0..ca25439e6d7c20 100644 --- a/deps/npm/docs/output/commands/npm-doctor.html +++ b/deps/npm/docs/output/commands/npm-doctor.html @@ -141,9 +141,9 @@
-

+

npm-doctor - @10.9.0 + @10.9.1

Check the health of your npm environment
diff --git a/deps/npm/docs/output/commands/npm-edit.html b/deps/npm/docs/output/commands/npm-edit.html index 9e4f7b361a5cb6..350b438b1230e5 100644 --- a/deps/npm/docs/output/commands/npm-edit.html +++ b/deps/npm/docs/output/commands/npm-edit.html @@ -141,9 +141,9 @@
-

+

npm-edit - @10.9.0 + @10.9.1

Edit an installed package
diff --git a/deps/npm/docs/output/commands/npm-exec.html b/deps/npm/docs/output/commands/npm-exec.html index 695fa35ab825c2..500ba9cbc77b2f 100644 --- a/deps/npm/docs/output/commands/npm-exec.html +++ b/deps/npm/docs/output/commands/npm-exec.html @@ -141,9 +141,9 @@
-

+

npm-exec - @10.9.0 + @10.9.1

Run a command from a local or remote npm package
diff --git a/deps/npm/docs/output/commands/npm-explain.html b/deps/npm/docs/output/commands/npm-explain.html index e79255ff5fa6c6..0fa690ab34f280 100644 --- a/deps/npm/docs/output/commands/npm-explain.html +++ b/deps/npm/docs/output/commands/npm-explain.html @@ -141,9 +141,9 @@
-

+

npm-explain - @10.9.0 + @10.9.1

Explain installed packages
diff --git a/deps/npm/docs/output/commands/npm-explore.html b/deps/npm/docs/output/commands/npm-explore.html index e296f4146aff98..93e2f9a837c91b 100644 --- a/deps/npm/docs/output/commands/npm-explore.html +++ b/deps/npm/docs/output/commands/npm-explore.html @@ -141,9 +141,9 @@
-

+

npm-explore - @10.9.0 + @10.9.1

Browse an installed package
diff --git a/deps/npm/docs/output/commands/npm-find-dupes.html b/deps/npm/docs/output/commands/npm-find-dupes.html index a8c914a0dd3d44..bca82e42914386 100644 --- a/deps/npm/docs/output/commands/npm-find-dupes.html +++ b/deps/npm/docs/output/commands/npm-find-dupes.html @@ -141,9 +141,9 @@
-

+

npm-find-dupes - @10.9.0 + @10.9.1

Find duplication in the package tree
diff --git a/deps/npm/docs/output/commands/npm-fund.html b/deps/npm/docs/output/commands/npm-fund.html index 36a63253439b60..64b3b9839647b9 100644 --- a/deps/npm/docs/output/commands/npm-fund.html +++ b/deps/npm/docs/output/commands/npm-fund.html @@ -141,9 +141,9 @@
-

+

npm-fund - @10.9.0 + @10.9.1

Retrieve funding information
diff --git a/deps/npm/docs/output/commands/npm-help-search.html b/deps/npm/docs/output/commands/npm-help-search.html index 76dea45d852e75..d0ff1a4c555502 100644 --- a/deps/npm/docs/output/commands/npm-help-search.html +++ b/deps/npm/docs/output/commands/npm-help-search.html @@ -141,9 +141,9 @@
-

+

npm-help-search - @10.9.0 + @10.9.1

Search npm help documentation
diff --git a/deps/npm/docs/output/commands/npm-help.html b/deps/npm/docs/output/commands/npm-help.html index e6b14af2f9ec6c..1a773e36f31d31 100644 --- a/deps/npm/docs/output/commands/npm-help.html +++ b/deps/npm/docs/output/commands/npm-help.html @@ -141,9 +141,9 @@
-

+

npm-help - @10.9.0 + @10.9.1

Get help on npm
diff --git a/deps/npm/docs/output/commands/npm-hook.html b/deps/npm/docs/output/commands/npm-hook.html index 393700a9a7165a..6688593a06533e 100644 --- a/deps/npm/docs/output/commands/npm-hook.html +++ b/deps/npm/docs/output/commands/npm-hook.html @@ -141,9 +141,9 @@
-

+

npm-hook - @10.9.0 + @10.9.1

Manage registry hooks
diff --git a/deps/npm/docs/output/commands/npm-init.html b/deps/npm/docs/output/commands/npm-init.html index 8ff01b2f7a76d0..3260f015ec7aa6 100644 --- a/deps/npm/docs/output/commands/npm-init.html +++ b/deps/npm/docs/output/commands/npm-init.html @@ -141,9 +141,9 @@
-

+

npm-init - @10.9.0 + @10.9.1

Create a package.json file
diff --git a/deps/npm/docs/output/commands/npm-install-ci-test.html b/deps/npm/docs/output/commands/npm-install-ci-test.html index b0f9d237ed8e98..a7cdbbf0213afc 100644 --- a/deps/npm/docs/output/commands/npm-install-ci-test.html +++ b/deps/npm/docs/output/commands/npm-install-ci-test.html @@ -141,9 +141,9 @@
-

+

npm-install-ci-test - @10.9.0 + @10.9.1

Install a project with a clean slate and run tests
diff --git a/deps/npm/docs/output/commands/npm-install-test.html b/deps/npm/docs/output/commands/npm-install-test.html index b0fbd63887fff5..972e0c4b9b55f9 100644 --- a/deps/npm/docs/output/commands/npm-install-test.html +++ b/deps/npm/docs/output/commands/npm-install-test.html @@ -141,9 +141,9 @@
-

+

npm-install-test - @10.9.0 + @10.9.1

Install package(s) and run tests
diff --git a/deps/npm/docs/output/commands/npm-install.html b/deps/npm/docs/output/commands/npm-install.html index fa57e02eaf9ad5..685f6bff0fcfe3 100644 --- a/deps/npm/docs/output/commands/npm-install.html +++ b/deps/npm/docs/output/commands/npm-install.html @@ -141,9 +141,9 @@
-

+

npm-install - @10.9.0 + @10.9.1

Install a package
diff --git a/deps/npm/docs/output/commands/npm-link.html b/deps/npm/docs/output/commands/npm-link.html index 4e461ebefafd42..11b7ad9ec3c23a 100644 --- a/deps/npm/docs/output/commands/npm-link.html +++ b/deps/npm/docs/output/commands/npm-link.html @@ -141,9 +141,9 @@
-

+

npm-link - @10.9.0 + @10.9.1

Symlink a package folder
diff --git a/deps/npm/docs/output/commands/npm-login.html b/deps/npm/docs/output/commands/npm-login.html index 9c1584ca36bc41..e9f1e0b08bb1d5 100644 --- a/deps/npm/docs/output/commands/npm-login.html +++ b/deps/npm/docs/output/commands/npm-login.html @@ -141,9 +141,9 @@
-

+

npm-login - @10.9.0 + @10.9.1

Login to a registry user account
diff --git a/deps/npm/docs/output/commands/npm-logout.html b/deps/npm/docs/output/commands/npm-logout.html index 8908b329395254..d24c73d9a09f25 100644 --- a/deps/npm/docs/output/commands/npm-logout.html +++ b/deps/npm/docs/output/commands/npm-logout.html @@ -141,9 +141,9 @@
-

+

npm-logout - @10.9.0 + @10.9.1

Log out of the registry
diff --git a/deps/npm/docs/output/commands/npm-ls.html b/deps/npm/docs/output/commands/npm-ls.html index 2615a492a75e38..bdbd451e2794ab 100644 --- a/deps/npm/docs/output/commands/npm-ls.html +++ b/deps/npm/docs/output/commands/npm-ls.html @@ -141,9 +141,9 @@
-

+

npm-ls - @10.9.0 + @10.9.1

List installed packages
@@ -168,7 +168,7 @@

Description

the results to only the paths to the packages named. Note that nested packages will also show the paths to the specified packages. For example, running npm ls promzard in npm's source tree will show:

-npm@10.9.0 /path/to/npm
+npm@10.9.1 /path/to/npm
 └─┬ init-package-json@0.0.4
   └── promzard@0.1.5
 
diff --git a/deps/npm/docs/output/commands/npm-org.html b/deps/npm/docs/output/commands/npm-org.html index 66d823f7f3c2de..4cbb0828d998ba 100644 --- a/deps/npm/docs/output/commands/npm-org.html +++ b/deps/npm/docs/output/commands/npm-org.html @@ -141,9 +141,9 @@
-

+

npm-org - @10.9.0 + @10.9.1

Manage orgs
diff --git a/deps/npm/docs/output/commands/npm-outdated.html b/deps/npm/docs/output/commands/npm-outdated.html index 6002ff1818da2c..0611cc7d46e5e3 100644 --- a/deps/npm/docs/output/commands/npm-outdated.html +++ b/deps/npm/docs/output/commands/npm-outdated.html @@ -141,9 +141,9 @@
-

+

npm-outdated - @10.9.0 + @10.9.1

Check for outdated packages
diff --git a/deps/npm/docs/output/commands/npm-owner.html b/deps/npm/docs/output/commands/npm-owner.html index e8b7a05274aa91..9c674c67479b27 100644 --- a/deps/npm/docs/output/commands/npm-owner.html +++ b/deps/npm/docs/output/commands/npm-owner.html @@ -141,9 +141,9 @@
-

+

npm-owner - @10.9.0 + @10.9.1

Manage package owners
diff --git a/deps/npm/docs/output/commands/npm-pack.html b/deps/npm/docs/output/commands/npm-pack.html index 596ba9a35e3ac5..5e9f1fc13fa0e1 100644 --- a/deps/npm/docs/output/commands/npm-pack.html +++ b/deps/npm/docs/output/commands/npm-pack.html @@ -141,9 +141,9 @@
-

+

npm-pack - @10.9.0 + @10.9.1

Create a tarball from a package
diff --git a/deps/npm/docs/output/commands/npm-ping.html b/deps/npm/docs/output/commands/npm-ping.html index de393dbd0b5d71..198d820b58c3ba 100644 --- a/deps/npm/docs/output/commands/npm-ping.html +++ b/deps/npm/docs/output/commands/npm-ping.html @@ -141,9 +141,9 @@
-

+

npm-ping - @10.9.0 + @10.9.1

Ping npm registry
diff --git a/deps/npm/docs/output/commands/npm-pkg.html b/deps/npm/docs/output/commands/npm-pkg.html index ffb153cc440ce4..0a4fcd384f55fb 100644 --- a/deps/npm/docs/output/commands/npm-pkg.html +++ b/deps/npm/docs/output/commands/npm-pkg.html @@ -141,9 +141,9 @@
-

+

npm-pkg - @10.9.0 + @10.9.1

Manages your package.json
diff --git a/deps/npm/docs/output/commands/npm-prefix.html b/deps/npm/docs/output/commands/npm-prefix.html index da63644f2df42a..62651d94535ee6 100644 --- a/deps/npm/docs/output/commands/npm-prefix.html +++ b/deps/npm/docs/output/commands/npm-prefix.html @@ -141,9 +141,9 @@
-

+

npm-prefix - @10.9.0 + @10.9.1

Display prefix
diff --git a/deps/npm/docs/output/commands/npm-profile.html b/deps/npm/docs/output/commands/npm-profile.html index c3679196a85bc4..ff616498acb136 100644 --- a/deps/npm/docs/output/commands/npm-profile.html +++ b/deps/npm/docs/output/commands/npm-profile.html @@ -141,9 +141,9 @@
-

+

npm-profile - @10.9.0 + @10.9.1

Change settings on your registry profile
diff --git a/deps/npm/docs/output/commands/npm-prune.html b/deps/npm/docs/output/commands/npm-prune.html index 4e844ab9f02cbb..0bf046badbd199 100644 --- a/deps/npm/docs/output/commands/npm-prune.html +++ b/deps/npm/docs/output/commands/npm-prune.html @@ -141,9 +141,9 @@
-

+

npm-prune - @10.9.0 + @10.9.1

Remove extraneous packages
diff --git a/deps/npm/docs/output/commands/npm-publish.html b/deps/npm/docs/output/commands/npm-publish.html index b808cc29a15744..b69cb033afa39c 100644 --- a/deps/npm/docs/output/commands/npm-publish.html +++ b/deps/npm/docs/output/commands/npm-publish.html @@ -141,9 +141,9 @@
-

+

npm-publish - @10.9.0 + @10.9.1

Publish a package
diff --git a/deps/npm/docs/output/commands/npm-query.html b/deps/npm/docs/output/commands/npm-query.html index d85a2f6c27ff79..04359b6954b82f 100644 --- a/deps/npm/docs/output/commands/npm-query.html +++ b/deps/npm/docs/output/commands/npm-query.html @@ -141,9 +141,9 @@
-

+

npm-query - @10.9.0 + @10.9.1

Dependency selector query
diff --git a/deps/npm/docs/output/commands/npm-rebuild.html b/deps/npm/docs/output/commands/npm-rebuild.html index ff28b35b86bced..ce696a013d7792 100644 --- a/deps/npm/docs/output/commands/npm-rebuild.html +++ b/deps/npm/docs/output/commands/npm-rebuild.html @@ -141,9 +141,9 @@
-

+

npm-rebuild - @10.9.0 + @10.9.1

Rebuild a package
diff --git a/deps/npm/docs/output/commands/npm-repo.html b/deps/npm/docs/output/commands/npm-repo.html index 8ee7e9d6d2d074..6ec545571e0cf8 100644 --- a/deps/npm/docs/output/commands/npm-repo.html +++ b/deps/npm/docs/output/commands/npm-repo.html @@ -141,9 +141,9 @@
-

+

npm-repo - @10.9.0 + @10.9.1

Open package repository page in the browser
diff --git a/deps/npm/docs/output/commands/npm-restart.html b/deps/npm/docs/output/commands/npm-restart.html index e3171bf280910b..aa6907f7cf0e9b 100644 --- a/deps/npm/docs/output/commands/npm-restart.html +++ b/deps/npm/docs/output/commands/npm-restart.html @@ -141,9 +141,9 @@
-

+

npm-restart - @10.9.0 + @10.9.1

Restart a package
diff --git a/deps/npm/docs/output/commands/npm-root.html b/deps/npm/docs/output/commands/npm-root.html index 4e2e82e5bb259c..139504648861b0 100644 --- a/deps/npm/docs/output/commands/npm-root.html +++ b/deps/npm/docs/output/commands/npm-root.html @@ -141,9 +141,9 @@
-

+

npm-root - @10.9.0 + @10.9.1

Display npm root
diff --git a/deps/npm/docs/output/commands/npm-run-script.html b/deps/npm/docs/output/commands/npm-run-script.html index 4673e733f49fcd..bcb21c48052eff 100644 --- a/deps/npm/docs/output/commands/npm-run-script.html +++ b/deps/npm/docs/output/commands/npm-run-script.html @@ -141,9 +141,9 @@
-

+

npm-run-script - @10.9.0 + @10.9.1

Run arbitrary package scripts
diff --git a/deps/npm/docs/output/commands/npm-sbom.html b/deps/npm/docs/output/commands/npm-sbom.html index 00508eca6cd91a..64fa00b09d8648 100644 --- a/deps/npm/docs/output/commands/npm-sbom.html +++ b/deps/npm/docs/output/commands/npm-sbom.html @@ -141,9 +141,9 @@
-

+

npm-sbom - @10.9.0 + @10.9.1

Generate a Software Bill of Materials (SBOM)
diff --git a/deps/npm/docs/output/commands/npm-search.html b/deps/npm/docs/output/commands/npm-search.html index edf4e437e65400..19130178365691 100644 --- a/deps/npm/docs/output/commands/npm-search.html +++ b/deps/npm/docs/output/commands/npm-search.html @@ -141,9 +141,9 @@
-

+

npm-search - @10.9.0 + @10.9.1

Search for packages
diff --git a/deps/npm/docs/output/commands/npm-shrinkwrap.html b/deps/npm/docs/output/commands/npm-shrinkwrap.html index f225abaed9218f..318e80890e12c9 100644 --- a/deps/npm/docs/output/commands/npm-shrinkwrap.html +++ b/deps/npm/docs/output/commands/npm-shrinkwrap.html @@ -141,9 +141,9 @@
-

+

npm-shrinkwrap - @10.9.0 + @10.9.1

Lock down dependency versions for publication
diff --git a/deps/npm/docs/output/commands/npm-star.html b/deps/npm/docs/output/commands/npm-star.html index 04b36628ad2176..4db54c52ce041a 100644 --- a/deps/npm/docs/output/commands/npm-star.html +++ b/deps/npm/docs/output/commands/npm-star.html @@ -141,9 +141,9 @@
-

+

npm-star - @10.9.0 + @10.9.1

Mark your favorite packages
diff --git a/deps/npm/docs/output/commands/npm-stars.html b/deps/npm/docs/output/commands/npm-stars.html index 3183aa047f1f89..23598971506f12 100644 --- a/deps/npm/docs/output/commands/npm-stars.html +++ b/deps/npm/docs/output/commands/npm-stars.html @@ -141,9 +141,9 @@
-

+

npm-stars - @10.9.0 + @10.9.1

View packages marked as favorites
diff --git a/deps/npm/docs/output/commands/npm-start.html b/deps/npm/docs/output/commands/npm-start.html index b81caefe0a4431..a8b7460c5503b5 100644 --- a/deps/npm/docs/output/commands/npm-start.html +++ b/deps/npm/docs/output/commands/npm-start.html @@ -141,9 +141,9 @@
-

+

npm-start - @10.9.0 + @10.9.1

Start a package
diff --git a/deps/npm/docs/output/commands/npm-stop.html b/deps/npm/docs/output/commands/npm-stop.html index 85d4c782a736e7..05c59988ce5b02 100644 --- a/deps/npm/docs/output/commands/npm-stop.html +++ b/deps/npm/docs/output/commands/npm-stop.html @@ -141,9 +141,9 @@
-

+

npm-stop - @10.9.0 + @10.9.1

Stop a package
diff --git a/deps/npm/docs/output/commands/npm-team.html b/deps/npm/docs/output/commands/npm-team.html index 6ad869fa3bed9e..596e81ca07571e 100644 --- a/deps/npm/docs/output/commands/npm-team.html +++ b/deps/npm/docs/output/commands/npm-team.html @@ -141,9 +141,9 @@
-

+

npm-team - @10.9.0 + @10.9.1

Manage organization teams and team memberships
diff --git a/deps/npm/docs/output/commands/npm-test.html b/deps/npm/docs/output/commands/npm-test.html index bce16c92e4f087..a67145f8007675 100644 --- a/deps/npm/docs/output/commands/npm-test.html +++ b/deps/npm/docs/output/commands/npm-test.html @@ -141,9 +141,9 @@
-

+

npm-test - @10.9.0 + @10.9.1

Test a package
diff --git a/deps/npm/docs/output/commands/npm-token.html b/deps/npm/docs/output/commands/npm-token.html index 99fcafd9b8f53b..43a3d9e8c11824 100644 --- a/deps/npm/docs/output/commands/npm-token.html +++ b/deps/npm/docs/output/commands/npm-token.html @@ -141,9 +141,9 @@
-

+

npm-token - @10.9.0 + @10.9.1

Manage your authentication tokens
diff --git a/deps/npm/docs/output/commands/npm-uninstall.html b/deps/npm/docs/output/commands/npm-uninstall.html index dd8e7a2234604e..2be118b69200f7 100644 --- a/deps/npm/docs/output/commands/npm-uninstall.html +++ b/deps/npm/docs/output/commands/npm-uninstall.html @@ -141,9 +141,9 @@
-

+

npm-uninstall - @10.9.0 + @10.9.1

Remove a package
diff --git a/deps/npm/docs/output/commands/npm-unpublish.html b/deps/npm/docs/output/commands/npm-unpublish.html index 56e2c59f62addb..7ffcfc5687253d 100644 --- a/deps/npm/docs/output/commands/npm-unpublish.html +++ b/deps/npm/docs/output/commands/npm-unpublish.html @@ -141,9 +141,9 @@
-

+

npm-unpublish - @10.9.0 + @10.9.1

Remove a package from the registry
diff --git a/deps/npm/docs/output/commands/npm-unstar.html b/deps/npm/docs/output/commands/npm-unstar.html index 710d8a947d665f..80e858ca870d86 100644 --- a/deps/npm/docs/output/commands/npm-unstar.html +++ b/deps/npm/docs/output/commands/npm-unstar.html @@ -141,9 +141,9 @@
-

+

npm-unstar - @10.9.0 + @10.9.1

Remove an item from your favorite packages
diff --git a/deps/npm/docs/output/commands/npm-update.html b/deps/npm/docs/output/commands/npm-update.html index e587ec92f8614a..1a6ae021eb91d7 100644 --- a/deps/npm/docs/output/commands/npm-update.html +++ b/deps/npm/docs/output/commands/npm-update.html @@ -141,9 +141,9 @@
-

+

npm-update - @10.9.0 + @10.9.1

Update packages
diff --git a/deps/npm/docs/output/commands/npm-version.html b/deps/npm/docs/output/commands/npm-version.html index 196a0236093673..f86ea98c3c92bb 100644 --- a/deps/npm/docs/output/commands/npm-version.html +++ b/deps/npm/docs/output/commands/npm-version.html @@ -141,9 +141,9 @@
-

+

npm-version - @10.9.0 + @10.9.1

Bump a package version
diff --git a/deps/npm/docs/output/commands/npm-view.html b/deps/npm/docs/output/commands/npm-view.html index 0eebee3037748d..054ab0f5929683 100644 --- a/deps/npm/docs/output/commands/npm-view.html +++ b/deps/npm/docs/output/commands/npm-view.html @@ -141,9 +141,9 @@
-

+

npm-view - @10.9.0 + @10.9.1

View registry info
diff --git a/deps/npm/docs/output/commands/npm-whoami.html b/deps/npm/docs/output/commands/npm-whoami.html index 0d1fa5ea9b87d7..7103f29354b398 100644 --- a/deps/npm/docs/output/commands/npm-whoami.html +++ b/deps/npm/docs/output/commands/npm-whoami.html @@ -141,9 +141,9 @@
-

+

npm-whoami - @10.9.0 + @10.9.1

Display npm username
diff --git a/deps/npm/docs/output/commands/npm.html b/deps/npm/docs/output/commands/npm.html index 005009c1dba277..79d2e5b0ec9a75 100644 --- a/deps/npm/docs/output/commands/npm.html +++ b/deps/npm/docs/output/commands/npm.html @@ -141,9 +141,9 @@
-

+

npm - @10.9.0 + @10.9.1

javascript package manager
@@ -158,7 +158,7 @@

Table of contents

Note: This command is unaware of workspaces.

Version

-

10.9.0

+

10.9.1

Description

npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency diff --git a/deps/npm/docs/output/commands/npx.html b/deps/npm/docs/output/commands/npx.html index d3239ed320bb70..e722d396d2fc2f 100644 --- a/deps/npm/docs/output/commands/npx.html +++ b/deps/npm/docs/output/commands/npx.html @@ -141,9 +141,9 @@

-

+

npx - @10.9.0 + @10.9.1

Run a command from a local or remote npm package
diff --git a/deps/npm/docs/output/configuring-npm/folders.html b/deps/npm/docs/output/configuring-npm/folders.html index f23aef6354f73d..278258f62d3109 100644 --- a/deps/npm/docs/output/configuring-npm/folders.html +++ b/deps/npm/docs/output/configuring-npm/folders.html @@ -141,9 +141,9 @@
-

+

folders - @10.9.0 + @10.9.1

Folder Structures Used by npm
diff --git a/deps/npm/docs/output/configuring-npm/install.html b/deps/npm/docs/output/configuring-npm/install.html index cddbcaae2de8be..61faffa4666219 100644 --- a/deps/npm/docs/output/configuring-npm/install.html +++ b/deps/npm/docs/output/configuring-npm/install.html @@ -141,9 +141,9 @@
-

+

install - @10.9.0 + @10.9.1

Download and install node and npm
diff --git a/deps/npm/docs/output/configuring-npm/npm-global.html b/deps/npm/docs/output/configuring-npm/npm-global.html index f23aef6354f73d..278258f62d3109 100644 --- a/deps/npm/docs/output/configuring-npm/npm-global.html +++ b/deps/npm/docs/output/configuring-npm/npm-global.html @@ -141,9 +141,9 @@
-

+

folders - @10.9.0 + @10.9.1

Folder Structures Used by npm
diff --git a/deps/npm/docs/output/configuring-npm/npm-json.html b/deps/npm/docs/output/configuring-npm/npm-json.html index 0cd27f8d82fdd8..a13d96ed88c66e 100644 --- a/deps/npm/docs/output/configuring-npm/npm-json.html +++ b/deps/npm/docs/output/configuring-npm/npm-json.html @@ -141,9 +141,9 @@
-

+

package.json - @10.9.0 + @10.9.1

Specifics of npm's package.json handling
diff --git a/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html b/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html index 5f20e99541a7a7..27034d851c3f13 100644 --- a/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html +++ b/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html @@ -141,9 +141,9 @@
-

+

npm-shrinkwrap.json - @10.9.0 + @10.9.1

A publishable lockfile
diff --git a/deps/npm/docs/output/configuring-npm/npmrc.html b/deps/npm/docs/output/configuring-npm/npmrc.html index a20c3d807d3d83..ad3a7436cd9fde 100644 --- a/deps/npm/docs/output/configuring-npm/npmrc.html +++ b/deps/npm/docs/output/configuring-npm/npmrc.html @@ -141,9 +141,9 @@
-

+

npmrc - @10.9.0 + @10.9.1

The npm config files
diff --git a/deps/npm/docs/output/configuring-npm/package-json.html b/deps/npm/docs/output/configuring-npm/package-json.html index 0cd27f8d82fdd8..a13d96ed88c66e 100644 --- a/deps/npm/docs/output/configuring-npm/package-json.html +++ b/deps/npm/docs/output/configuring-npm/package-json.html @@ -141,9 +141,9 @@
-

+

package.json - @10.9.0 + @10.9.1

Specifics of npm's package.json handling
diff --git a/deps/npm/docs/output/configuring-npm/package-lock-json.html b/deps/npm/docs/output/configuring-npm/package-lock-json.html index 58598df7ae4fbc..9cf1f7413394f3 100644 --- a/deps/npm/docs/output/configuring-npm/package-lock-json.html +++ b/deps/npm/docs/output/configuring-npm/package-lock-json.html @@ -141,9 +141,9 @@
-

+

package-lock.json - @10.9.0 + @10.9.1

A manifestation of the manifest
diff --git a/deps/npm/docs/output/using-npm/config.html b/deps/npm/docs/output/using-npm/config.html index 5d70bb7e8e803e..a3e521919798fc 100644 --- a/deps/npm/docs/output/using-npm/config.html +++ b/deps/npm/docs/output/using-npm/config.html @@ -141,9 +141,9 @@
-

+

config - @10.9.0 + @10.9.1

More than you probably want to know about npm configuration
diff --git a/deps/npm/docs/output/using-npm/dependency-selectors.html b/deps/npm/docs/output/using-npm/dependency-selectors.html index 008377b3ab55ff..9ab66b16a2b588 100644 --- a/deps/npm/docs/output/using-npm/dependency-selectors.html +++ b/deps/npm/docs/output/using-npm/dependency-selectors.html @@ -141,9 +141,9 @@
-

+

Dependency Selector Syntax & Querying - @10.9.0 + @10.9.1

Dependency Selector Syntax & Querying
diff --git a/deps/npm/docs/output/using-npm/developers.html b/deps/npm/docs/output/using-npm/developers.html index e75223cd623a54..d2e03c36046a7a 100644 --- a/deps/npm/docs/output/using-npm/developers.html +++ b/deps/npm/docs/output/using-npm/developers.html @@ -141,9 +141,9 @@
-

+

developers - @10.9.0 + @10.9.1

Developer Guide
diff --git a/deps/npm/docs/output/using-npm/logging.html b/deps/npm/docs/output/using-npm/logging.html index 5499bd53e285f2..2d5b25ec33a9eb 100644 --- a/deps/npm/docs/output/using-npm/logging.html +++ b/deps/npm/docs/output/using-npm/logging.html @@ -141,9 +141,9 @@
-

+

Logging - @10.9.0 + @10.9.1

Why, What & How We Log
diff --git a/deps/npm/docs/output/using-npm/orgs.html b/deps/npm/docs/output/using-npm/orgs.html index c9ed80f6421c5e..30378d391994c6 100644 --- a/deps/npm/docs/output/using-npm/orgs.html +++ b/deps/npm/docs/output/using-npm/orgs.html @@ -141,9 +141,9 @@
-

+

orgs - @10.9.0 + @10.9.1

Working with Teams & Orgs
diff --git a/deps/npm/docs/output/using-npm/package-spec.html b/deps/npm/docs/output/using-npm/package-spec.html index a370699bb555bd..998257f5841b17 100644 --- a/deps/npm/docs/output/using-npm/package-spec.html +++ b/deps/npm/docs/output/using-npm/package-spec.html @@ -141,9 +141,9 @@
-

+

package-spec - @10.9.0 + @10.9.1

Package name specifier
diff --git a/deps/npm/docs/output/using-npm/registry.html b/deps/npm/docs/output/using-npm/registry.html index 9d9f8a539333c9..323aa7c776f822 100644 --- a/deps/npm/docs/output/using-npm/registry.html +++ b/deps/npm/docs/output/using-npm/registry.html @@ -141,9 +141,9 @@
-

+

registry - @10.9.0 + @10.9.1

The JavaScript Package Registry
diff --git a/deps/npm/docs/output/using-npm/removal.html b/deps/npm/docs/output/using-npm/removal.html index 2668dd9dd8a440..8011ad107cebc8 100644 --- a/deps/npm/docs/output/using-npm/removal.html +++ b/deps/npm/docs/output/using-npm/removal.html @@ -141,9 +141,9 @@
-

+

removal - @10.9.0 + @10.9.1

Cleaning the Slate
diff --git a/deps/npm/docs/output/using-npm/scope.html b/deps/npm/docs/output/using-npm/scope.html index 0441f8ef703e95..f8d0cbbbfbb5bc 100644 --- a/deps/npm/docs/output/using-npm/scope.html +++ b/deps/npm/docs/output/using-npm/scope.html @@ -141,9 +141,9 @@
-

+

scope - @10.9.0 + @10.9.1

Scoped packages
diff --git a/deps/npm/docs/output/using-npm/scripts.html b/deps/npm/docs/output/using-npm/scripts.html index ff37c2ede18062..fbf65a2593c240 100644 --- a/deps/npm/docs/output/using-npm/scripts.html +++ b/deps/npm/docs/output/using-npm/scripts.html @@ -141,9 +141,9 @@
-

+

scripts - @10.9.0 + @10.9.1

How npm handles the "scripts" field
diff --git a/deps/npm/docs/output/using-npm/workspaces.html b/deps/npm/docs/output/using-npm/workspaces.html index 19195cc868db4b..09e6d401fd75aa 100644 --- a/deps/npm/docs/output/using-npm/workspaces.html +++ b/deps/npm/docs/output/using-npm/workspaces.html @@ -141,9 +141,9 @@
-

+

workspaces - @10.9.0 + @10.9.1

Working with workspaces
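The `deps/npm/lib/cli.js` hunk below adds a small runtime change alongside the regenerated docs and man pages: it opts npm's CLI into Node's on-disk module compile cache before the rest of the CLI is loaded. As a minimal standalone sketch (an illustration of the same pattern, not part of the patch), the guard can be exercised on its own; `enableCompileCache` is only exported by `node:module` on newer Node releases, and the `try`/`catch` plus the `if` check keep startup working on runtimes where it is unavailable:

```js
// Sketch of the guard pattern used by the cli.js hunk below: opt into the
// module compile cache when the running Node exposes the API, and fall
// through silently when it does not.
try {
  const { enableCompileCache } = require('node:module')
  if (typeof enableCompileCache === 'function') {
    // Persists compiled bytecode so later CLI startups skip recompilation.
    enableCompileCache()
  }
} catch (e) {
  // Older runtimes may lack the API (or reject the `node:` specifier);
  // in that case simply continue without the cache.
}
```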
diff --git a/deps/npm/lib/cli.js b/deps/npm/lib/cli.js index e11729fe3205b9..00b4fc0bd7fb72 100644 --- a/deps/npm/lib/cli.js +++ b/deps/npm/lib/cli.js @@ -1,3 +1,11 @@ +try { + const { enableCompileCache } = require('node:module') + /* istanbul ignore next */ + if (enableCompileCache) { + enableCompileCache() + } +} catch (e) { /* istanbul ignore next */ } + const validateEngines = require('./cli/validate-engines.js') const cliEntry = require('node:path').resolve(__dirname, 'cli/entry.js') diff --git a/deps/npm/man/man1/npm-access.1 b/deps/npm/man/man1/npm-access.1 index 602193f3b8e3e8..2e0b979a061918 100644 --- a/deps/npm/man/man1/npm-access.1 +++ b/deps/npm/man/man1/npm-access.1 @@ -1,4 +1,4 @@ -.TH "NPM-ACCESS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ACCESS" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-access\fR - Set access level on published packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-adduser.1 b/deps/npm/man/man1/npm-adduser.1 index 467384ec0ec40e..c1f786f595b062 100644 --- a/deps/npm/man/man1/npm-adduser.1 +++ b/deps/npm/man/man1/npm-adduser.1 @@ -1,4 +1,4 @@ -.TH "NPM-ADDUSER" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ADDUSER" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-adduser\fR - Add a registry user account .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-audit.1 b/deps/npm/man/man1/npm-audit.1 index 64bf5d5461a157..908d8d2ddc5cb0 100644 --- a/deps/npm/man/man1/npm-audit.1 +++ b/deps/npm/man/man1/npm-audit.1 @@ -1,4 +1,4 @@ -.TH "NPM-AUDIT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-AUDIT" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-audit\fR - Run a security audit .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-bugs.1 b/deps/npm/man/man1/npm-bugs.1 index c25b04b6814137..17eb1d09ae4c55 100644 --- a/deps/npm/man/man1/npm-bugs.1 +++ b/deps/npm/man/man1/npm-bugs.1 @@ -1,4 +1,4 @@ -.TH "NPM-BUGS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-BUGS" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-bugs\fR - Report bugs for a package in a web browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-cache.1 b/deps/npm/man/man1/npm-cache.1 index e9da044a5b4479..2b06a6e5d9d471 100644 --- a/deps/npm/man/man1/npm-cache.1 +++ b/deps/npm/man/man1/npm-cache.1 @@ -1,4 +1,4 @@ -.TH "NPM-CACHE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-CACHE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-cache\fR - Manipulates packages cache .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ci.1 b/deps/npm/man/man1/npm-ci.1 index 1ad1419cb1f98f..d021d55d0f34cc 100644 --- a/deps/npm/man/man1/npm-ci.1 +++ b/deps/npm/man/man1/npm-ci.1 @@ -1,4 +1,4 @@ -.TH "NPM-CI" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-CI" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-ci\fR - Clean install a project .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-completion.1 b/deps/npm/man/man1/npm-completion.1 index 6f246c436cead3..e0a454d49109d2 100644 --- a/deps/npm/man/man1/npm-completion.1 +++ b/deps/npm/man/man1/npm-completion.1 @@ -1,4 +1,4 @@ -.TH "NPM-COMPLETION" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-COMPLETION" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-completion\fR - Tab Completion for npm .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-config.1 b/deps/npm/man/man1/npm-config.1 index 1bef58e188b68c..b6c5e5dd331fb1 100644 --- a/deps/npm/man/man1/npm-config.1 +++ b/deps/npm/man/man1/npm-config.1 @@ -1,4 +1,4 @@ -.TH "NPM-CONFIG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-CONFIG" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" 
\fBnpm-config\fR - Manage the npm configuration files .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-dedupe.1 b/deps/npm/man/man1/npm-dedupe.1 index b1e21f0478ed4f..3e0b5d91d4e0b5 100644 --- a/deps/npm/man/man1/npm-dedupe.1 +++ b/deps/npm/man/man1/npm-dedupe.1 @@ -1,4 +1,4 @@ -.TH "NPM-DEDUPE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DEDUPE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-dedupe\fR - Reduce duplication in the package tree .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-deprecate.1 b/deps/npm/man/man1/npm-deprecate.1 index 31e9e56cd95382..73d802434859f3 100644 --- a/deps/npm/man/man1/npm-deprecate.1 +++ b/deps/npm/man/man1/npm-deprecate.1 @@ -1,4 +1,4 @@ -.TH "NPM-DEPRECATE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DEPRECATE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-deprecate\fR - Deprecate a version of a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-diff.1 b/deps/npm/man/man1/npm-diff.1 index c29ae3266d1543..52fb6989ae79f9 100644 --- a/deps/npm/man/man1/npm-diff.1 +++ b/deps/npm/man/man1/npm-diff.1 @@ -1,4 +1,4 @@ -.TH "NPM-DIFF" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DIFF" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-diff\fR - The registry diff command .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-dist-tag.1 b/deps/npm/man/man1/npm-dist-tag.1 index 46e5cf8ac3492a..272018226ae78b 100644 --- a/deps/npm/man/man1/npm-dist-tag.1 +++ b/deps/npm/man/man1/npm-dist-tag.1 @@ -1,4 +1,4 @@ -.TH "NPM-DIST-TAG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DIST-TAG" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-dist-tag\fR - Modify package distribution tags .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-docs.1 b/deps/npm/man/man1/npm-docs.1 index 6cda2d87abe163..86d32afda3cbeb 100644 --- a/deps/npm/man/man1/npm-docs.1 +++ b/deps/npm/man/man1/npm-docs.1 @@ -1,4 +1,4 @@ -.TH "NPM-DOCS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DOCS" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-docs\fR - Open documentation for a package in a web browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-doctor.1 b/deps/npm/man/man1/npm-doctor.1 index 7a67ed1e6b32b1..32c190ba11b9cd 100644 --- a/deps/npm/man/man1/npm-doctor.1 +++ b/deps/npm/man/man1/npm-doctor.1 @@ -1,4 +1,4 @@ -.TH "NPM-DOCTOR" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DOCTOR" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-doctor\fR - Check the health of your npm environment .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-edit.1 b/deps/npm/man/man1/npm-edit.1 index 5ae7e24b67d60e..d1f5db8b127da1 100644 --- a/deps/npm/man/man1/npm-edit.1 +++ b/deps/npm/man/man1/npm-edit.1 @@ -1,4 +1,4 @@ -.TH "NPM-EDIT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EDIT" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-edit\fR - Edit an installed package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-exec.1 b/deps/npm/man/man1/npm-exec.1 index 20c21e67919d65..db7631141e97cb 100644 --- a/deps/npm/man/man1/npm-exec.1 +++ b/deps/npm/man/man1/npm-exec.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXEC" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EXEC" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-exec\fR - Run a command from a local or remote npm package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-explain.1 b/deps/npm/man/man1/npm-explain.1 index 65ef0e086e2ce0..92602d31ec27ad 100644 --- a/deps/npm/man/man1/npm-explain.1 +++ b/deps/npm/man/man1/npm-explain.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXPLAIN" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EXPLAIN" "1" "November 
2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-explain\fR - Explain installed packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-explore.1 b/deps/npm/man/man1/npm-explore.1 index 6b92a81c2abc0d..f57981af0e84e6 100644 --- a/deps/npm/man/man1/npm-explore.1 +++ b/deps/npm/man/man1/npm-explore.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXPLORE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EXPLORE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-explore\fR - Browse an installed package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-find-dupes.1 b/deps/npm/man/man1/npm-find-dupes.1 index 261313e1b23ad1..d86fd9b54f7a2d 100644 --- a/deps/npm/man/man1/npm-find-dupes.1 +++ b/deps/npm/man/man1/npm-find-dupes.1 @@ -1,4 +1,4 @@ -.TH "NPM-FIND-DUPES" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-FIND-DUPES" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-find-dupes\fR - Find duplication in the package tree .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-fund.1 b/deps/npm/man/man1/npm-fund.1 index b87d57d20c0f50..d1c07a8785e7f6 100644 --- a/deps/npm/man/man1/npm-fund.1 +++ b/deps/npm/man/man1/npm-fund.1 @@ -1,4 +1,4 @@ -.TH "NPM-FUND" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-FUND" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-fund\fR - Retrieve funding information .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-help-search.1 b/deps/npm/man/man1/npm-help-search.1 index 9660ca29697e9f..9b61ca2bacbe62 100644 --- a/deps/npm/man/man1/npm-help-search.1 +++ b/deps/npm/man/man1/npm-help-search.1 @@ -1,4 +1,4 @@ -.TH "NPM-HELP-SEARCH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-HELP-SEARCH" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-help-search\fR - Search npm help documentation .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-help.1 b/deps/npm/man/man1/npm-help.1 index a8c70fdc88a9ae..1a9cacd9992837 100644 --- a/deps/npm/man/man1/npm-help.1 +++ b/deps/npm/man/man1/npm-help.1 @@ -1,4 +1,4 @@ -.TH "NPM-HELP" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-HELP" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-help\fR - Get help on npm .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-hook.1 b/deps/npm/man/man1/npm-hook.1 index 2c07a5afadbb64..121633a214df82 100644 --- a/deps/npm/man/man1/npm-hook.1 +++ b/deps/npm/man/man1/npm-hook.1 @@ -1,4 +1,4 @@ -.TH "NPM-HOOK" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-HOOK" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-hook\fR - Manage registry hooks .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-init.1 b/deps/npm/man/man1/npm-init.1 index cb7339dc0f4ff0..e3f38fa0718afd 100644 --- a/deps/npm/man/man1/npm-init.1 +++ b/deps/npm/man/man1/npm-init.1 @@ -1,4 +1,4 @@ -.TH "NPM-INIT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INIT" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-init\fR - Create a package.json file .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install-ci-test.1 b/deps/npm/man/man1/npm-install-ci-test.1 index 52defde693a506..f9521016c9fe1c 100644 --- a/deps/npm/man/man1/npm-install-ci-test.1 +++ b/deps/npm/man/man1/npm-install-ci-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL-CI-TEST" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INSTALL-CI-TEST" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-install-ci-test\fR - Install a project with a clean slate and run tests .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install-test.1 b/deps/npm/man/man1/npm-install-test.1 index e2023e1a985474..a32baa859c8a75 100644 --- a/deps/npm/man/man1/npm-install-test.1 +++ b/deps/npm/man/man1/npm-install-test.1 @@ -1,4 +1,4 @@ 
-.TH "NPM-INSTALL-TEST" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INSTALL-TEST" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-install-test\fR - Install package(s) and run tests .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install.1 b/deps/npm/man/man1/npm-install.1 index 59b3a8c81ac73f..510736aecbe21e 100644 --- a/deps/npm/man/man1/npm-install.1 +++ b/deps/npm/man/man1/npm-install.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INSTALL" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-install\fR - Install a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-link.1 b/deps/npm/man/man1/npm-link.1 index e6106495400fdc..00378601d85fec 100644 --- a/deps/npm/man/man1/npm-link.1 +++ b/deps/npm/man/man1/npm-link.1 @@ -1,4 +1,4 @@ -.TH "NPM-LINK" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LINK" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-link\fR - Symlink a package folder .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-login.1 b/deps/npm/man/man1/npm-login.1 index 36f2bf2d0f35dc..dd76470ae39984 100644 --- a/deps/npm/man/man1/npm-login.1 +++ b/deps/npm/man/man1/npm-login.1 @@ -1,4 +1,4 @@ -.TH "NPM-LOGIN" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LOGIN" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-login\fR - Login to a registry user account .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-logout.1 b/deps/npm/man/man1/npm-logout.1 index 78cba39ccd85c9..b8f5117faf5f17 100644 --- a/deps/npm/man/man1/npm-logout.1 +++ b/deps/npm/man/man1/npm-logout.1 @@ -1,4 +1,4 @@ -.TH "NPM-LOGOUT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LOGOUT" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-logout\fR - Log out of the registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1 index b0364fe1531753..70f25355090dee 100644 --- a/deps/npm/man/man1/npm-ls.1 +++ b/deps/npm/man/man1/npm-ls.1 @@ -1,4 +1,4 @@ -.TH "NPM-LS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LS" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-ls\fR - List installed packages .SS "Synopsis" @@ -20,7 +20,7 @@ Positional arguments are \fBname@version-range\fR identifiers, which will limit .P .RS 2 .nf -npm@10.9.0 /path/to/npm +npm@10.9.1 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 .fi diff --git a/deps/npm/man/man1/npm-org.1 b/deps/npm/man/man1/npm-org.1 index 0465a6b5bddb54..96cd8f8188792b 100644 --- a/deps/npm/man/man1/npm-org.1 +++ b/deps/npm/man/man1/npm-org.1 @@ -1,4 +1,4 @@ -.TH "NPM-ORG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ORG" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-org\fR - Manage orgs .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-outdated.1 b/deps/npm/man/man1/npm-outdated.1 index ad1cf07823630f..190a362f9db0e2 100644 --- a/deps/npm/man/man1/npm-outdated.1 +++ b/deps/npm/man/man1/npm-outdated.1 @@ -1,4 +1,4 @@ -.TH "NPM-OUTDATED" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-OUTDATED" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-outdated\fR - Check for outdated packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-owner.1 b/deps/npm/man/man1/npm-owner.1 index 5842e5a51d125f..48306a70964528 100644 --- a/deps/npm/man/man1/npm-owner.1 +++ b/deps/npm/man/man1/npm-owner.1 @@ -1,4 +1,4 @@ -.TH "NPM-OWNER" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-OWNER" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-owner\fR - Manage package owners .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-pack.1 b/deps/npm/man/man1/npm-pack.1 index 
6cc2acd4b580f7..b482c24822da3a 100644 --- a/deps/npm/man/man1/npm-pack.1 +++ b/deps/npm/man/man1/npm-pack.1 @@ -1,4 +1,4 @@ -.TH "NPM-PACK" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PACK" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-pack\fR - Create a tarball from a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ping.1 b/deps/npm/man/man1/npm-ping.1 index ad03cbbb6a1398..136348a2d95d7d 100644 --- a/deps/npm/man/man1/npm-ping.1 +++ b/deps/npm/man/man1/npm-ping.1 @@ -1,4 +1,4 @@ -.TH "NPM-PING" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PING" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-ping\fR - Ping npm registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-pkg.1 b/deps/npm/man/man1/npm-pkg.1 index 2f45876234ec3d..be9998d4157760 100644 --- a/deps/npm/man/man1/npm-pkg.1 +++ b/deps/npm/man/man1/npm-pkg.1 @@ -1,4 +1,4 @@ -.TH "NPM-PKG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PKG" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-pkg\fR - Manages your package.json .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-prefix.1 b/deps/npm/man/man1/npm-prefix.1 index cac44344c71d79..44f138d72ee25e 100644 --- a/deps/npm/man/man1/npm-prefix.1 +++ b/deps/npm/man/man1/npm-prefix.1 @@ -1,4 +1,4 @@ -.TH "NPM-PREFIX" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PREFIX" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-prefix\fR - Display prefix .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-profile.1 b/deps/npm/man/man1/npm-profile.1 index 28e3f41244ace1..cb8c012d8b51b3 100644 --- a/deps/npm/man/man1/npm-profile.1 +++ b/deps/npm/man/man1/npm-profile.1 @@ -1,4 +1,4 @@ -.TH "NPM-PROFILE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PROFILE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-profile\fR - Change settings on your registry profile .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-prune.1 b/deps/npm/man/man1/npm-prune.1 index 7634c75f0901fa..5ce07fb8b0b197 100644 --- a/deps/npm/man/man1/npm-prune.1 +++ b/deps/npm/man/man1/npm-prune.1 @@ -1,4 +1,4 @@ -.TH "NPM-PRUNE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PRUNE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-prune\fR - Remove extraneous packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-publish.1 b/deps/npm/man/man1/npm-publish.1 index c6724b6fe2ce51..2d30658334dd0e 100644 --- a/deps/npm/man/man1/npm-publish.1 +++ b/deps/npm/man/man1/npm-publish.1 @@ -1,4 +1,4 @@ -.TH "NPM-PUBLISH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PUBLISH" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-publish\fR - Publish a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-query.1 b/deps/npm/man/man1/npm-query.1 index 0a3c6e665e7ecf..b4d6019ae216d3 100644 --- a/deps/npm/man/man1/npm-query.1 +++ b/deps/npm/man/man1/npm-query.1 @@ -1,4 +1,4 @@ -.TH "NPM-QUERY" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-QUERY" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-query\fR - Dependency selector query .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-rebuild.1 b/deps/npm/man/man1/npm-rebuild.1 index e537cec373e3c6..a2fa8cae922e05 100644 --- a/deps/npm/man/man1/npm-rebuild.1 +++ b/deps/npm/man/man1/npm-rebuild.1 @@ -1,4 +1,4 @@ -.TH "NPM-REBUILD" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-REBUILD" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-rebuild\fR - Rebuild a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-repo.1 b/deps/npm/man/man1/npm-repo.1 index 869541f6faaa51..3b3d714f4e255a 100644 --- a/deps/npm/man/man1/npm-repo.1 +++ b/deps/npm/man/man1/npm-repo.1 
@@ -1,4 +1,4 @@ -.TH "NPM-REPO" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-REPO" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-repo\fR - Open package repository page in the browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-restart.1 b/deps/npm/man/man1/npm-restart.1 index b0e3b0deead7ef..dbdf51cfacb73d 100644 --- a/deps/npm/man/man1/npm-restart.1 +++ b/deps/npm/man/man1/npm-restart.1 @@ -1,4 +1,4 @@ -.TH "NPM-RESTART" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-RESTART" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-restart\fR - Restart a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-root.1 b/deps/npm/man/man1/npm-root.1 index 94a132557e3979..f1d47ad60d2844 100644 --- a/deps/npm/man/man1/npm-root.1 +++ b/deps/npm/man/man1/npm-root.1 @@ -1,4 +1,4 @@ -.TH "NPM-ROOT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ROOT" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-root\fR - Display npm root .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-run-script.1 b/deps/npm/man/man1/npm-run-script.1 index 424c733d6a0523..a34eed7ad0a0bd 100644 --- a/deps/npm/man/man1/npm-run-script.1 +++ b/deps/npm/man/man1/npm-run-script.1 @@ -1,4 +1,4 @@ -.TH "NPM-RUN-SCRIPT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-RUN-SCRIPT" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-run-script\fR - Run arbitrary package scripts .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-sbom.1 b/deps/npm/man/man1/npm-sbom.1 index 7bb8199c8b11c8..85b9ecff24b6ac 100644 --- a/deps/npm/man/man1/npm-sbom.1 +++ b/deps/npm/man/man1/npm-sbom.1 @@ -1,4 +1,4 @@ -.TH "NPM-SBOM" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SBOM" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-sbom\fR - Generate a Software Bill of Materials (SBOM) .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-search.1 b/deps/npm/man/man1/npm-search.1 index 4fe67890bc949a..3526282de54112 100644 --- a/deps/npm/man/man1/npm-search.1 +++ b/deps/npm/man/man1/npm-search.1 @@ -1,4 +1,4 @@ -.TH "NPM-SEARCH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SEARCH" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-search\fR - Search for packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-shrinkwrap.1 b/deps/npm/man/man1/npm-shrinkwrap.1 index ed3c0cb03ccca7..454e2719fbb91d 100644 --- a/deps/npm/man/man1/npm-shrinkwrap.1 +++ b/deps/npm/man/man1/npm-shrinkwrap.1 @@ -1,4 +1,4 @@ -.TH "NPM-SHRINKWRAP" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SHRINKWRAP" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-shrinkwrap\fR - Lock down dependency versions for publication .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-star.1 b/deps/npm/man/man1/npm-star.1 index 83d0ff230ec1bb..a848760bcfda03 100644 --- a/deps/npm/man/man1/npm-star.1 +++ b/deps/npm/man/man1/npm-star.1 @@ -1,4 +1,4 @@ -.TH "NPM-STAR" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-STAR" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-star\fR - Mark your favorite packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-stars.1 b/deps/npm/man/man1/npm-stars.1 index bdeffec7a3b1b5..7a1deaae083530 100644 --- a/deps/npm/man/man1/npm-stars.1 +++ b/deps/npm/man/man1/npm-stars.1 @@ -1,4 +1,4 @@ -.TH "NPM-STARS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-STARS" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-stars\fR - View packages marked as favorites .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-start.1 b/deps/npm/man/man1/npm-start.1 index c3cb48c6f72576..d2ecf94226cc75 100644 --- a/deps/npm/man/man1/npm-start.1 +++ b/deps/npm/man/man1/npm-start.1 
@@ -1,4 +1,4 @@ -.TH "NPM-START" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-START" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-start\fR - Start a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-stop.1 b/deps/npm/man/man1/npm-stop.1 index 36ef105209b7f0..8b125a42495e95 100644 --- a/deps/npm/man/man1/npm-stop.1 +++ b/deps/npm/man/man1/npm-stop.1 @@ -1,4 +1,4 @@ -.TH "NPM-STOP" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-STOP" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-stop\fR - Stop a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-team.1 b/deps/npm/man/man1/npm-team.1 index 6712c8f92b23c8..10b05e3250360d 100644 --- a/deps/npm/man/man1/npm-team.1 +++ b/deps/npm/man/man1/npm-team.1 @@ -1,4 +1,4 @@ -.TH "NPM-TEAM" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-TEAM" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-team\fR - Manage organization teams and team memberships .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-test.1 b/deps/npm/man/man1/npm-test.1 index 5e63e9c6d76ab3..ab418a7842c014 100644 --- a/deps/npm/man/man1/npm-test.1 +++ b/deps/npm/man/man1/npm-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-TEST" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-TEST" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-test\fR - Test a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-token.1 b/deps/npm/man/man1/npm-token.1 index 29a2c361574121..dda637ff7337fc 100644 --- a/deps/npm/man/man1/npm-token.1 +++ b/deps/npm/man/man1/npm-token.1 @@ -1,4 +1,4 @@ -.TH "NPM-TOKEN" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-TOKEN" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-token\fR - Manage your authentication tokens .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-uninstall.1 b/deps/npm/man/man1/npm-uninstall.1 index 0c10a60c1f07e7..cdcf6e3821a384 100644 --- a/deps/npm/man/man1/npm-uninstall.1 +++ b/deps/npm/man/man1/npm-uninstall.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNINSTALL" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UNINSTALL" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-uninstall\fR - Remove a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-unpublish.1 b/deps/npm/man/man1/npm-unpublish.1 index 098af2e59310bc..118e362d07e850 100644 --- a/deps/npm/man/man1/npm-unpublish.1 +++ b/deps/npm/man/man1/npm-unpublish.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNPUBLISH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UNPUBLISH" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-unpublish\fR - Remove a package from the registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-unstar.1 b/deps/npm/man/man1/npm-unstar.1 index 7326984aa8b4d2..a03b7936ecd133 100644 --- a/deps/npm/man/man1/npm-unstar.1 +++ b/deps/npm/man/man1/npm-unstar.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNSTAR" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UNSTAR" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-unstar\fR - Remove an item from your favorite packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-update.1 b/deps/npm/man/man1/npm-update.1 index 797a9e601cf3d2..db8153d5ec5cc1 100644 --- a/deps/npm/man/man1/npm-update.1 +++ b/deps/npm/man/man1/npm-update.1 @@ -1,4 +1,4 @@ -.TH "NPM-UPDATE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UPDATE" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-update\fR - Update packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-version.1 b/deps/npm/man/man1/npm-version.1 index d0a7dcee6fea65..26e496ffea90af 100644 --- a/deps/npm/man/man1/npm-version.1 +++ b/deps/npm/man/man1/npm-version.1 @@ -1,4 +1,4 @@ -.TH "NPM-VERSION" "1" "October 2024" 
"NPM@10.9.0" "" +.TH "NPM-VERSION" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-version\fR - Bump a package version .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-view.1 b/deps/npm/man/man1/npm-view.1 index 2925f8d7d2cffe..66f23dea09cf36 100644 --- a/deps/npm/man/man1/npm-view.1 +++ b/deps/npm/man/man1/npm-view.1 @@ -1,4 +1,4 @@ -.TH "NPM-VIEW" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-VIEW" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-view\fR - View registry info .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-whoami.1 b/deps/npm/man/man1/npm-whoami.1 index f283db5c9d247a..5065d9439b7476 100644 --- a/deps/npm/man/man1/npm-whoami.1 +++ b/deps/npm/man/man1/npm-whoami.1 @@ -1,4 +1,4 @@ -.TH "NPM-WHOAMI" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-WHOAMI" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-whoami\fR - Display npm username .SS "Synopsis" diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1 index b2034bc50fa5e3..465bd98994480c 100644 --- a/deps/npm/man/man1/npm.1 +++ b/deps/npm/man/man1/npm.1 @@ -1,4 +1,4 @@ -.TH "NPM" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm\fR - javascript package manager .SS "Synopsis" @@ -12,7 +12,7 @@ npm Note: This command is unaware of workspaces. .SS "Version" .P -10.9.0 +10.9.1 .SS "Description" .P npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency conflicts intelligently. diff --git a/deps/npm/man/man1/npx.1 b/deps/npm/man/man1/npx.1 index a1000443cc665e..d2b9ca19f0b07b 100644 --- a/deps/npm/man/man1/npx.1 +++ b/deps/npm/man/man1/npx.1 @@ -1,4 +1,4 @@ -.TH "NPX" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPX" "1" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpx\fR - Run a command from a local or remote npm package .SS "Synopsis" diff --git a/deps/npm/man/man5/folders.5 b/deps/npm/man/man5/folders.5 index 6e3045729c46ca..4ec7f9e421229b 100644 --- a/deps/npm/man/man5/folders.5 +++ b/deps/npm/man/man5/folders.5 @@ -1,4 +1,4 @@ -.TH "FOLDERS" "5" "October 2024" "NPM@10.9.0" "" +.TH "FOLDERS" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBfolders\fR - Folder Structures Used by npm .SS "Description" diff --git a/deps/npm/man/man5/install.5 b/deps/npm/man/man5/install.5 index f5fabf02d7431b..8d2ba04de228df 100644 --- a/deps/npm/man/man5/install.5 +++ b/deps/npm/man/man5/install.5 @@ -1,4 +1,4 @@ -.TH "INSTALL" "5" "October 2024" "NPM@10.9.0" "" +.TH "INSTALL" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBinstall\fR - Download and install node and npm .SS "Description" diff --git a/deps/npm/man/man5/npm-global.5 b/deps/npm/man/man5/npm-global.5 index 6e3045729c46ca..4ec7f9e421229b 100644 --- a/deps/npm/man/man5/npm-global.5 +++ b/deps/npm/man/man5/npm-global.5 @@ -1,4 +1,4 @@ -.TH "FOLDERS" "5" "October 2024" "NPM@10.9.0" "" +.TH "FOLDERS" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBfolders\fR - Folder Structures Used by npm .SS "Description" diff --git a/deps/npm/man/man5/npm-json.5 b/deps/npm/man/man5/npm-json.5 index 49773dda01d706..4dfb85cf0ec290 100644 --- a/deps/npm/man/man5/npm-json.5 +++ b/deps/npm/man/man5/npm-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE.JSON" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBpackage.json\fR - Specifics of npm's package.json handling .SS "Description" diff --git a/deps/npm/man/man5/npm-shrinkwrap-json.5 b/deps/npm/man/man5/npm-shrinkwrap-json.5 index 
d1f394d23aa1ce..18d600846e61e0 100644 --- a/deps/npm/man/man5/npm-shrinkwrap-json.5 +++ b/deps/npm/man/man5/npm-shrinkwrap-json.5 @@ -1,4 +1,4 @@ -.TH "NPM-SHRINKWRAP.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SHRINKWRAP.JSON" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpm-shrinkwrap.json\fR - A publishable lockfile .SS "Description" diff --git a/deps/npm/man/man5/npmrc.5 b/deps/npm/man/man5/npmrc.5 index 3b78f989c23545..c0aea346446c26 100644 --- a/deps/npm/man/man5/npmrc.5 +++ b/deps/npm/man/man5/npmrc.5 @@ -1,4 +1,4 @@ -.TH "NPMRC" "5" "October 2024" "NPM@10.9.0" "" +.TH "NPMRC" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBnpmrc\fR - The npm config files .SS "Description" diff --git a/deps/npm/man/man5/package-json.5 b/deps/npm/man/man5/package-json.5 index 49773dda01d706..4dfb85cf0ec290 100644 --- a/deps/npm/man/man5/package-json.5 +++ b/deps/npm/man/man5/package-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE.JSON" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBpackage.json\fR - Specifics of npm's package.json handling .SS "Description" diff --git a/deps/npm/man/man5/package-lock-json.5 b/deps/npm/man/man5/package-lock-json.5 index df3dcca1c4fa75..a8e005d6eacd95 100644 --- a/deps/npm/man/man5/package-lock-json.5 +++ b/deps/npm/man/man5/package-lock-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE-LOCK.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE-LOCK.JSON" "5" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBpackage-lock.json\fR - A manifestation of the manifest .SS "Description" diff --git a/deps/npm/man/man7/config.7 b/deps/npm/man/man7/config.7 index ee166c9d4ccee9..91182bebb7bb58 100644 --- a/deps/npm/man/man7/config.7 +++ b/deps/npm/man/man7/config.7 @@ -1,4 +1,4 @@ -.TH "CONFIG" "7" "October 2024" "NPM@10.9.0" "" +.TH "CONFIG" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBconfig\fR - More than you probably want to know about npm configuration .SS "Description" diff --git a/deps/npm/man/man7/dependency-selectors.7 b/deps/npm/man/man7/dependency-selectors.7 index 54d80fee4dc58d..9bad66a7133656 100644 --- a/deps/npm/man/man7/dependency-selectors.7 +++ b/deps/npm/man/man7/dependency-selectors.7 @@ -1,4 +1,4 @@ -.TH "QUERYING" "7" "October 2024" "NPM@10.9.0" "" +.TH "QUERYING" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBQuerying\fR - Dependency Selector Syntax & Querying .SS "Description" diff --git a/deps/npm/man/man7/developers.7 b/deps/npm/man/man7/developers.7 index aa907f3f51deda..45e5e7f88ef717 100644 --- a/deps/npm/man/man7/developers.7 +++ b/deps/npm/man/man7/developers.7 @@ -1,4 +1,4 @@ -.TH "DEVELOPERS" "7" "October 2024" "NPM@10.9.0" "" +.TH "DEVELOPERS" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBdevelopers\fR - Developer Guide .SS "Description" diff --git a/deps/npm/man/man7/logging.7 b/deps/npm/man/man7/logging.7 index f36ae6e4703048..82995205531df6 100644 --- a/deps/npm/man/man7/logging.7 +++ b/deps/npm/man/man7/logging.7 @@ -1,4 +1,4 @@ -.TH "LOGGING" "7" "October 2024" "NPM@10.9.0" "" +.TH "LOGGING" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBLogging\fR - Why, What & How We Log .SS "Description" diff --git a/deps/npm/man/man7/orgs.7 b/deps/npm/man/man7/orgs.7 index 2866d7dd9800a5..a8fb5afa8fca2d 100644 --- a/deps/npm/man/man7/orgs.7 +++ b/deps/npm/man/man7/orgs.7 @@ -1,4 +1,4 @@ -.TH "ORGS" "7" "October 2024" "NPM@10.9.0" "" +.TH "ORGS" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBorgs\fR - Working with Teams & Orgs .SS "Description" diff --git a/deps/npm/man/man7/package-spec.7 
b/deps/npm/man/man7/package-spec.7 index 0d76a5f3017253..25e65529ecd110 100644 --- a/deps/npm/man/man7/package-spec.7 +++ b/deps/npm/man/man7/package-spec.7 @@ -1,4 +1,4 @@ -.TH "PACKAGE-SPEC" "7" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE-SPEC" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBpackage-spec\fR - Package name specifier .SS "Description" diff --git a/deps/npm/man/man7/registry.7 b/deps/npm/man/man7/registry.7 index 61ad2702f7b235..4183b46aca6022 100644 --- a/deps/npm/man/man7/registry.7 +++ b/deps/npm/man/man7/registry.7 @@ -1,4 +1,4 @@ -.TH "REGISTRY" "7" "October 2024" "NPM@10.9.0" "" +.TH "REGISTRY" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBregistry\fR - The JavaScript Package Registry .SS "Description" diff --git a/deps/npm/man/man7/removal.7 b/deps/npm/man/man7/removal.7 index a76b743553c304..2323ea863ee840 100644 --- a/deps/npm/man/man7/removal.7 +++ b/deps/npm/man/man7/removal.7 @@ -1,4 +1,4 @@ -.TH "REMOVAL" "7" "October 2024" "NPM@10.9.0" "" +.TH "REMOVAL" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBremoval\fR - Cleaning the Slate .SS "Synopsis" diff --git a/deps/npm/man/man7/scope.7 b/deps/npm/man/man7/scope.7 index 94c22ccbd9f8fa..89631de3d8d8ca 100644 --- a/deps/npm/man/man7/scope.7 +++ b/deps/npm/man/man7/scope.7 @@ -1,4 +1,4 @@ -.TH "SCOPE" "7" "October 2024" "NPM@10.9.0" "" +.TH "SCOPE" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBscope\fR - Scoped packages .SS "Description" diff --git a/deps/npm/man/man7/scripts.7 b/deps/npm/man/man7/scripts.7 index 925d3181dc9174..dffe4dacc6357e 100644 --- a/deps/npm/man/man7/scripts.7 +++ b/deps/npm/man/man7/scripts.7 @@ -1,4 +1,4 @@ -.TH "SCRIPTS" "7" "October 2024" "NPM@10.9.0" "" +.TH "SCRIPTS" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBscripts\fR - How npm handles the "scripts" field .SS "Description" diff --git a/deps/npm/man/man7/workspaces.7 b/deps/npm/man/man7/workspaces.7 index 1812eb73eb7ece..f68b418b73f994 100644 --- a/deps/npm/man/man7/workspaces.7 +++ b/deps/npm/man/man7/workspaces.7 @@ -1,4 +1,4 @@ -.TH "WORKSPACES" "7" "October 2024" "NPM@10.9.0" "" +.TH "WORKSPACES" "7" "November 2024" "NPM@10.9.1" "" .SH "NAME" \fBworkspaces\fR - Working with workspaces .SS "Description" diff --git a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js index 130a0929b8ce8c..ddfdba39a783a4 100644 --- a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js +++ b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js @@ -1,7 +1,9 @@ export default function ansiRegex({onlyFirst = false} = {}) { + // Valid string terminator sequences are BEL, ESC\, and 0x9c + const ST = '(?:\\u0007|\\u001B\\u005C|\\u009C)'; const pattern = [ - '[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?\\u0007)', - '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-ntqry=><~]))' + `[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?${ST})`, + '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-nq-uy=><~]))', ].join('|'); return new RegExp(pattern, onlyFirst ? 
undefined : 'g'); diff --git a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json index 7bbb563bf2a70a..49f3f61021512b 100644 --- a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json +++ b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json @@ -1,6 +1,6 @@ { "name": "ansi-regex", - "version": "6.0.1", + "version": "6.1.0", "description": "Regular expression for matching ANSI escape codes", "license": "MIT", "repository": "chalk/ansi-regex", @@ -12,6 +12,8 @@ }, "type": "module", "exports": "./index.js", + "types": "./index.d.ts", + "sideEffects": false, "engines": { "node": ">=12" }, @@ -51,8 +53,9 @@ "pattern" ], "devDependencies": { + "ansi-escapes": "^5.0.0", "ava": "^3.15.0", - "tsd": "^0.14.0", - "xo": "^0.38.2" + "tsd": "^0.21.0", + "xo": "^0.54.2" } } diff --git a/deps/npm/node_modules/tuf-js/node_modules/proc-log/LICENSE b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/LICENSE similarity index 90% rename from deps/npm/node_modules/tuf-js/node_modules/proc-log/LICENSE rename to deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/LICENSE index 83837797202b70..a03cd0ed0b338b 100644 --- a/deps/npm/node_modules/tuf-js/node_modules/proc-log/LICENSE +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/LICENSE @@ -1,6 +1,6 @@ The ISC License -Copyright (c) GitHub, Inc. +Copyright (c) Isaac Z. Schlueter, Kat Marchán, npm, Inc., and Contributors Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md new file mode 100644 index 00000000000000..dbb0051de23a4d --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md @@ -0,0 +1,283 @@ +# pacote + +Fetches package manifests and tarballs from the npm registry. + +## USAGE + +```js +const pacote = require('pacote') + +// get a package manifest +pacote.manifest('foo@1.x').then(manifest => console.log('got it', manifest)) + +// extract a package into a folder +pacote.extract('github:npm/cli', 'some/path', options) + .then(({from, resolved, integrity}) => { + console.log('extracted!', from, resolved, integrity) + }) + +pacote.tarball('https://server.com/package.tgz').then(data => { + console.log('got ' + data.length + ' bytes of tarball data') +}) +``` + +`pacote` works with any kind of package specifier that npm can install. If +you can pass it to the npm CLI, you can pass it to pacote. (In fact, that's +exactly what the npm CLI does.) + +Anything that you can do with one kind of package, you can do with another. + +Data that isn't relevant (like a packument for a tarball) will be +simulated. + +`prepare` scripts will be run when generating tarballs from `git` and +`directory` locations, to simulate what _would_ be published to the +registry, so that you get a working package instead of just raw source +code that might need to be transpiled. + +## CLI + +This module exports a command line interface that can do most of what is +described below. Run `pacote -h` to learn more. 
+ +``` +Pacote - The JavaScript Package Handler, v10.1.1 + +Usage: + + pacote resolve + Resolve a specifier and output the fully resolved target + Returns integrity and from if '--long' flag is set. + + pacote manifest + Fetch a manifest and print to stdout + + pacote packument + Fetch a full packument and print to stdout + + pacote tarball [] + Fetch a package tarball and save to + If is missing or '-', the tarball will be streamed to stdout. + + pacote extract + Extract a package to the destination folder. + +Configuration values all match the names of configs passed to npm, or +options passed to Pacote. Additional flags for this executable: + + --long Print an object from 'resolve', including integrity and spec. + --json Print result objects as JSON rather than node's default. + (This is the default if stdout is not a TTY.) + --help -h Print this helpful text. + +For example '--cache=/path/to/folder' will use that folder as the cache. +``` + +## API + +The `spec` refers to any kind of package specifier that npm can install. +If you can pass it to the npm CLI, you can pass it to pacote. (In fact, +that's exactly what the npm CLI does.) + +See below for valid `opts` values. + +* `pacote.resolve(spec, opts)` Resolve a specifier like `foo@latest` or + `github:user/project` all the way to a tarball url, tarball file, or git + repo with commit hash. + +* `pacote.extract(spec, dest, opts)` Extract a package's tarball into a + destination folder. Returns a promise that resolves to the + `{from,resolved,integrity}` of the extracted package. + +* `pacote.manifest(spec, opts)` Fetch (or simulate) a package's manifest + (basically, the `package.json` file, plus a bit of metadata). + See below for more on manifests and packuments. Returns a Promise that + resolves to the manifest object. + +* `pacote.packument(spec, opts)` Fetch (or simulate) a package's packument + (basically, the top-level package document listing all the manifests that + the registry returns). See below for more on manifests and packuments. + Returns a Promise that resolves to the packument object. + +* `pacote.tarball(spec, opts)` Get a package tarball data as a buffer in + memory. Returns a Promise that resolves to the tarball data Buffer, with + `from`, `resolved`, and `integrity` fields attached. + +* `pacote.tarball.file(spec, dest, opts)` Save a package tarball data to + a file on disk. Returns a Promise that resolves to + `{from,integrity,resolved}` of the fetched tarball. + +* `pacote.tarball.stream(spec, streamHandler, opts)` Fetch a tarball and + make the stream available to the `streamHandler` function. + + This is mostly an internal function, but it is exposed because it does + provide some functionality that may be difficult to achieve otherwise. + + The `streamHandler` function MUST return a Promise that resolves when + the stream (and all associated work) is ended, or rejects if the stream + has an error. + + The `streamHandler` function MAY be called multiple times, as Pacote + retries requests in some scenarios, such as cache corruption or + retriable network failures. + +### Options + +Options are passed to +[`npm-registry-fetch`](http://npm.im/npm-registry-fetch) and +[`cacache`](http://npm.im/cacache), so in addition to these, anything for +those modules can be given to pacote as well. + +Options object is cloned, and mutated along the way to add integrity, +resolved, and other properties, as they are determined. + +* `cache` Where to store cache entries and temp files. 
Passed to + [`cacache`](http://npm.im/cacache). Defaults to the same cache directory + that npm will use by default, based on platform and environment. +* `where` Base folder for resolving relative `file:` dependencies. +* `resolved` Shortcut for looking up resolved values. Should be specified + if known. +* `integrity` Expected integrity of fetched package tarball. If specified, + tarballs with mismatched integrity values will raise an `EINTEGRITY` + error. +* `umask` Permission mode mask for extracted files and directories. + Defaults to `0o22`. See "Extracted File Modes" below. +* `fmode` Minimum permission mode for extracted files. Defaults to + `0o666`. See "Extracted File Modes" below. +* `dmode` Minimum permission mode for extracted directories. Defaults to + `0o777`. See "Extracted File Modes" below. +* `preferOnline` Prefer to revalidate cache entries, even when it would not + be strictly necessary. Default `false`. +* `before` When picking a manifest from a packument, only consider + packages published before the specified date. Default `null`. +* `defaultTag` The default `dist-tag` to use when choosing a manifest from a + packument. Defaults to `latest`. +* `registry` The npm registry to use by default. Defaults to + `https://registry.npmjs.org/`. +* `fullMetadata` Fetch the full metadata from the registry for packuments, + including information not strictly required for installation (author, + description, etc.) Defaults to `true` when `before` is set, since the + version publish time is part of the extended packument metadata. +* `fullReadJson` Use the slower `read-package-json` package insted of + `read-package-json-fast` in order to include extra fields like "readme" in + the manifest. Defaults to `false`. +* `packumentCache` For registry packuments only, you may provide a `Map` + object which will be used to cache packument requests between pacote + calls. This allows you to easily avoid hitting the registry multiple + times (even just to validate the cache) for a given packument, since it + is unlikely to change in the span of a single command. +* `verifySignatures` A boolean that will make pacote verify the + integrity signature of a manifest, if present. There must be a + configured `_keys` entry in the config that is scoped to the + registry the manifest is being fetched from. +* `verifyAttestations` A boolean that will make pacote verify Sigstore + attestations, if present. There must be a configured `_keys` entry in the + config that is scoped to the registry the manifest is being fetched from. +* `tufCache` Where to store metadata/target files when retrieving the package + attestation key material via TUF. Defaults to the same cache directory that + npm will use by default, based on platform and environment. + +### Advanced API + +Each different type of fetcher is exposed for more advanced usage such as +using helper methods from this classes: + +* `DirFetcher` +* `FileFetcher` +* `GitFetcher` +* `RegistryFetcher` +* `RemoteFetcher` + +## Extracted File Modes + +Files are extracted with a mode matching the following formula: + +``` +( (tarball entry mode value) | (minimum mode option) ) ~ (umask) +``` + +This is in order to prevent unreadable files or unlistable directories from +cluttering a project's `node_modules` folder, even if the package tarball +specifies that the file should be inaccessible. 
+ +It also prevents files from being group- or world-writable without explicit +opt-in by the user, because all file and directory modes are masked against +the `umask` value. + +So, a file which is `0o771` in the tarball, using the default `fmode` of +`0o666` and `umask` of `0o22`, will result in a file mode of `0o755`: + +``` +(0o771 | 0o666) => 0o777 +(0o777 ~ 0o22) => 0o755 +``` + +In almost every case, the defaults are appropriate. To respect exactly +what is in the package tarball (even if this makes an unusable system), set +both `dmode` and `fmode` options to `0`. Otherwise, the `umask` config +should be used in most cases where file mode modifications are required, +and this functions more or less the same as the `umask` value in most Unix +systems. + +## Extracted File Ownership + +When running as `root` on Unix systems, all extracted files and folders +will have their owning `uid` and `gid` values set to match the ownership +of the containing folder. + +This prevents `root`-owned files showing up in a project's `node_modules` +folder when a user runs `sudo npm install`. + +## Manifests + +A `manifest` is similar to a `package.json` file. However, it has a few +pieces of extra metadata, and sometimes lacks metadata that is inessential +to package installation. + +In addition to the common `package.json` fields, manifests include: + +* `manifest._resolved` The tarball url or file path where the package + artifact can be found. +* `manifest._from` A normalized form of the spec passed in as an argument. +* `manifest._integrity` The integrity value for the package artifact. +* `manifest._id` The canonical spec of this package version: name@version. +* `manifest.dist` Registry manifests (those included in a packument) have a + `dist` object. Only `tarball` is required, though at least one of + `shasum` or `integrity` is almost always present. + + * `tarball` The url to the associated package artifact. (Copied by + Pacote to `manifest._resolved`.) + * `integrity` The integrity SRI string for the artifact. This may not + be present for older packages on the npm registry. (Copied by Pacote + to `manifest._integrity`.) + * `shasum` Legacy integrity value. Hexadecimal-encoded sha1 hash. + (Converted to an SRI string and copied by Pacote to + `manifest._integrity` when `dist.integrity` is not present.) + * `fileCount` Number of files in the tarball. + * `unpackedSize` Size on disk of the package when unpacked. + * `signatures` Signatures of the shasum. Includes the keyid that + correlates to a [`key from the npm + registry`](https://registry.npmjs.org/-/npm/v1/keys) + +## Packuments + +A packument is the top-level package document that lists the set of +manifests for available versions for a package. + +When a packument is fetched with `accept: +application/vnd.npm.install-v1+json` in the HTTP headers, only the most +minimum necessary metadata is returned. Additional metadata is returned +when fetched with only `accept: application/json`. + +For Pacote's purposes, the following fields are relevant: + +* `versions` An object where each key is a version, and each value is the + manifest for that version. +* `dist-tags` An object mapping dist-tags to version numbers. This is how + `foo@latest` gets turned into `foo@1.2.3`. +* `time` In the full packument, an object mapping version numbers to + publication times, for the `opts.before` functionality. + +Pacote adds the following field, regardless of the accept header: + +* `_contentLength` The size of the packument. 
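The "Extracted File Modes" portion of the pacote README added above states the rule informally as `(entry mode | minimum mode) ~ (umask)`. As a small sketch, under the assumption that "masked against the umask" means clearing the umask bits (which is what the README's own `0o771`/`0o666`/`0o22` example works out to), the computation in plain JavaScript looks like the following; the helper name is purely illustrative, not pacote's internal API:

```js
// Sketch of the extracted-file-mode rule described in the pacote README above:
// OR the tarball entry's mode with the configured minimum (fmode for files,
// dmode for directories), then clear the bits set in the umask.
// (Illustrative helper only, not an actual pacote export.)
const extractedMode = (entryMode, minimumMode, umask) =>
  (entryMode | minimumMode) & ~umask

// Worked example from the README: a 0o771 entry with the default fmode 0o666
// and umask 0o22 extracts as 0o755.
console.log(extractedMode(0o771, 0o666, 0o22).toString(8)) // '755'
```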
diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js
new file mode 100755
index 00000000000000..f35b62ca71a537
--- /dev/null
+++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js
@@ -0,0 +1,158 @@
+#!/usr/bin/env node
+
+const run = conf => {
+  const pacote = require('../')
+  switch (conf._[0]) {
+    case 'resolve':
+    case 'manifest':
+    case 'packument':
+      if (conf._[0] === 'resolve' && conf.long) {
+        return pacote.manifest(conf._[1], conf).then(mani => ({
+          resolved: mani._resolved,
+          integrity: mani._integrity,
+          from: mani._from,
+        }))
+      }
+      return pacote[conf._[0]](conf._[1], conf)
+
+    case 'tarball':
+      if (!conf._[2] || conf._[2] === '-') {
+        return pacote.tarball.stream(conf._[1], stream => {
+          stream.pipe(
+            conf.testStdout ||
+            /* istanbul ignore next */
+            process.stdout
+          )
+          // make sure it resolves something falsey
+          return stream.promise().then(() => {
+            return false
+          })
+        }, conf)
+      } else {
+        return pacote.tarball.file(conf._[1], conf._[2], conf)
+      }
+
+    case 'extract':
+      return pacote.extract(conf._[1], conf._[2], conf)
+
+    default: /* istanbul ignore next */ {
+      throw new Error(`bad command: ${conf._[0]}`)
+    }
+  }
+}
+
+const version = require('../package.json').version
+const usage = () =>
+`Pacote - The JavaScript Package Handler, v${version}
+
+Usage:
+
+  pacote resolve <spec>
+    Resolve a specifier and output the fully resolved target
+    Returns integrity and from if '--long' flag is set.
+
+  pacote manifest <spec>
+    Fetch a manifest and print to stdout
+
+  pacote packument <spec>
+    Fetch a full packument and print to stdout
+
+  pacote tarball <spec> [<filename>]
+    Fetch a package tarball and save to <filename>
+    If <filename> is missing or '-', the tarball will be streamed to stdout.
+
+  pacote extract <spec> <folder>
+    Extract a package to the destination folder.
+
+Configuration values all match the names of configs passed to npm, or
+options passed to Pacote. Additional flags for this executable:
+
+  --long     Print an object from 'resolve', including integrity and spec.
+  --json     Print result objects as JSON rather than node's default.
+             (This is the default if stdout is not a TTY.)
+  --help -h  Print this helpful text.
+
+For example '--cache=/path/to/folder' will use that folder as the cache.
+`
+
+const shouldJSON = (conf, result) =>
+  conf.json ||
+  !process.stdout.isTTY &&
+  conf.json === undefined &&
+  result &&
+  typeof result === 'object'
+
+const pretty = (conf, result) =>
+  shouldJSON(conf, result) ? JSON.stringify(result, 0, 2) : result
+
+let addedLogListener = false
+const main = args => {
+  const conf = parse(args)
+  if (conf.help || conf.h) {
+    return console.log(usage())
+  }
+
+  if (!addedLogListener) {
+    process.on('log', console.error)
+    addedLogListener = true
+  }
+
+  try {
+    return run(conf)
+      .then(result => result && console.log(pretty(conf, result)))
+      .catch(er => {
+        console.error(er)
+        process.exit(1)
+      })
+  } catch (er) {
+    console.error(er.message)
+    console.error(usage())
+  }
+}
+
+const parseArg = arg => {
+  const split = arg.slice(2).split('=')
+  const k = split.shift()
+  const v = split.join('=')
+  const no = /^no-/.test(k) && !v
+  const key = (no ? k.slice(3) : k)
+    .replace(/^tag$/, 'defaultTag')
+    .replace(/-([a-z])/g, (_, c) => c.toUpperCase())
+  const value = v ?
v.replace(/^~/, process.env.HOME) : !no + return { key, value } +} + +const parse = args => { + const conf = { + _: [], + cache: process.env.HOME + '/.npm/_cacache', + } + let dashdash = false + args.forEach(arg => { + if (dashdash) { + conf._.push(arg) + } else if (arg === '--') { + dashdash = true + } else if (arg === '-h') { + conf.help = true + } else if (/^--/.test(arg)) { + const { key, value } = parseArg(arg) + conf[key] = value + } else { + conf._.push(arg) + } + }) + return conf +} + +if (module === require.main) { + main(process.argv.slice(2)) +} else { + module.exports = { + main, + run, + usage, + parseArg, + parse, + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js new file mode 100644 index 00000000000000..04846eb8a6e221 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js @@ -0,0 +1,105 @@ +const { resolve } = require('node:path') +const packlist = require('npm-packlist') +const runScript = require('@npmcli/run-script') +const tar = require('tar') +const { Minipass } = require('minipass') +const Fetcher = require('./fetcher.js') +const FileFetcher = require('./file.js') +const _ = require('./util/protected.js') +const tarCreateOptions = require('./util/tar-create-options.js') + +class DirFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + // just the fully resolved filename + this.resolved = this.spec.fetchSpec + + this.tree = opts.tree || null + this.Arborist = opts.Arborist || null + } + + // exposes tarCreateOptions as public API + static tarCreateOptions (manifest) { + return tarCreateOptions(manifest) + } + + get types () { + return ['directory'] + } + + #prepareDir () { + return this.manifest().then(mani => { + if (!mani.scripts || !mani.scripts.prepare) { + return + } + if (this.opts.ignoreScripts) { + return + } + + // we *only* run prepare. + // pre/post-pack is run by the npm CLI for publish and pack, + // but this function is *also* run when installing git deps + const stdio = this.opts.foregroundScripts ? 'inherit' : 'pipe' + + return runScript({ + // this || undefined is because runScript will be unhappy with the default null value + scriptShell: this.opts.scriptShell || undefined, + pkg: mani, + event: 'prepare', + path: this.resolved, + stdio, + env: { + npm_package_resolved: this.resolved, + npm_package_integrity: this.integrity, + npm_package_json: resolve(this.resolved, 'package.json'), + }, + }) + }) + } + + [_.tarballFromResolved] () { + if (!this.tree && !this.Arborist) { + throw new Error('DirFetcher requires either a tree or an Arborist constructor to pack') + } + + const stream = new Minipass() + stream.resolved = this.resolved + stream.integrity = this.integrity + + const { prefix, workspaces } = this.opts + + // run the prepare script, get the list of files, and tar it up + // pipe to the stream, and proxy errors the chain. 
+ this.#prepareDir() + .then(async () => { + if (!this.tree) { + const arb = new this.Arborist({ path: this.resolved }) + this.tree = await arb.loadActual() + } + return packlist(this.tree, { path: this.resolved, prefix, workspaces }) + }) + .then(files => tar.c(tarCreateOptions(this.package), files) + .on('error', er => stream.emit('error', er)).pipe(stream)) + .catch(er => stream.emit('error', er)) + return stream + } + + manifest () { + if (this.package) { + return Promise.resolve(this.package) + } + + return this[_.readPackageJson](this.resolved) + .then(mani => this.package = { + ...mani, + _integrity: this.integrity && String(this.integrity), + _resolved: this.resolved, + _from: this.from, + }) + } + + packument () { + return FileFetcher.prototype.packument.apply(this) + } +} +module.exports = DirFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js new file mode 100644 index 00000000000000..f2ac97619d3af1 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js @@ -0,0 +1,497 @@ +// This is the base class that the other fetcher types in lib +// all descend from. +// It handles the unpacking and retry logic that is shared among +// all of the other Fetcher types. + +const { basename, dirname } = require('node:path') +const { rm, mkdir } = require('node:fs/promises') +const PackageJson = require('@npmcli/package-json') +const cacache = require('cacache') +const fsm = require('fs-minipass') +const getContents = require('@npmcli/installed-package-contents') +const npa = require('npm-package-arg') +const retry = require('promise-retry') +const ssri = require('ssri') +const tar = require('tar') +const { Minipass } = require('minipass') +const { log } = require('proc-log') +const _ = require('./util/protected.js') +const cacheDir = require('./util/cache-dir.js') +const isPackageBin = require('./util/is-package-bin.js') +const removeTrailingSlashes = require('./util/trailing-slashes.js') + +// Pacote is only concerned with the package.json contents +const packageJsonPrepare = (p) => PackageJson.prepare(p).then(pkg => pkg.content) +const packageJsonNormalize = (p) => PackageJson.normalize(p).then(pkg => pkg.content) + +class FetcherBase { + constructor (spec, opts) { + if (!opts || typeof opts !== 'object') { + throw new TypeError('options object is required') + } + this.spec = npa(spec, opts.where) + + this.allowGitIgnore = !!opts.allowGitIgnore + + // a bit redundant because presumably the caller already knows this, + // but it makes it easier to not have to keep track of the requested + // spec when we're dispatching thousands of these at once, and normalizing + // is nice. saveSpec is preferred if set, because it turns stuff like + // x/y#committish into github:x/y#committish. use name@rawSpec for + // registry deps so that we turn xyz and xyz@ -> xyz@ + this.from = this.spec.registry + ? `${this.spec.name}@${this.spec.rawSpec}` : this.spec.saveSpec + + this.#assertType() + // clone the opts object so that others aren't upset when we mutate it + // by adding/modifying the integrity value. 
+ this.opts = { ...opts } + + this.cache = opts.cache || cacheDir().cacache + this.tufCache = opts.tufCache || cacheDir().tufcache + this.resolved = opts.resolved || null + + // default to caching/verifying with sha512, that's what we usually have + // need to change this default, or start overriding it, when sha512 + // is no longer strong enough. + this.defaultIntegrityAlgorithm = opts.defaultIntegrityAlgorithm || 'sha512' + + if (typeof opts.integrity === 'string') { + this.opts.integrity = ssri.parse(opts.integrity) + } + + this.package = null + this.type = this.constructor.name + this.fmode = opts.fmode || 0o666 + this.dmode = opts.dmode || 0o777 + // we don't need a default umask, because we don't chmod files coming + // out of package tarballs. they're forced to have a mode that is + // valid, regardless of what's in the tarball entry, and then we let + // the process's umask setting do its job. but if configured, we do + // respect it. + this.umask = opts.umask || 0 + + this.preferOnline = !!opts.preferOnline + this.preferOffline = !!opts.preferOffline + this.offline = !!opts.offline + + this.before = opts.before + this.fullMetadata = this.before ? true : !!opts.fullMetadata + this.fullReadJson = !!opts.fullReadJson + this[_.readPackageJson] = this.fullReadJson + ? packageJsonPrepare + : packageJsonNormalize + + // rrh is a registry hostname or 'never' or 'always' + // defaults to registry.npmjs.org + this.replaceRegistryHost = (!opts.replaceRegistryHost || opts.replaceRegistryHost === 'npmjs') ? + 'registry.npmjs.org' : opts.replaceRegistryHost + + this.defaultTag = opts.defaultTag || 'latest' + this.registry = removeTrailingSlashes(opts.registry || 'https://registry.npmjs.org') + + // command to run 'prepare' scripts on directories and git dirs + // To use pacote with yarn, for example, set npmBin to 'yarn' + // and npmCliConfig with yarn's equivalents. + this.npmBin = opts.npmBin || 'npm' + + // command to install deps for preparing + this.npmInstallCmd = opts.npmInstallCmd || ['install', '--force'] + + // XXX fill more of this in based on what we know from this.opts + // we explicitly DO NOT fill in --tag, though, since we are often + // going to be packing in the context of a publish, which may set + // a dist-tag, but certainly wants to keep defaulting to latest. + this.npmCliConfig = opts.npmCliConfig || [ + `--cache=${dirname(this.cache)}`, + `--prefer-offline=${!!this.preferOffline}`, + `--prefer-online=${!!this.preferOnline}`, + `--offline=${!!this.offline}`, + ...(this.before ? [`--before=${this.before.toISOString()}`] : []), + '--no-progress', + '--no-save', + '--no-audit', + // override any omit settings from the environment + '--include=dev', + '--include=peer', + '--include=optional', + // we need the actual things, not just the lockfile + '--no-package-lock-only', + '--no-dry-run', + ] + } + + get integrity () { + return this.opts.integrity || null + } + + set integrity (i) { + if (!i) { + return + } + + i = ssri.parse(i) + const current = this.opts.integrity + + // do not ever update an existing hash value, but do + // merge in NEW algos and hashes that we don't already have. + if (current) { + current.merge(i) + } else { + this.opts.integrity = i + } + } + + get notImplementedError () { + return new Error('not implemented in this fetcher type: ' + this.type) + } + + // override in child classes + // Returns a Promise that resolves to this.resolved string value + resolve () { + return this.resolved ? 
Promise.resolve(this.resolved) + : Promise.reject(this.notImplementedError) + } + + packument () { + return Promise.reject(this.notImplementedError) + } + + // override in child class + // returns a manifest containing: + // - name + // - version + // - _resolved + // - _integrity + // - plus whatever else was in there (corgi, full metadata, or pj file) + manifest () { + return Promise.reject(this.notImplementedError) + } + + // private, should be overridden. + // Note that they should *not* calculate or check integrity or cache, + // but *just* return the raw tarball data stream. + [_.tarballFromResolved] () { + throw this.notImplementedError + } + + // public, should not be overridden + tarball () { + return this.tarballStream(stream => stream.concat().then(data => { + data.integrity = this.integrity && String(this.integrity) + data.resolved = this.resolved + data.from = this.from + return data + })) + } + + // private + // Note: cacache will raise a EINTEGRITY error if the integrity doesn't match + #tarballFromCache () { + const startTime = Date.now() + const stream = cacache.get.stream.byDigest(this.cache, this.integrity, this.opts) + const elapsedTime = Date.now() - startTime + // cache is good, so log it as a hit in particular since there was no fetch logged + log.http( + 'cache', + `${this.spec} ${elapsedTime}ms (cache hit)` + ) + return stream + } + + get [_.cacheFetches] () { + return true + } + + #istream (stream) { + // if not caching this, just return it + if (!this.opts.cache || !this[_.cacheFetches]) { + // instead of creating a new integrity stream, we only piggyback on the + // provided stream's events + if (stream.hasIntegrityEmitter) { + stream.on('integrity', i => this.integrity = i) + return stream + } + + const istream = ssri.integrityStream(this.opts) + istream.on('integrity', i => this.integrity = i) + stream.on('error', err => istream.emit('error', err)) + return stream.pipe(istream) + } + + // we have to return a stream that gets ALL the data, and proxies errors, + // but then pipe from the original tarball stream into the cache as well. + // To do this without losing any data, and since the cacache put stream + // is not a passthrough, we have to pipe from the original stream into + // the cache AFTER we pipe into the middleStream. Since the cache stream + // has an asynchronous flush to write its contents to disk, we need to + // defer the middleStream end until the cache stream ends. + const middleStream = new Minipass() + stream.on('error', err => middleStream.emit('error', err)) + stream.pipe(middleStream, { end: false }) + const cstream = cacache.put.stream( + this.opts.cache, + `pacote:tarball:${this.from}`, + this.opts + ) + cstream.on('integrity', i => this.integrity = i) + cstream.on('error', err => stream.emit('error', err)) + stream.pipe(cstream) + + // eslint-disable-next-line promise/catch-or-return + cstream.promise().catch(() => {}).then(() => middleStream.end()) + return middleStream + } + + pickIntegrityAlgorithm () { + return this.integrity ? this.integrity.pickAlgorithm(this.opts) + : this.defaultIntegrityAlgorithm + } + + // TODO: check error class, once those are rolled out to our deps + isDataCorruptionError (er) { + return er.code === 'EINTEGRITY' || er.code === 'Z_DATA_ERROR' + } + + // override the types getter + get types () { + return false + } + + #assertType () { + if (this.types && !this.types.includes(this.spec.type)) { + throw new TypeError(`Wrong spec type (${ + this.spec.type + }) for ${ + this.constructor.name + }. 
Supported types: ${this.types.join(', ')}`) + } + } + + // We allow ENOENTs from cacache, but not anywhere else. + // An ENOENT trying to read a tgz file, for example, is Right Out. + isRetriableError (er) { + // TODO: check error class, once those are rolled out to our deps + return this.isDataCorruptionError(er) || + er.code === 'ENOENT' || + er.code === 'EISDIR' + } + + // Mostly internal, but has some uses + // Pass in a function which returns a promise + // Function will be called 1 or more times with streams that may fail. + // Retries: + // Function MUST handle errors on the stream by rejecting the promise, + // so that retry logic can pick it up and either retry or fail whatever + // promise it was making (ie, failing extraction, etc.) + // + // The return value of this method is a Promise that resolves the same + // as whatever the streamHandler resolves to. + // + // This should never be overridden by child classes, but it is public. + tarballStream (streamHandler) { + // Only short-circuit via cache if we have everything else we'll need, + // and the user has not expressed a preference for checking online. + + const fromCache = ( + !this.preferOnline && + this.integrity && + this.resolved + ) ? streamHandler(this.#tarballFromCache()).catch(er => { + if (this.isDataCorruptionError(er)) { + log.warn('tarball', `cached data for ${ + this.spec + } (${this.integrity}) seems to be corrupted. Refreshing cache.`) + return this.cleanupCached().then(() => { + throw er + }) + } else { + throw er + } + }) : null + + const fromResolved = er => { + if (er) { + if (!this.isRetriableError(er)) { + throw er + } + log.silly('tarball', `no local data for ${ + this.spec + }. Extracting by manifest.`) + } + return this.resolve().then(() => retry(tryAgain => + streamHandler(this.#istream(this[_.tarballFromResolved]())) + .catch(streamErr => { + // Most likely data integrity. A cache ENOENT error is unlikely + // here, since we're definitely not reading from the cache, but it + // IS possible that the fetch subsystem accessed the cache, and the + // entry got blown away or something. Try one more time to be sure. + if (this.isRetriableError(streamErr)) { + log.warn('tarball', `tarball data for ${ + this.spec + } (${this.integrity}) seems to be corrupted. Trying again.`) + return this.cleanupCached().then(() => tryAgain(streamErr)) + } + throw streamErr + }), { retries: 1, minTimeout: 0, maxTimeout: 0 })) + } + + return fromCache ? fromCache.catch(fromResolved) : fromResolved() + } + + cleanupCached () { + return cacache.rm.content(this.cache, this.integrity, this.opts) + } + + #empty (path) { + return getContents({ path, depth: 1 }).then(contents => Promise.all( + contents.map(entry => rm(entry, { recursive: true, force: true })))) + } + + async #mkdir (dest) { + await this.#empty(dest) + return await mkdir(dest, { recursive: true }) + } + + // extraction is always the same. the only difference is where + // the tarball comes from. 
+ async extract (dest) { + await this.#mkdir(dest) + return this.tarballStream((tarball) => this.#extract(dest, tarball)) + } + + #toFile (dest) { + return this.tarballStream(str => new Promise((res, rej) => { + const writer = new fsm.WriteStream(dest) + str.on('error', er => writer.emit('error', er)) + writer.on('error', er => rej(er)) + writer.on('close', () => res({ + integrity: this.integrity && String(this.integrity), + resolved: this.resolved, + from: this.from, + })) + str.pipe(writer) + })) + } + + // don't use this.#mkdir because we don't want to rimraf anything + async tarballFile (dest) { + const dir = dirname(dest) + await mkdir(dir, { recursive: true }) + return this.#toFile(dest) + } + + #extract (dest, tarball) { + const extractor = tar.x(this.#tarxOptions({ cwd: dest })) + const p = new Promise((resolve, reject) => { + extractor.on('end', () => { + resolve({ + resolved: this.resolved, + integrity: this.integrity && String(this.integrity), + from: this.from, + }) + }) + + extractor.on('error', er => { + log.warn('tar', er.message) + log.silly('tar', er) + reject(er) + }) + + tarball.on('error', er => reject(er)) + }) + + tarball.pipe(extractor) + return p + } + + // always ensure that entries are at least as permissive as our configured + // dmode/fmode, but never more permissive than the umask allows. + #entryMode (path, mode, type) { + const m = /Directory|GNUDumpDir/.test(type) ? this.dmode + : /File$/.test(type) ? this.fmode + : /* istanbul ignore next - should never happen in a pkg */ 0 + + // make sure package bins are executable + const exe = isPackageBin(this.package, path) ? 0o111 : 0 + // always ensure that files are read/writable by the owner + return ((mode | m) & ~this.umask) | exe | 0o600 + } + + #tarxOptions ({ cwd }) { + const sawIgnores = new Set() + return { + cwd, + noChmod: true, + noMtime: true, + filter: (name, entry) => { + if (/Link$/.test(entry.type)) { + return false + } + entry.mode = this.#entryMode(entry.path, entry.mode, entry.type) + // this replicates the npm pack behavior where .gitignore files + // are treated like .npmignore files, but only if a .npmignore + // file is not present. 
+ if (/File$/.test(entry.type)) { + const base = basename(entry.path) + if (base === '.npmignore') { + sawIgnores.add(entry.path) + } else if (base === '.gitignore' && !this.allowGitIgnore) { + // rename, but only if there's not already a .npmignore + const ni = entry.path.replace(/\.gitignore$/, '.npmignore') + if (sawIgnores.has(ni)) { + return false + } + entry.path = ni + } + return true + } + }, + strip: 1, + onwarn: /* istanbul ignore next - we can trust that tar logs */ + (code, msg, data) => { + log.warn('tar', code, msg) + log.silly('tar', code, msg, data) + }, + umask: this.umask, + // always ignore ownership info from tarball metadata + preserveOwner: false, + } + } +} + +module.exports = FetcherBase + +// Child classes +const GitFetcher = require('./git.js') +const RegistryFetcher = require('./registry.js') +const FileFetcher = require('./file.js') +const DirFetcher = require('./dir.js') +const RemoteFetcher = require('./remote.js') + +// Get an appropriate fetcher object from a spec and options +FetcherBase.get = (rawSpec, opts = {}) => { + const spec = npa(rawSpec, opts.where) + switch (spec.type) { + case 'git': + return new GitFetcher(spec, opts) + + case 'remote': + return new RemoteFetcher(spec, opts) + + case 'version': + case 'range': + case 'tag': + case 'alias': + return new RegistryFetcher(spec.subSpec || spec, opts) + + case 'file': + return new FileFetcher(spec, opts) + + case 'directory': + return new DirFetcher(spec, opts) + + default: + throw new TypeError('Unknown spec type: ' + spec.type) + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js new file mode 100644 index 00000000000000..2021325085e4f0 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js @@ -0,0 +1,94 @@ +const { resolve } = require('node:path') +const { stat, chmod } = require('node:fs/promises') +const cacache = require('cacache') +const fsm = require('fs-minipass') +const Fetcher = require('./fetcher.js') +const _ = require('./util/protected.js') + +class FileFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + // just the fully resolved filename + this.resolved = this.spec.fetchSpec + } + + get types () { + return ['file'] + } + + manifest () { + if (this.package) { + return Promise.resolve(this.package) + } + + // have to unpack the tarball for this. + return cacache.tmp.withTmp(this.cache, this.opts, dir => + this.extract(dir) + .then(() => this[_.readPackageJson](dir)) + .then(mani => this.package = { + ...mani, + _integrity: this.integrity && String(this.integrity), + _resolved: this.resolved, + _from: this.from, + })) + } + + #exeBins (pkg, dest) { + if (!pkg.bin) { + return Promise.resolve() + } + + return Promise.all(Object.keys(pkg.bin).map(async k => { + const script = resolve(dest, pkg.bin[k]) + // Best effort. Ignore errors here, the only result is that + // a bin script is not executable. But if it's missing or + // something, we just leave it for a later stage to trip over + // when we can provide a more useful contextual error. + try { + const st = await stat(script) + const mode = st.mode | 0o111 + if (mode === st.mode) { + return + } + await chmod(script, mode) + } catch { + // Ignore errors here + } + })) + } + + extract (dest) { + // if we've already loaded the manifest, then the super got it. + // but if not, read the unpacked manifest and chmod properly. 
+ return super.extract(dest) + .then(result => this.package ? result + : this[_.readPackageJson](dest).then(pkg => + this.#exeBins(pkg, dest)).then(() => result)) + } + + [_.tarballFromResolved] () { + // create a read stream and return it + return new fsm.ReadStream(this.resolved) + } + + packument () { + // simulate based on manifest + return this.manifest().then(mani => ({ + name: mani.name, + 'dist-tags': { + [this.defaultTag]: mani.version, + }, + versions: { + [mani.version]: { + ...mani, + dist: { + tarball: `file:${this.resolved}`, + integrity: this.integrity && String(this.integrity), + }, + }, + }, + })) + } +} + +module.exports = FileFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js new file mode 100644 index 00000000000000..077193a86f026f --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js @@ -0,0 +1,317 @@ +const cacache = require('cacache') +const git = require('@npmcli/git') +const npa = require('npm-package-arg') +const pickManifest = require('npm-pick-manifest') +const { Minipass } = require('minipass') +const { log } = require('proc-log') +const DirFetcher = require('./dir.js') +const Fetcher = require('./fetcher.js') +const FileFetcher = require('./file.js') +const RemoteFetcher = require('./remote.js') +const _ = require('./util/protected.js') +const addGitSha = require('./util/add-git-sha.js') +const npm = require('./util/npm.js') + +const hashre = /^[a-f0-9]{40}$/ + +// get the repository url. +// prefer https if there's auth, since ssh will drop that. +// otherwise, prefer ssh if available (more secure). +// We have to add the git+ back because npa suppresses it. +const repoUrl = (h, opts) => + h.sshurl && !(h.https && h.auth) && addGitPlus(h.sshurl(opts)) || + h.https && addGitPlus(h.https(opts)) + +// add git+ to the url, but only one time. +const addGitPlus = url => url && `git+${url}`.replace(/^(git\+)+/, 'git+') + +class GitFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + + // we never want to compare integrity for git dependencies: npm/rfcs#525 + if (this.opts.integrity) { + delete this.opts.integrity + log.warn(`skipping integrity check for git dependency ${this.spec.fetchSpec}`) + } + + this.resolvedRef = null + if (this.spec.hosted) { + this.from = this.spec.hosted.shortcut({ noCommittish: false }) + } + + // shortcut: avoid full clone when we can go straight to the tgz + // if we have the full sha and it's a hosted git platform + if (this.spec.gitCommittish && hashre.test(this.spec.gitCommittish)) { + this.resolvedSha = this.spec.gitCommittish + // use hosted.tarball() when we shell to RemoteFetcher later + this.resolved = this.spec.hosted + ? repoUrl(this.spec.hosted, { noCommittish: false }) + : this.spec.rawSpec + } else { + this.resolvedSha = '' + } + + this.Arborist = opts.Arborist || null + } + + // just exposed to make it easier to test all the combinations + static repoUrl (hosted, opts) { + return repoUrl(hosted, opts) + } + + get types () { + return ['git'] + } + + resolve () { + // likely a hosted git repo with a sha, so get the tarball url + // but in general, no reason to resolve() more than necessary! + if (this.resolved) { + return super.resolve() + } + + // fetch the git repo and then look at the current hash + const h = this.spec.hosted + // try to use ssh, fall back to git. + return h + ? 
this.#resolvedFromHosted(h) + : this.#resolvedFromRepo(this.spec.fetchSpec) + } + + // first try https, since that's faster and passphrase-less for + // public repos, and supports private repos when auth is provided. + // Fall back to SSH to support private repos + // NB: we always store the https url in resolved field if auth + // is present, otherwise ssh if the hosted type provides it + #resolvedFromHosted (hosted) { + return this.#resolvedFromRepo(hosted.https && hosted.https()).catch(er => { + // Throw early since we know pathspec errors will fail again if retried + if (er instanceof git.errors.GitPathspecError) { + throw er + } + const ssh = hosted.sshurl && hosted.sshurl() + // no fallthrough if we can't fall through or have https auth + if (!ssh || hosted.auth) { + throw er + } + return this.#resolvedFromRepo(ssh) + }) + } + + #resolvedFromRepo (gitRemote) { + // XXX make this a custom error class + if (!gitRemote) { + return Promise.reject(new Error(`No git url for ${this.spec}`)) + } + const gitRange = this.spec.gitRange + const name = this.spec.name + return git.revs(gitRemote, this.opts).then(remoteRefs => { + return gitRange ? pickManifest({ + versions: remoteRefs.versions, + 'dist-tags': remoteRefs['dist-tags'], + name, + }, gitRange, this.opts) + : this.spec.gitCommittish ? + remoteRefs.refs[this.spec.gitCommittish] || + remoteRefs.refs[remoteRefs.shas[this.spec.gitCommittish]] + : remoteRefs.refs.HEAD // no git committish, get default head + }).then(revDoc => { + // the committish provided isn't in the rev list + // things like HEAD~3 or @yesterday can land here. + if (!revDoc || !revDoc.sha) { + return this.#resolvedFromClone() + } + + this.resolvedRef = revDoc + this.resolvedSha = revDoc.sha + this.#addGitSha(revDoc.sha) + return this.resolved + }) + } + + #setResolvedWithSha (withSha) { + // we haven't cloned, so a tgz download is still faster + // of course, if it's not a known host, we can't do that. + this.resolved = !this.spec.hosted ? withSha + : repoUrl(npa(withSha).hosted, { noCommittish: false }) + } + + // when we get the git sha, we affix it to our spec to build up + // either a git url with a hash, or a tarball download URL + #addGitSha (sha) { + this.#setResolvedWithSha(addGitSha(this.spec, sha)) + } + + #resolvedFromClone () { + // do a full or shallow clone, then look at the HEAD + // kind of wasteful, but no other option, really + return this.#clone(() => this.resolved) + } + + #prepareDir (dir) { + return this[_.readPackageJson](dir).then(mani => { + // no need if we aren't going to do any preparation. + const scripts = mani.scripts + if (!mani.workspaces && (!scripts || !( + scripts.postinstall || + scripts.build || + scripts.preinstall || + scripts.install || + scripts.prepack || + scripts.prepare))) { + return + } + + // to avoid cases where we have an cycle of git deps that depend + // on one another, we only ever do preparation for one instance + // of a given git dep along the chain of installations. + // Note that this does mean that a dependency MAY in theory end up + // trying to run its prepare script using a dependency that has not + // been properly prepared itself, but that edge case is smaller + // and less hazardous than a fork bomb of npm and git commands. + const noPrepare = !process.env._PACOTE_NO_PREPARE_ ? 
[] + : process.env._PACOTE_NO_PREPARE_.split('\n') + if (noPrepare.includes(this.resolved)) { + log.info('prepare', 'skip prepare, already seen', this.resolved) + return + } + noPrepare.push(this.resolved) + + // the DirFetcher will do its own preparation to run the prepare scripts + // All we have to do is put the deps in place so that it can succeed. + return npm( + this.npmBin, + [].concat(this.npmInstallCmd).concat(this.npmCliConfig), + dir, + { ...process.env, _PACOTE_NO_PREPARE_: noPrepare.join('\n') }, + { message: 'git dep preparation failed' } + ) + }) + } + + [_.tarballFromResolved] () { + const stream = new Minipass() + stream.resolved = this.resolved + stream.from = this.from + + // check it out and then shell out to the DirFetcher tarball packer + this.#clone(dir => this.#prepareDir(dir) + .then(() => new Promise((res, rej) => { + if (!this.Arborist) { + throw new Error('GitFetcher requires an Arborist constructor to pack a tarball') + } + const df = new DirFetcher(`file:${dir}`, { + ...this.opts, + Arborist: this.Arborist, + resolved: null, + integrity: null, + }) + const dirStream = df[_.tarballFromResolved]() + dirStream.on('error', rej) + dirStream.on('end', res) + dirStream.pipe(stream) + }))).catch( + /* istanbul ignore next: very unlikely and hard to test */ + er => stream.emit('error', er) + ) + return stream + } + + // clone a git repo into a temp folder (or fetch and unpack if possible) + // handler accepts a directory, and returns a promise that resolves + // when we're done with it, at which point, cacache deletes it + // + // TODO: after cloning, create a tarball of the folder, and add to the cache + // with cacache.put.stream(), using a key that's deterministic based on the + // spec and repo, so that we don't ever clone the same thing multiple times. + #clone (handler, tarballOk = true) { + const o = { tmpPrefix: 'git-clone' } + const ref = this.resolvedSha || this.spec.gitCommittish + const h = this.spec.hosted + const resolved = this.resolved + + // can be set manually to false to fall back to actual git clone + tarballOk = tarballOk && + h && resolved === repoUrl(h, { noCommittish: false }) && h.tarball + + return cacache.tmp.withTmp(this.cache, o, async tmp => { + // if we're resolved, and have a tarball url, shell out to RemoteFetcher + if (tarballOk) { + const nameat = this.spec.name ? `${this.spec.name}@` : '' + return new RemoteFetcher(h.tarball({ noCommittish: false }), { + ...this.opts, + allowGitIgnore: true, + pkgid: `git:${nameat}${this.resolved}`, + resolved: this.resolved, + integrity: null, // it'll always be different, if we have one + }).extract(tmp).then(() => handler(tmp), er => { + // fall back to ssh download if tarball fails + if (er.constructor.name.match(/^Http/)) { + return this.#clone(handler, false) + } else { + throw er + } + }) + } + + const sha = await ( + h ? this.#cloneHosted(ref, tmp) + : this.#cloneRepo(this.spec.fetchSpec, ref, tmp) + ) + this.resolvedSha = sha + if (!this.resolved) { + await this.#addGitSha(sha) + } + return handler(tmp) + }) + } + + // first try https, since that's faster and passphrase-less for + // public repos, and supports private repos when auth is provided. 
+ // Fall back to SSH to support private repos + // NB: we always store the https url in resolved field if auth + // is present, otherwise ssh if the hosted type provides it + #cloneHosted (ref, tmp) { + const hosted = this.spec.hosted + return this.#cloneRepo(hosted.https({ noCommittish: true }), ref, tmp) + .catch(er => { + // Throw early since we know pathspec errors will fail again if retried + if (er instanceof git.errors.GitPathspecError) { + throw er + } + const ssh = hosted.sshurl && hosted.sshurl({ noCommittish: true }) + // no fallthrough if we can't fall through or have https auth + if (!ssh || hosted.auth) { + throw er + } + return this.#cloneRepo(ssh, ref, tmp) + }) + } + + #cloneRepo (repo, ref, tmp) { + const { opts, spec } = this + return git.clone(repo, ref, tmp, { ...opts, spec }) + } + + manifest () { + if (this.package) { + return Promise.resolve(this.package) + } + + return this.spec.hosted && this.resolved + ? FileFetcher.prototype.manifest.apply(this) + : this.#clone(dir => + this[_.readPackageJson](dir) + .then(mani => this.package = { + ...mani, + _resolved: this.resolved, + _from: this.from, + })) + } + + packument () { + return FileFetcher.prototype.packument.apply(this) + } +} +module.exports = GitFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js new file mode 100644 index 00000000000000..f35314d275d5fd --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js @@ -0,0 +1,23 @@ +const { get } = require('./fetcher.js') +const GitFetcher = require('./git.js') +const RegistryFetcher = require('./registry.js') +const FileFetcher = require('./file.js') +const DirFetcher = require('./dir.js') +const RemoteFetcher = require('./remote.js') + +const tarball = (spec, opts) => get(spec, opts).tarball() +tarball.stream = (spec, handler, opts) => get(spec, opts).tarballStream(handler) +tarball.file = (spec, dest, opts) => get(spec, opts).tarballFile(dest) + +module.exports = { + GitFetcher, + RegistryFetcher, + FileFetcher, + DirFetcher, + RemoteFetcher, + resolve: (spec, opts) => get(spec, opts).resolve(), + extract: (spec, dest, opts) => get(spec, opts).extract(dest), + manifest: (spec, opts) => get(spec, opts).manifest(), + packument: (spec, opts) => get(spec, opts).packument(), + tarball, +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js new file mode 100644 index 00000000000000..1ecf4ee1773499 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js @@ -0,0 +1,369 @@ +const crypto = require('node:crypto') +const PackageJson = require('@npmcli/package-json') +const pickManifest = require('npm-pick-manifest') +const ssri = require('ssri') +const npa = require('npm-package-arg') +const sigstore = require('sigstore') +const fetch = require('npm-registry-fetch') +const Fetcher = require('./fetcher.js') +const RemoteFetcher = require('./remote.js') +const pacoteVersion = require('../package.json').version +const removeTrailingSlashes = require('./util/trailing-slashes.js') +const _ = require('./util/protected.js') + +// Corgis are cute. 
🐕🐶 +const corgiDoc = 'application/vnd.npm.install-v1+json; q=1.0, application/json; q=0.8, */*' +const fullDoc = 'application/json' + +// Some really old packages have no time field in their packument so we need a +// cutoff date. +const MISSING_TIME_CUTOFF = '2015-01-01T00:00:00.000Z' + +class RegistryFetcher extends Fetcher { + #cacheKey + constructor (spec, opts) { + super(spec, opts) + + // you usually don't want to fetch the same packument multiple times in + // the span of a given script or command, no matter how many pacote calls + // are made, so this lets us avoid doing that. It's only relevant for + // registry fetchers, because other types simulate their packument from + // the manifest, which they memoize on this.package, so it's very cheap + // already. + this.packumentCache = this.opts.packumentCache || null + + this.registry = fetch.pickRegistry(spec, opts) + this.packumentUrl = `${removeTrailingSlashes(this.registry)}/${this.spec.escapedName}` + this.#cacheKey = `${this.fullMetadata ? 'full' : 'corgi'}:${this.packumentUrl}` + + const parsed = new URL(this.registry) + const regKey = `//${parsed.host}${parsed.pathname}` + // unlike the nerf-darted auth keys, this one does *not* allow a mismatch + // of trailing slashes. It must match exactly. + if (this.opts[`${regKey}:_keys`]) { + this.registryKeys = this.opts[`${regKey}:_keys`] + } + + // XXX pacote <=9 has some logic to ignore opts.resolved if + // the resolved URL doesn't go to the same registry. + // Consider reproducing that here, to throw away this.resolved + // in that case. + } + + async resolve () { + // fetching the manifest sets resolved and (if present) integrity + await this.manifest() + if (!this.resolved) { + throw Object.assign( + new Error('Invalid package manifest: no `dist.tarball` field'), + { package: this.spec.toString() } + ) + } + return this.resolved + } + + #headers () { + return { + // npm will override UA, but ensure that we always send *something* + 'user-agent': this.opts.userAgent || + `pacote/${pacoteVersion} node/${process.version}`, + ...(this.opts.headers || {}), + 'pacote-version': pacoteVersion, + 'pacote-req-type': 'packument', + 'pacote-pkg-id': `registry:${this.spec.name}`, + accept: this.fullMetadata ? fullDoc : corgiDoc, + } + } + + async packument () { + // note this might be either an in-flight promise for a request, + // or the actual packument, but we never want to make more than + // one request at a time for the same thing regardless. 
+ if (this.packumentCache?.has(this.#cacheKey)) { + return this.packumentCache.get(this.#cacheKey) + } + + // npm-registry-fetch the packument + // set the appropriate header for corgis if fullMetadata isn't set + // return the res.json() promise + try { + const res = await fetch(this.packumentUrl, { + ...this.opts, + headers: this.#headers(), + spec: this.spec, + + // never check integrity for packuments themselves + integrity: null, + }) + const packument = await res.json() + const contentLength = res.headers.get('content-length') + if (contentLength) { + packument._contentLength = Number(contentLength) + } + this.packumentCache?.set(this.#cacheKey, packument) + return packument + } catch (err) { + this.packumentCache?.delete(this.#cacheKey) + if (err.code !== 'E404' || this.fullMetadata) { + throw err + } + // possible that corgis are not supported by this registry + this.fullMetadata = true + return this.packument() + } + } + + async manifest () { + if (this.package) { + return this.package + } + + // When verifying signatures, we need to fetch the full/uncompressed + // packument to get publish time as this is not included in the + // corgi/compressed packument. + if (this.opts.verifySignatures) { + this.fullMetadata = true + } + + const packument = await this.packument() + const steps = PackageJson.normalizeSteps.filter(s => s !== '_attributes') + const mani = await new PackageJson().fromContent(pickManifest(packument, this.spec.fetchSpec, { + ...this.opts, + defaultTag: this.defaultTag, + before: this.before, + })).normalize({ steps }).then(p => p.content) + + /* XXX add ETARGET and E403 revalidation of cached packuments here */ + + // add _time from packument if fetched with fullMetadata + const time = packument.time?.[mani.version] + if (time) { + mani._time = time + } + + // add _resolved and _integrity from dist object + const { dist } = mani + if (dist) { + this.resolved = mani._resolved = dist.tarball + mani._from = this.from + const distIntegrity = dist.integrity ? ssri.parse(dist.integrity) + : dist.shasum ? ssri.fromHex(dist.shasum, 'sha1', { ...this.opts }) + : null + if (distIntegrity) { + if (this.integrity && !this.integrity.match(distIntegrity)) { + // only bork if they have algos in common. + // otherwise we end up breaking if we have saved a sha512 + // previously for the tarball, but the manifest only + // provides a sha1, which is possible for older publishes. + // Otherwise, this is almost certainly a case of holding it + // wrong, and will result in weird or insecure behavior + // later on when building package tree. + for (const algo of Object.keys(this.integrity)) { + if (distIntegrity[algo]) { + throw Object.assign(new Error( + `Integrity checksum failed when using ${algo}: ` + + `wanted ${this.integrity} but got ${distIntegrity}.` + ), { code: 'EINTEGRITY' }) + } + } + } + // made it this far, the integrity is worthwhile. accept it. + // the setter here will take care of merging it into what we already + // had. 
+ this.integrity = distIntegrity + } + } + if (this.integrity) { + mani._integrity = String(this.integrity) + if (dist.signatures) { + if (this.opts.verifySignatures) { + // validate and throw on error, then set _signatures + const message = `${mani._id}:${mani._integrity}` + for (const signature of dist.signatures) { + const publicKey = this.registryKeys && + this.registryKeys.filter(key => (key.keyid === signature.keyid))[0] + if (!publicKey) { + throw Object.assign(new Error( + `${mani._id} has a registry signature with keyid: ${signature.keyid} ` + + 'but no corresponding public key can be found' + ), { code: 'EMISSINGSIGNATUREKEY' }) + } + + const publishedTime = Date.parse(mani._time || MISSING_TIME_CUTOFF) + const validPublicKey = !publicKey.expires || + publishedTime < Date.parse(publicKey.expires) + if (!validPublicKey) { + throw Object.assign(new Error( + `${mani._id} has a registry signature with keyid: ${signature.keyid} ` + + `but the corresponding public key has expired ${publicKey.expires}` + ), { code: 'EEXPIREDSIGNATUREKEY' }) + } + const verifier = crypto.createVerify('SHA256') + verifier.write(message) + verifier.end() + const valid = verifier.verify( + publicKey.pemkey, + signature.sig, + 'base64' + ) + if (!valid) { + throw Object.assign(new Error( + `${mani._id} has an invalid registry signature with ` + + `keyid: ${publicKey.keyid} and signature: ${signature.sig}` + ), { + code: 'EINTEGRITYSIGNATURE', + keyid: publicKey.keyid, + signature: signature.sig, + resolved: mani._resolved, + integrity: mani._integrity, + }) + } + } + mani._signatures = dist.signatures + } else { + mani._signatures = dist.signatures + } + } + + if (dist.attestations) { + if (this.opts.verifyAttestations) { + // Always fetch attestations from the current registry host + const attestationsPath = new URL(dist.attestations.url).pathname + const attestationsUrl = removeTrailingSlashes(this.registry) + attestationsPath + const res = await fetch(attestationsUrl, { + ...this.opts, + // disable integrity check for attestations json payload, we check the + // integrity in the verification steps below + integrity: null, + }) + const { attestations } = await res.json() + const bundles = attestations.map(({ predicateType, bundle }) => { + const statement = JSON.parse( + Buffer.from(bundle.dsseEnvelope.payload, 'base64').toString('utf8') + ) + const keyid = bundle.dsseEnvelope.signatures[0].keyid + const signature = bundle.dsseEnvelope.signatures[0].sig + + return { + predicateType, + bundle, + statement, + keyid, + signature, + } + }) + + const attestationKeyIds = bundles.map((b) => b.keyid).filter((k) => !!k) + const attestationRegistryKeys = (this.registryKeys || []) + .filter(key => attestationKeyIds.includes(key.keyid)) + if (!attestationRegistryKeys.length) { + throw Object.assign(new Error( + `${mani._id} has attestations but no corresponding public key(s) can be found` + ), { code: 'EMISSINGSIGNATUREKEY' }) + } + + for (const { predicateType, bundle, keyid, signature, statement } of bundles) { + const publicKey = attestationRegistryKeys.find(key => key.keyid === keyid) + // Publish attestations have a keyid set and a valid public key must be found + if (keyid) { + if (!publicKey) { + throw Object.assign(new Error( + `${mani._id} has attestations with keyid: ${keyid} ` + + 'but no corresponding public key can be found' + ), { code: 'EMISSINGSIGNATUREKEY' }) + } + + const integratedTime = new Date( + Number( + bundle.verificationMaterial.tlogEntries[0].integratedTime + ) * 1000 + ) + const 
validPublicKey = !publicKey.expires || + (integratedTime < Date.parse(publicKey.expires)) + if (!validPublicKey) { + throw Object.assign(new Error( + `${mani._id} has attestations with keyid: ${keyid} ` + + `but the corresponding public key has expired ${publicKey.expires}` + ), { code: 'EEXPIREDSIGNATUREKEY' }) + } + } + + const subject = { + name: statement.subject[0].name, + sha512: statement.subject[0].digest.sha512, + } + + // Only type 'version' can be turned into a PURL + const purl = this.spec.type === 'version' ? npa.toPurl(this.spec) : this.spec + // Verify the statement subject matches the package, version + if (subject.name !== purl) { + throw Object.assign(new Error( + `${mani._id} package name and version (PURL): ${purl} ` + + `doesn't match what was signed: ${subject.name}` + ), { code: 'EATTESTATIONSUBJECT' }) + } + + // Verify the statement subject matches the tarball integrity + const integrityHexDigest = ssri.parse(this.integrity).hexDigest() + if (subject.sha512 !== integrityHexDigest) { + throw Object.assign(new Error( + `${mani._id} package integrity (hex digest): ` + + `${integrityHexDigest} ` + + `doesn't match what was signed: ${subject.sha512}` + ), { code: 'EATTESTATIONSUBJECT' }) + } + + try { + // Provenance attestations are signed with a signing certificate + // (including the key) so we don't need to return a public key. + // + // Publish attestations are signed with a keyid so we need to + // specify a public key from the keys endpoint: `registry-host.tld/-/npm/v1/keys` + const options = { + tufCachePath: this.tufCache, + tufForceCache: true, + keySelector: publicKey ? () => publicKey.pemkey : undefined, + } + await sigstore.verify(bundle, options) + } catch (e) { + throw Object.assign(new Error( + `${mani._id} failed to verify attestation: ${e.message}` + ), { + code: 'EATTESTATIONVERIFY', + predicateType, + keyid, + signature, + resolved: mani._resolved, + integrity: mani._integrity, + }) + } + } + mani._attestations = dist.attestations + } else { + mani._attestations = dist.attestations + } + } + } + + this.package = mani + return this.package + } + + [_.tarballFromResolved] () { + // we use a RemoteFetcher to get the actual tarball stream + return new RemoteFetcher(this.resolved, { + ...this.opts, + resolved: this.resolved, + pkgid: `registry:${this.spec.name}@${this.resolved}`, + })[_.tarballFromResolved]() + } + + get types () { + return [ + 'tag', + 'version', + 'range', + ] + } +} +module.exports = RegistryFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js new file mode 100644 index 00000000000000..bd321e65a1f18a --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js @@ -0,0 +1,89 @@ +const fetch = require('npm-registry-fetch') +const { Minipass } = require('minipass') +const Fetcher = require('./fetcher.js') +const FileFetcher = require('./file.js') +const _ = require('./util/protected.js') +const pacoteVersion = require('../package.json').version + +class RemoteFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + this.resolved = this.spec.fetchSpec + const resolvedURL = new URL(this.resolved) + if (this.replaceRegistryHost !== 'never' + && (this.replaceRegistryHost === 'always' + || this.replaceRegistryHost === resolvedURL.host)) { + this.resolved = new URL(resolvedURL.pathname, this.registry).href + } + + // nam is a fermented pork 
sausage that is good to eat + const nameat = this.spec.name ? `${this.spec.name}@` : '' + this.pkgid = opts.pkgid ? opts.pkgid : `remote:${nameat}${this.resolved}` + } + + // Don't need to cache tarball fetches in pacote, because make-fetch-happen + // will write into cacache anyway. + get [_.cacheFetches] () { + return false + } + + [_.tarballFromResolved] () { + const stream = new Minipass() + stream.hasIntegrityEmitter = true + + const fetchOpts = { + ...this.opts, + headers: this.#headers(), + spec: this.spec, + integrity: this.integrity, + algorithms: [this.pickIntegrityAlgorithm()], + } + + // eslint-disable-next-line promise/always-return + fetch(this.resolved, fetchOpts).then(res => { + res.body.on('error', + /* istanbul ignore next - exceedingly rare and hard to simulate */ + er => stream.emit('error', er) + ) + + res.body.on('integrity', i => { + this.integrity = i + stream.emit('integrity', i) + }) + + res.body.pipe(stream) + }).catch(er => stream.emit('error', er)) + + return stream + } + + #headers () { + return { + // npm will override this, but ensure that we always send *something* + 'user-agent': this.opts.userAgent || + `pacote/${pacoteVersion} node/${process.version}`, + ...(this.opts.headers || {}), + 'pacote-version': pacoteVersion, + 'pacote-req-type': 'tarball', + 'pacote-pkg-id': this.pkgid, + ...(this.integrity ? { 'pacote-integrity': String(this.integrity) } + : {}), + ...(this.opts.headers || {}), + } + } + + get types () { + return ['remote'] + } + + // getting a packument and/or manifest is the same as with a file: spec. + // unpack the tarball stream, and then read from the package.json file. + packument () { + return FileFetcher.prototype.packument.apply(this) + } + + manifest () { + return FileFetcher.prototype.manifest.apply(this) + } +} +module.exports = RemoteFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js new file mode 100644 index 00000000000000..843fe5b600cafa --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js @@ -0,0 +1,15 @@ +// add a sha to a git remote url spec +const addGitSha = (spec, sha) => { + if (spec.hosted) { + const h = spec.hosted + const opt = { noCommittish: true } + const base = h.https && h.auth ? h.https(opt) : h.shortcut(opt) + + return `${base}#${sha}` + } else { + // don't use new URL for this, because it doesn't handle scp urls + return spec.rawSpec.replace(/#.*$/, '') + `#${sha}` + } +} + +module.exports = addGitSha diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js new file mode 100644 index 00000000000000..ba5683a7bb5bf3 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js @@ -0,0 +1,15 @@ +const { resolve } = require('node:path') +const { tmpdir, homedir } = require('node:os') + +module.exports = (fakePlatform = false) => { + const temp = tmpdir() + const uidOrPid = process.getuid ? process.getuid() : process.pid + const home = homedir() || resolve(temp, 'npm-' + uidOrPid) + const platform = fakePlatform || process.platform + const cacheExtra = platform === 'win32' ? 
'npm-cache' : '.npm' + const cacheRoot = (platform === 'win32' && process.env.LOCALAPPDATA) || home + return { + cacache: resolve(cacheRoot, cacheExtra, '_cacache'), + tufcache: resolve(cacheRoot, cacheExtra, '_tuf'), + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js new file mode 100644 index 00000000000000..49a3f73f537ce9 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js @@ -0,0 +1,25 @@ +// Function to determine whether a path is in the package.bin set. +// Used to prevent issues when people publish a package from a +// windows machine, and then install with --no-bin-links. +// +// Note: this is not possible in remote or file fetchers, since +// we don't have the manifest until AFTER we've unpacked. But the +// main use case is registry fetching with git a distant second, +// so that's an acceptable edge case to not handle. + +const binObj = (name, bin) => + typeof bin === 'string' ? { [name]: bin } : bin + +const hasBin = (pkg, path) => { + const bin = binObj(pkg.name, pkg.bin) + const p = path.replace(/^[^\\/]*\//, '') + for (const kv of Object.entries(bin)) { + if (kv[1] === p) { + return true + } + } + return false +} + +module.exports = (pkg, path) => + pkg && pkg.bin ? hasBin(pkg, path) : false diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js new file mode 100644 index 00000000000000..a3005c255565fb --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js @@ -0,0 +1,14 @@ +// run an npm command +const spawn = require('@npmcli/promise-spawn') + +module.exports = (npmBin, npmCommand, cwd, env, extra) => { + const isJS = npmBin.endsWith('.js') + const cmd = isJS ? process.execPath : npmBin + const args = (isJS ? [npmBin] : []).concat(npmCommand) + // when installing to run the `prepare` script for a git dep, we need + // to ensure that we don't run into a cycle of checking out packages + // in temp directories. this lets us link previously-seen repos that + // are also being prepared. 
+ + return spawn(cmd, args, { cwd, env }, extra) +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js new file mode 100644 index 00000000000000..e05203b481e6aa --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js @@ -0,0 +1,5 @@ +module.exports = { + cacheFetches: Symbol.for('pacote.Fetcher._cacheFetches'), + readPackageJson: Symbol.for('package.Fetcher._readPackageJson'), + tarballFromResolved: Symbol.for('pacote.Fetcher._tarballFromResolved'), +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js new file mode 100644 index 00000000000000..d070f0f7ba2d4e --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js @@ -0,0 +1,31 @@ +const isPackageBin = require('./is-package-bin.js') + +const tarCreateOptions = manifest => ({ + cwd: manifest._resolved, + prefix: 'package/', + portable: true, + gzip: { + // forcing the level to 9 seems to avoid some + // platform specific optimizations that cause + // integrity mismatch errors due to differing + // end results after compression + level: 9, + }, + + // ensure that package bins are always executable + // Note that npm-packlist is already filtering out + // anything that is not a regular file, ignored by + // .npmignore or package.json "files", etc. + filter: (path, stat) => { + if (isPackageBin(manifest, path)) { + stat.mode |= 0o111 + } + return true + }, + + // Provide a specific date in the 1980s for the benefit of zip, + // which is confounded by files dated at the Unix epoch 0. 
+ mtime: new Date('1985-10-26T08:15:00.000Z'), +}) + +module.exports = tarCreateOptions diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js new file mode 100644 index 00000000000000..c50cb6173b92eb --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js @@ -0,0 +1,10 @@ +const removeTrailingSlashes = (input) => { + // in order to avoid regexp redos detection + let output = input + while (output.endsWith('/')) { + output = output.slice(0, -1) + } + return output +} + +module.exports = removeTrailingSlashes diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json new file mode 100644 index 00000000000000..335c7a6c87bd3c --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json @@ -0,0 +1,79 @@ +{ + "name": "pacote", + "version": "20.0.0", + "description": "JavaScript package downloader", + "author": "GitHub Inc.", + "bin": { + "pacote": "bin/index.js" + }, + "license": "ISC", + "main": "lib/index.js", + "scripts": { + "test": "tap", + "snap": "tap", + "lint": "npm run eslint", + "postlint": "template-oss-check", + "lintfix": "npm run eslint -- --fix", + "posttest": "npm run lint", + "template-oss-apply": "template-oss-apply --force", + "eslint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"" + }, + "tap": { + "timeout": 300, + "nyc-arg": [ + "--exclude", + "tap-snapshots/**" + ] + }, + "devDependencies": { + "@npmcli/arborist": "^7.1.0", + "@npmcli/eslint-config": "^5.0.0", + "@npmcli/template-oss": "4.23.3", + "hosted-git-info": "^8.0.0", + "mutate-fs": "^2.1.1", + "nock": "^13.2.4", + "npm-registry-mock": "^1.3.2", + "tap": "^16.0.1" + }, + "files": [ + "bin/", + "lib/" + ], + "keywords": [ + "packages", + "npm", + "git" + ], + "dependencies": { + "@npmcli/git": "^6.0.0", + "@npmcli/installed-package-contents": "^3.0.0", + "@npmcli/package-json": "^6.0.0", + "@npmcli/promise-spawn": "^8.0.0", + "@npmcli/run-script": "^9.0.0", + "cacache": "^19.0.0", + "fs-minipass": "^3.0.0", + "minipass": "^7.0.2", + "npm-package-arg": "^12.0.0", + "npm-packlist": "^9.0.0", + "npm-pick-manifest": "^10.0.0", + "npm-registry-fetch": "^18.0.0", + "proc-log": "^5.0.0", + "promise-retry": "^2.0.1", + "sigstore": "^3.0.0", + "ssri": "^12.0.0", + "tar": "^6.1.11" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/npm/pacote.git" + }, + "templateOSS": { + "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", + "version": "4.23.3", + "windowsCI": false, + "publish": "true" + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json b/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json index d4c3cf54d83ea7..df0b8f2f4faf1c 100644 --- a/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/metavuln-calculator", - "version": "8.0.0", + "version": "8.0.1", "main": "lib/index.js", "files": [ "bin/", @@ -41,7 +41,7 @@ "dependencies": { "cacache": "^19.0.0", "json-parse-even-better-errors": "^4.0.0", - "pacote": "^19.0.0", + "pacote": "^20.0.0", "proc-log": "^5.0.0", "semver": "^7.3.5" }, diff --git a/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js b/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js index e147cb8f9c746f..aa7b55d8f038d4 100644 --- a/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js +++ b/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js @@ -131,9 +131,19 @@ const open = (_args, opts = {}, extra = {}) => { let platform = process.platform // process.platform === 'linux' may actually indicate WSL, if that's the case - // we want to treat things as win32 anyway so the host can open the argument + // open the argument with sensible-browser which is pre-installed + // In WSL, set the default browser using, for example, + // export BROWSER="/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe" + // or + // export BROWSER="/mnt/c/Program Files (x86)/Microsoft/Edge/Application/msedge.exe" + // To permanently set the default browser, add the appropriate entry to your shell's + // RC file, e.g. .bashrc or .zshrc. if (platform === 'linux' && os.release().toLowerCase().includes('microsoft')) { - platform = 'win32' + platform = 'wsl' + if (!process.env.BROWSER) { + return Promise.reject( + new Error('Set the BROWSER environment variable to your desired browser.')) + } } let command = options.command @@ -146,6 +156,8 @@ const open = (_args, opts = {}, extra = {}) => { // accidentally interpret the first arg as the title, we stick an empty // string immediately after the start command command = 'start ""' + } else if (platform === 'wsl') { + command = 'sensible-browser' } else if (platform === 'darwin') { command = 'open' } else { diff --git a/deps/npm/node_modules/@npmcli/promise-spawn/package.json b/deps/npm/node_modules/@npmcli/promise-spawn/package.json index 9914063f85156d..f5fb026be50e85 100644 --- a/deps/npm/node_modules/@npmcli/promise-spawn/package.json +++ b/deps/npm/node_modules/@npmcli/promise-spawn/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/promise-spawn", - "version": "8.0.1", + "version": "8.0.2", "files": [ "bin/", "lib/" @@ -33,7 +33,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "spawk": "^1.7.1", "tap": "^16.0.1" }, @@ -42,7 +42,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": true }, "dependencies": { diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js deleted file mode 100644 index c541b93001517e..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const net = require('net') -const tls = require('tls') -const { once } = require('events') -const timers = require('timers/promises') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, getProxyAgent, proxyCache } = require('./proxy.js') -const Errors = require('./errors.js') -const { Agent: AgentBase } = require('agent-base') - -module.exports = class Agent extends AgentBase { - #options - #timeouts - #proxy - #noProxy - #ProxyAgent - - constructor (options = {}) { - const { timeouts, proxy, noProxy, ...normalizedOptions } = normalizeOptions(options) - - super(normalizedOptions) - - this.#options = normalizedOptions - this.#timeouts = timeouts - - if (proxy) { - this.#proxy = new URL(proxy) - this.#noProxy = noProxy - this.#ProxyAgent = getProxyAgent(proxy) - } - } - - get proxy () { - return this.#proxy ? { url: this.#proxy } : {} - } - - #getProxy (options) { - if (!this.#proxy) { - return - } - - const proxy = getProxy(`${options.protocol}//${options.host}:${options.port}`, { - proxy: this.#proxy, - noProxy: this.#noProxy, - }) - - if (!proxy) { - return - } - - const cacheKey = cacheOptions({ - ...options, - ...this.#options, - timeouts: this.#timeouts, - proxy, - }) - - if (proxyCache.has(cacheKey)) { - return proxyCache.get(cacheKey) - } - - let ProxyAgent = this.#ProxyAgent - if (Array.isArray(ProxyAgent)) { - ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0] - } - - const proxyAgent = new ProxyAgent(proxy, { - ...this.#options, - socketOptions: { family: this.#options.family }, - }) - proxyCache.set(cacheKey, proxyAgent) - - return proxyAgent - } - - // takes an array of promises and races them against the connection timeout - // which will throw the necessary error if it is hit. This will return the - // result of the promise race. - async #timeoutConnection ({ promises, options, timeout }, ac = new AbortController()) { - if (timeout) { - const connectionTimeout = timers.setTimeout(timeout, null, { signal: ac.signal }) - .then(() => { - throw new Errors.ConnectionTimeoutError(`${options.host}:${options.port}`) - }).catch((err) => { - if (err.name === 'AbortError') { - return - } - throw err - }) - promises.push(connectionTimeout) - } - - let result - try { - result = await Promise.race(promises) - ac.abort() - } catch (err) { - ac.abort() - throw err - } - return result - } - - async connect (request, options) { - // if the connection does not have its own lookup function - // set, then use the one from our options - options.lookup ??= this.#options.lookup - - let socket - let timeout = this.#timeouts.connection - const isSecureEndpoint = this.isSecureEndpoint(options) - - const proxy = this.#getProxy(options) - if (proxy) { - // some of the proxies will wait for the socket to fully connect before - // returning so we have to await this while also racing it against the - // connection timeout. 
- const start = Date.now() - socket = await this.#timeoutConnection({ - options, - timeout, - promises: [proxy.connect(request, options)], - }) - // see how much time proxy.connect took and subtract it from - // the timeout - if (timeout) { - timeout = timeout - (Date.now() - start) - } - } else { - socket = (isSecureEndpoint ? tls : net).connect(options) - } - - socket.setKeepAlive(this.keepAlive, this.keepAliveMsecs) - socket.setNoDelay(this.keepAlive) - - const abortController = new AbortController() - const { signal } = abortController - - const connectPromise = socket[isSecureEndpoint ? 'secureConnecting' : 'connecting'] - ? once(socket, isSecureEndpoint ? 'secureConnect' : 'connect', { signal }) - : Promise.resolve() - - await this.#timeoutConnection({ - options, - timeout, - promises: [ - connectPromise, - once(socket, 'error', { signal }).then((err) => { - throw err[0] - }), - ], - }, abortController) - - if (this.#timeouts.idle) { - socket.setTimeout(this.#timeouts.idle, () => { - socket.destroy(new Errors.IdleTimeoutError(`${options.host}:${options.port}`)) - }) - } - - return socket - } - - addRequest (request, options) { - const proxy = this.#getProxy(options) - // it would be better to call proxy.addRequest here but this causes the - // http-proxy-agent to call its super.addRequest which causes the request - // to be added to the agent twice. since we only support 3 agents - // currently (see the required agents in proxy.js) we have manually - // checked that the only public methods we need to call are called in the - // next block. this could change in the future and presumably we would get - // failing tests until we have properly called the necessary methods on - // each of our proxy agents - if (proxy?.setRequestProps) { - proxy.setRequestProps(request, options) - } - - request.setHeader('connection', this.keepAlive ? 'keep-alive' : 'close') - - if (this.#timeouts.response) { - let responseTimeout - request.once('finish', () => { - setTimeout(() => { - request.destroy(new Errors.ResponseTimeoutError(request, this.#proxy)) - }, this.#timeouts.response) - }) - request.once('response', () => { - clearTimeout(responseTimeout) - }) - } - - if (this.#timeouts.transfer) { - let transferTimeout - request.once('response', (res) => { - setTimeout(() => { - res.destroy(new Errors.TransferTimeoutError(request, this.#proxy)) - }, this.#timeouts.transfer) - res.once('close', () => { - clearTimeout(transferTimeout) - }) - }) - } - - return super.addRequest(request, options) - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js deleted file mode 100644 index 3c6946c566d736..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const dns = require('dns') - -// this is a factory so that each request can have its own opts (i.e. ttl) -// while still sharing the cache across all requests -const cache = new LRUCache({ max: 50 }) - -const getOptions = ({ - family = 0, - hints = dns.ADDRCONFIG, - all = false, - verbatim = undefined, - ttl = 5 * 60 * 1000, - lookup = dns.lookup, -}) => ({ - // hints and lookup are returned since both are top level properties to (net|tls).connect - hints, - lookup: (hostname, ...args) => { - const callback = args.pop() // callback is always last arg - const lookupOptions = args[0] ?? 
{} - - const options = { - family, - hints, - all, - verbatim, - ...(typeof lookupOptions === 'number' ? { family: lookupOptions } : lookupOptions), - } - - const key = JSON.stringify({ hostname, ...options }) - - if (cache.has(key)) { - const cached = cache.get(key) - return process.nextTick(callback, null, ...cached) - } - - lookup(hostname, options, (err, ...result) => { - if (err) { - return callback(err) - } - - cache.set(key, result, { ttl }) - return callback(null, ...result) - }) - }, -}) - -module.exports = { - cache, - getOptions, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js deleted file mode 100644 index 70475aec8eb357..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict' - -class InvalidProxyProtocolError extends Error { - constructor (url) { - super(`Invalid protocol \`${url.protocol}\` connecting to proxy \`${url.host}\``) - this.code = 'EINVALIDPROXY' - this.proxy = url - } -} - -class ConnectionTimeoutError extends Error { - constructor (host) { - super(`Timeout connecting to host \`${host}\``) - this.code = 'ECONNECTIONTIMEOUT' - this.host = host - } -} - -class IdleTimeoutError extends Error { - constructor (host) { - super(`Idle timeout reached for host \`${host}\``) - this.code = 'EIDLETIMEOUT' - this.host = host - } -} - -class ResponseTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Response timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `connecting to host \`${request.host}\`` - super(msg) - this.code = 'ERESPONSETIMEOUT' - this.proxy = proxy - this.request = request - } -} - -class TransferTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Transfer timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `for \`${request.host}\`` - super(msg) - this.code = 'ETRANSFERTIMEOUT' - this.proxy = proxy - this.request = request - } -} - -module.exports = { - InvalidProxyProtocolError, - ConnectionTimeoutError, - IdleTimeoutError, - ResponseTimeoutError, - TransferTimeoutError, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js deleted file mode 100644 index b33d6eaef07a21..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, proxyCache } = require('./proxy.js') -const dns = require('./dns.js') -const Agent = require('./agents.js') - -const agentCache = new LRUCache({ max: 20 }) - -const getAgent = (url, { agent, proxy, noProxy, ...options } = {}) => { - // false has meaning so this can't be a simple truthiness check - if (agent != null) { - return agent - } - - url = new URL(url) - - const proxyForUrl = getProxy(url, { proxy, noProxy }) - const normalizedOptions = { - ...normalizeOptions(options), - proxy: proxyForUrl, - } - - const cacheKey = cacheOptions({ - ...normalizedOptions, - secureEndpoint: url.protocol === 'https:', - }) - - if (agentCache.has(cacheKey)) { - return agentCache.get(cacheKey) - } - - const newAgent = new Agent(normalizedOptions) - agentCache.set(cacheKey, newAgent) - - return newAgent -} - 
-module.exports = { - getAgent, - Agent, - // these are exported for backwards compatability - HttpAgent: Agent, - HttpsAgent: Agent, - cache: { - proxy: proxyCache, - agent: agentCache, - dns: dns.cache, - clear: () => { - proxyCache.clear() - agentCache.clear() - dns.cache.clear() - }, - }, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js deleted file mode 100644 index 0bf53f725f0846..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const dns = require('./dns') - -const normalizeOptions = (opts) => { - const family = parseInt(opts.family ?? '0', 10) - const keepAlive = opts.keepAlive ?? true - - const normalized = { - // nodejs http agent options. these are all the defaults - // but kept here to increase the likelihood of cache hits - // https://nodejs.org/api/http.html#new-agentoptions - keepAliveMsecs: keepAlive ? 1000 : undefined, - maxSockets: opts.maxSockets ?? 15, - maxTotalSockets: Infinity, - maxFreeSockets: keepAlive ? 256 : undefined, - scheduling: 'fifo', - // then spread the rest of the options - ...opts, - // we already set these to their defaults that we want - family, - keepAlive, - // our custom timeout options - timeouts: { - // the standard timeout option is mapped to our idle timeout - // and then deleted below - idle: opts.timeout ?? 0, - connection: 0, - response: 0, - transfer: 0, - ...opts.timeouts, - }, - // get the dns options that go at the top level of socket connection - ...dns.getOptions({ family, ...opts.dns }), - } - - // remove timeout since we already used it to set our own idle timeout - delete normalized.timeout - - return normalized -} - -const createKey = (obj) => { - let key = '' - const sorted = Object.entries(obj).sort((a, b) => a[0] - b[0]) - for (let [k, v] of sorted) { - if (v == null) { - v = 'null' - } else if (v instanceof URL) { - v = v.toString() - } else if (typeof v === 'object') { - v = createKey(v) - } - key += `${k}:${v}:` - } - return key -} - -const cacheOptions = ({ secureEndpoint, ...options }) => createKey({ - secureEndpoint: !!secureEndpoint, - // socket connect options - family: options.family, - hints: options.hints, - localAddress: options.localAddress, - // tls specific connect options - strictSsl: secureEndpoint ? !!options.rejectUnauthorized : false, - ca: secureEndpoint ? options.ca : null, - cert: secureEndpoint ? options.cert : null, - key: secureEndpoint ? 
options.key : null, - // http agent options - keepAlive: options.keepAlive, - keepAliveMsecs: options.keepAliveMsecs, - maxSockets: options.maxSockets, - maxTotalSockets: options.maxTotalSockets, - maxFreeSockets: options.maxFreeSockets, - scheduling: options.scheduling, - // timeout options - timeouts: options.timeouts, - // proxy - proxy: options.proxy, -}) - -module.exports = { - normalizeOptions, - cacheOptions, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js deleted file mode 100644 index 6272e929e57bcf..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js +++ /dev/null @@ -1,88 +0,0 @@ -'use strict' - -const { HttpProxyAgent } = require('http-proxy-agent') -const { HttpsProxyAgent } = require('https-proxy-agent') -const { SocksProxyAgent } = require('socks-proxy-agent') -const { LRUCache } = require('lru-cache') -const { InvalidProxyProtocolError } = require('./errors.js') - -const PROXY_CACHE = new LRUCache({ max: 20 }) - -const SOCKS_PROTOCOLS = new Set(SocksProxyAgent.protocols) - -const PROXY_ENV_KEYS = new Set(['https_proxy', 'http_proxy', 'proxy', 'no_proxy']) - -const PROXY_ENV = Object.entries(process.env).reduce((acc, [key, value]) => { - key = key.toLowerCase() - if (PROXY_ENV_KEYS.has(key)) { - acc[key] = value - } - return acc -}, {}) - -const getProxyAgent = (url) => { - url = new URL(url) - - const protocol = url.protocol.slice(0, -1) - if (SOCKS_PROTOCOLS.has(protocol)) { - return SocksProxyAgent - } - if (protocol === 'https' || protocol === 'http') { - return [HttpProxyAgent, HttpsProxyAgent] - } - - throw new InvalidProxyProtocolError(url) -} - -const isNoProxy = (url, noProxy) => { - if (typeof noProxy === 'string') { - noProxy = noProxy.split(',').map((p) => p.trim()).filter(Boolean) - } - - if (!noProxy || !noProxy.length) { - return false - } - - const hostSegments = url.hostname.split('.').reverse() - - return noProxy.some((no) => { - const noSegments = no.split('.').filter(Boolean).reverse() - if (!noSegments.length) { - return false - } - - for (let i = 0; i < noSegments.length; i++) { - if (hostSegments[i] !== noSegments[i]) { - return false - } - } - - return true - }) -} - -const getProxy = (url, { proxy, noProxy }) => { - url = new URL(url) - - if (!proxy) { - proxy = url.protocol === 'https:' - ? 
PROXY_ENV.https_proxy - : PROXY_ENV.https_proxy || PROXY_ENV.http_proxy || PROXY_ENV.proxy - } - - if (!noProxy) { - noProxy = PROXY_ENV.no_proxy - } - - if (!proxy || isNoProxy(url, noProxy)) { - return null - } - - return new URL(proxy) -} - -module.exports = { - getProxyAgent, - getProxy, - proxyCache: PROXY_CACHE, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json deleted file mode 100644 index ef5b4e3228cc46..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "@npmcli/agent", - "version": "2.2.2", - "description": "the http/https agent used by the npm cli", - "main": "lib/index.js", - "scripts": { - "gencerts": "bash scripts/create-cert.sh", - "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/agent/issues" - }, - "homepage": "https://github.com/npm/agent#readme", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": "true" - }, - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "minipass-fetch": "^3.0.3", - "nock": "^13.2.7", - "semver": "^7.5.4", - "simple-socks": "^3.1.0", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/agent.git" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js deleted file mode 100644 index cb5982f79077ac..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js +++ /dev/null @@ -1,20 +0,0 @@ -// given an input that may or may not be an object, return an object that has -// a copy of every defined property listed in 'copy'. if the input is not an -// object, assign it to the property named by 'wrap' -const getOptions = (input, { copy, wrap }) => { - const result = {} - - if (input && typeof input === 'object') { - for (const prop of copy) { - if (input[prop] !== undefined) { - result[prop] = input[prop] - } - } - } else { - result[wrap] = input - } - - return result -} - -module.exports = getOptions diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js deleted file mode 100644 index 4d13bc037359d7..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js +++ /dev/null @@ -1,9 +0,0 @@ -const semver = require('semver') - -const satisfies = (range) => { - return semver.satisfies(process.version, range, { includePrerelease: true }) -} - -module.exports = { - satisfies, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE deleted file mode 100644 index 93546dfb7655bf..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -(The MIT License) - -Copyright (c) 2011-2017 JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js deleted file mode 100644 index 1cd1e05d0c533d..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict' -const { inspect } = require('util') - -// adapted from node's internal/errors -// https://github.com/nodejs/node/blob/c8a04049/lib/internal/errors.js - -// close copy of node's internal SystemError class. 
-class SystemError { - constructor (code, prefix, context) { - // XXX context.code is undefined in all constructors used in cp/polyfill - // that may be a bug copied from node, maybe the constructor should use - // `code` not `errno`? nodejs/node#41104 - let message = `${prefix}: ${context.syscall} returned ` + - `${context.code} (${context.message})` - - if (context.path !== undefined) { - message += ` ${context.path}` - } - if (context.dest !== undefined) { - message += ` => ${context.dest}` - } - - this.code = code - Object.defineProperties(this, { - name: { - value: 'SystemError', - enumerable: false, - writable: true, - configurable: true, - }, - message: { - value: message, - enumerable: false, - writable: true, - configurable: true, - }, - info: { - value: context, - enumerable: true, - configurable: true, - writable: false, - }, - errno: { - get () { - return context.errno - }, - set (value) { - context.errno = value - }, - enumerable: true, - configurable: true, - }, - syscall: { - get () { - return context.syscall - }, - set (value) { - context.syscall = value - }, - enumerable: true, - configurable: true, - }, - }) - - if (context.path !== undefined) { - Object.defineProperty(this, 'path', { - get () { - return context.path - }, - set (value) { - context.path = value - }, - enumerable: true, - configurable: true, - }) - } - - if (context.dest !== undefined) { - Object.defineProperty(this, 'dest', { - get () { - return context.dest - }, - set (value) { - context.dest = value - }, - enumerable: true, - configurable: true, - }) - } - } - - toString () { - return `${this.name} [${this.code}]: ${this.message}` - } - - [Symbol.for('nodejs.util.inspect.custom')] (_recurseTimes, ctx) { - return inspect(this, { - ...ctx, - getters: true, - customInspect: false, - }) - } -} - -function E (code, message) { - module.exports[code] = class NodeError extends SystemError { - constructor (ctx) { - super(code, message, ctx) - } - } -} - -E('ERR_FS_CP_DIR_TO_NON_DIR', 'Cannot overwrite directory with non-directory') -E('ERR_FS_CP_EEXIST', 'Target already exists') -E('ERR_FS_CP_EINVAL', 'Invalid src or dest') -E('ERR_FS_CP_FIFO_PIPE', 'Cannot copy a FIFO pipe') -E('ERR_FS_CP_NON_DIR_TO_DIR', 'Cannot overwrite non-directory with directory') -E('ERR_FS_CP_SOCKET', 'Cannot copy a socket file') -E('ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY', 'Cannot overwrite symlink in subdirectory of self') -E('ERR_FS_CP_UNKNOWN', 'Cannot copy an unknown file type') -E('ERR_FS_EISDIR', 'Path is a directory') - -module.exports.ERR_INVALID_ARG_TYPE = class ERR_INVALID_ARG_TYPE extends Error { - constructor (name, expected, actual) { - super() - this.code = 'ERR_INVALID_ARG_TYPE' - this.message = `The ${name} argument must be ${expected}. 
Received ${typeof actual}` - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js deleted file mode 100644 index 972ce7aa12abef..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js +++ /dev/null @@ -1,22 +0,0 @@ -const fs = require('fs/promises') -const getOptions = require('../common/get-options.js') -const node = require('../common/node.js') -const polyfill = require('./polyfill.js') - -// node 16.7.0 added fs.cp -const useNative = node.satisfies('>=16.7.0') - -const cp = async (src, dest, opts) => { - const options = getOptions(opts, { - copy: ['dereference', 'errorOnExist', 'filter', 'force', 'preserveTimestamps', 'recursive'], - }) - - // the polyfill is tested separately from this module, no need to hack - // process.version to try to trigger it just for coverage - // istanbul ignore next - return useNative - ? fs.cp(src, dest, options) - : polyfill(src, dest, options) -} - -module.exports = cp diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js deleted file mode 100644 index 80eb10de971918..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js +++ /dev/null @@ -1,428 +0,0 @@ -// this file is a modified version of the code in node 17.2.0 -// which is, in turn, a modified version of the fs-extra module on npm -// node core changes: -// - Use of the assert module has been replaced with core's error system. -// - All code related to the glob dependency has been removed. -// - Bring your own custom fs module is not currently supported. -// - Some basic code cleanup. -// changes here: -// - remove all callback related code -// - drop sync support -// - change assertions back to non-internal methods (see options.js) -// - throws ENOTDIR when rmdir gets an ENOENT for a path that exists in Windows -'use strict' - -const { - ERR_FS_CP_DIR_TO_NON_DIR, - ERR_FS_CP_EEXIST, - ERR_FS_CP_EINVAL, - ERR_FS_CP_FIFO_PIPE, - ERR_FS_CP_NON_DIR_TO_DIR, - ERR_FS_CP_SOCKET, - ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY, - ERR_FS_CP_UNKNOWN, - ERR_FS_EISDIR, - ERR_INVALID_ARG_TYPE, -} = require('./errors.js') -const { - constants: { - errno: { - EEXIST, - EISDIR, - EINVAL, - ENOTDIR, - }, - }, -} = require('os') -const { - chmod, - copyFile, - lstat, - mkdir, - readdir, - readlink, - stat, - symlink, - unlink, - utimes, -} = require('fs/promises') -const { - dirname, - isAbsolute, - join, - parse, - resolve, - sep, - toNamespacedPath, -} = require('path') -const { fileURLToPath } = require('url') - -const defaultOptions = { - dereference: false, - errorOnExist: false, - filter: undefined, - force: true, - preserveTimestamps: false, - recursive: false, -} - -async function cp (src, dest, opts) { - if (opts != null && typeof opts !== 'object') { - throw new ERR_INVALID_ARG_TYPE('options', ['Object'], opts) - } - return cpFn( - toNamespacedPath(getValidatedPath(src)), - toNamespacedPath(getValidatedPath(dest)), - { ...defaultOptions, ...opts }) -} - -function getValidatedPath (fileURLOrPath) { - const path = fileURLOrPath != null && fileURLOrPath.href - && fileURLOrPath.origin - ? 
fileURLToPath(fileURLOrPath) - : fileURLOrPath - return path -} - -async function cpFn (src, dest, opts) { - // Warn about using preserveTimestamps on 32-bit node - // istanbul ignore next - if (opts.preserveTimestamps && process.arch === 'ia32') { - const warning = 'Using the preserveTimestamps option in 32-bit ' + - 'node is not recommended' - process.emitWarning(warning, 'TimestampPrecisionWarning') - } - const stats = await checkPaths(src, dest, opts) - const { srcStat, destStat } = stats - await checkParentPaths(src, srcStat, dest) - if (opts.filter) { - return handleFilter(checkParentDir, destStat, src, dest, opts) - } - return checkParentDir(destStat, src, dest, opts) -} - -async function checkPaths (src, dest, opts) { - const { 0: srcStat, 1: destStat } = await getStats(src, dest, opts) - if (destStat) { - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: 'src and dest cannot be the same', - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - if (srcStat.isDirectory() && !destStat.isDirectory()) { - throw new ERR_FS_CP_DIR_TO_NON_DIR({ - message: `cannot overwrite directory ${src} ` + - `with non-directory ${dest}`, - path: dest, - syscall: 'cp', - errno: EISDIR, - }) - } - if (!srcStat.isDirectory() && destStat.isDirectory()) { - throw new ERR_FS_CP_NON_DIR_TO_DIR({ - message: `cannot overwrite non-directory ${src} ` + - `with directory ${dest}`, - path: dest, - syscall: 'cp', - errno: ENOTDIR, - }) - } - } - - if (srcStat.isDirectory() && isSrcSubdir(src, dest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return { srcStat, destStat } -} - -function areIdentical (srcStat, destStat) { - return destStat.ino && destStat.dev && destStat.ino === srcStat.ino && - destStat.dev === srcStat.dev -} - -function getStats (src, dest, opts) { - const statFunc = opts.dereference ? - (file) => stat(file, { bigint: true }) : - (file) => lstat(file, { bigint: true }) - return Promise.all([ - statFunc(src), - statFunc(dest).catch((err) => { - // istanbul ignore next: unsure how to cover. - if (err.code === 'ENOENT') { - return null - } - // istanbul ignore next: unsure how to cover. - throw err - }), - ]) -} - -async function checkParentDir (destStat, src, dest, opts) { - const destParent = dirname(dest) - const dirExists = await pathExists(destParent) - if (dirExists) { - return getStatsForCopy(destStat, src, dest, opts) - } - await mkdir(destParent, { recursive: true }) - return getStatsForCopy(destStat, src, dest, opts) -} - -function pathExists (dest) { - return stat(dest).then( - () => true, - // istanbul ignore next: not sure when this would occur - (err) => (err.code === 'ENOENT' ? false : Promise.reject(err))) -} - -// Recursively check if dest parent is a subdirectory of src. -// It works for all file types including symlinks since it -// checks the src and dest inodes. It starts from the deepest -// parent and stops once it reaches the src parent or the root path. 
-async function checkParentPaths (src, srcStat, dest) { - const srcParent = resolve(dirname(src)) - const destParent = resolve(dirname(dest)) - if (destParent === srcParent || destParent === parse(destParent).root) { - return - } - let destStat - try { - destStat = await stat(destParent, { bigint: true }) - } catch (err) { - // istanbul ignore else: not sure when this would occur - if (err.code === 'ENOENT') { - return - } - // istanbul ignore next: not sure when this would occur - throw err - } - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return checkParentPaths(src, srcStat, destParent) -} - -const normalizePathToArray = (path) => - resolve(path).split(sep).filter(Boolean) - -// Return true if dest is a subdir of src, otherwise false. -// It only checks the path strings. -function isSrcSubdir (src, dest) { - const srcArr = normalizePathToArray(src) - const destArr = normalizePathToArray(dest) - return srcArr.every((cur, i) => destArr[i] === cur) -} - -async function handleFilter (onInclude, destStat, src, dest, opts, cb) { - const include = await opts.filter(src, dest) - if (include) { - return onInclude(destStat, src, dest, opts, cb) - } -} - -function startCopy (destStat, src, dest, opts) { - if (opts.filter) { - return handleFilter(getStatsForCopy, destStat, src, dest, opts) - } - return getStatsForCopy(destStat, src, dest, opts) -} - -async function getStatsForCopy (destStat, src, dest, opts) { - const statFn = opts.dereference ? stat : lstat - const srcStat = await statFn(src) - // istanbul ignore else: can't portably test FIFO - if (srcStat.isDirectory() && opts.recursive) { - return onDir(srcStat, destStat, src, dest, opts) - } else if (srcStat.isDirectory()) { - throw new ERR_FS_EISDIR({ - message: `${src} is a directory (not copied)`, - path: src, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFile() || - srcStat.isCharacterDevice() || - srcStat.isBlockDevice()) { - return onFile(srcStat, destStat, src, dest, opts) - } else if (srcStat.isSymbolicLink()) { - return onLink(destStat, src, dest) - } else if (srcStat.isSocket()) { - throw new ERR_FS_CP_SOCKET({ - message: `cannot copy a socket file: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFIFO()) { - throw new ERR_FS_CP_FIFO_PIPE({ - message: `cannot copy a FIFO pipe: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // istanbul ignore next: should be unreachable - throw new ERR_FS_CP_UNKNOWN({ - message: `cannot copy an unknown file type: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) -} - -function onFile (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return _copyFile(srcStat, src, dest, opts) - } - return mayCopyFile(srcStat, src, dest, opts) -} - -async function mayCopyFile (srcStat, src, dest, opts) { - if (opts.force) { - await unlink(dest) - return _copyFile(srcStat, src, dest, opts) - } else if (opts.errorOnExist) { - throw new ERR_FS_CP_EEXIST({ - message: `${dest} already exists`, - path: dest, - syscall: 'cp', - errno: EEXIST, - }) - } -} - -async function _copyFile (srcStat, src, dest, opts) { - await copyFile(src, dest) - if (opts.preserveTimestamps) { - return handleTimestampsAndMode(srcStat.mode, src, dest) - } - return setDestMode(dest, srcStat.mode) -} - -async function handleTimestampsAndMode (srcMode, src, dest) { - // Make sure the file is writable before 
setting the timestamp - // otherwise open fails with EPERM when invoked with 'r+' - // (through utimes call) - if (fileIsNotWritable(srcMode)) { - await makeFileWritable(dest, srcMode) - return setDestTimestampsAndMode(srcMode, src, dest) - } - return setDestTimestampsAndMode(srcMode, src, dest) -} - -function fileIsNotWritable (srcMode) { - return (srcMode & 0o200) === 0 -} - -function makeFileWritable (dest, srcMode) { - return setDestMode(dest, srcMode | 0o200) -} - -async function setDestTimestampsAndMode (srcMode, src, dest) { - await setDestTimestamps(src, dest) - return setDestMode(dest, srcMode) -} - -function setDestMode (dest, srcMode) { - return chmod(dest, srcMode) -} - -async function setDestTimestamps (src, dest) { - // The initial srcStat.atime cannot be trusted - // because it is modified by the read(2) system call - // (See https://nodejs.org/api/fs.html#fs_stat_time_values) - const updatedSrcStat = await stat(src) - return utimes(dest, updatedSrcStat.atime, updatedSrcStat.mtime) -} - -function onDir (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return mkDirAndCopy(srcStat.mode, src, dest, opts) - } - return copyDir(src, dest, opts) -} - -async function mkDirAndCopy (srcMode, src, dest, opts) { - await mkdir(dest) - await copyDir(src, dest, opts) - return setDestMode(dest, srcMode) -} - -async function copyDir (src, dest, opts) { - const dir = await readdir(src) - for (let i = 0; i < dir.length; i++) { - const item = dir[i] - const srcItem = join(src, item) - const destItem = join(dest, item) - const { destStat } = await checkPaths(srcItem, destItem, opts) - await startCopy(destStat, srcItem, destItem, opts) - } -} - -async function onLink (destStat, src, dest) { - let resolvedSrc = await readlink(src) - if (!isAbsolute(resolvedSrc)) { - resolvedSrc = resolve(dirname(src), resolvedSrc) - } - if (!destStat) { - return symlink(resolvedSrc, dest) - } - let resolvedDest - try { - resolvedDest = await readlink(dest) - } catch (err) { - // Dest exists and is a regular file or directory, - // Windows may throw UNKNOWN error. If dest already exists, - // fs throws error anyway, so no need to guard against it here. - // istanbul ignore next: can only test on windows - if (err.code === 'EINVAL' || err.code === 'UNKNOWN') { - return symlink(resolvedSrc, dest) - } - // istanbul ignore next: should not be possible - throw err - } - if (!isAbsolute(resolvedDest)) { - resolvedDest = resolve(dirname(dest), resolvedDest) - } - if (isSrcSubdir(resolvedSrc, resolvedDest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${resolvedSrc} to a subdirectory of self ` + - `${resolvedDest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // Do not copy if src is a subdir of dest since unlinking - // dest in this case would result in removing src contents - // and therefore a broken symlink would be created. 
- const srcStat = await stat(src) - if (srcStat.isDirectory() && isSrcSubdir(resolvedDest, resolvedSrc)) { - throw new ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY({ - message: `cannot overwrite ${resolvedDest} with ${resolvedSrc}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return copyLink(resolvedSrc, dest) -} - -async function copyLink (resolvedSrc, dest) { - await unlink(dest) - return symlink(resolvedSrc, dest) -} - -module.exports = cp diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js deleted file mode 100644 index 81c746304cc428..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' - -const cp = require('./cp/index.js') -const withTempDir = require('./with-temp-dir.js') -const readdirScoped = require('./readdir-scoped.js') -const moveFile = require('./move-file.js') - -module.exports = { - cp, - withTempDir, - readdirScoped, - moveFile, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js deleted file mode 100644 index d56e06d384659a..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js +++ /dev/null @@ -1,78 +0,0 @@ -const { dirname, join, resolve, relative, isAbsolute } = require('path') -const fs = require('fs/promises') - -const pathExists = async path => { - try { - await fs.access(path) - return true - } catch (er) { - return er.code !== 'ENOENT' - } -} - -const moveFile = async (source, destination, options = {}, root = true, symlinks = []) => { - if (!source || !destination) { - throw new TypeError('`source` and `destination` file required') - } - - options = { - overwrite: true, - ...options, - } - - if (!options.overwrite && await pathExists(destination)) { - throw new Error(`The destination file exists: ${destination}`) - } - - await fs.mkdir(dirname(destination), { recursive: true }) - - try { - await fs.rename(source, destination) - } catch (error) { - if (error.code === 'EXDEV' || error.code === 'EPERM') { - const sourceStat = await fs.lstat(source) - if (sourceStat.isDirectory()) { - const files = await fs.readdir(source) - await Promise.all(files.map((file) => - moveFile(join(source, file), join(destination, file), options, false, symlinks) - )) - } else if (sourceStat.isSymbolicLink()) { - symlinks.push({ source, destination }) - } else { - await fs.copyFile(source, destination) - } - } else { - throw error - } - } - - if (root) { - await Promise.all(symlinks.map(async ({ source: symSource, destination: symDestination }) => { - let target = await fs.readlink(symSource) - // junction symlinks in windows will be absolute paths, so we need to - // make sure they point to the symlink destination - if (isAbsolute(target)) { - target = resolve(symDestination, relative(symSource, target)) - } - // try to determine what the actual file is so we can create the correct - // type of symlink in windows - let targetStat = 'file' - try { - targetStat = await fs.stat(resolve(dirname(symSource), target)) - if (targetStat.isDirectory()) { - targetStat = 'junction' - } - } catch { - // targetStat remains 'file' - } - await fs.symlink( - target, - symDestination, - targetStat - ) - })) - await fs.rm(source, { recursive: true, force: true }) - } -} - -module.exports = moveFile diff --git 
a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js deleted file mode 100644 index cd601dfbe7486b..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js +++ /dev/null @@ -1,20 +0,0 @@ -const { readdir } = require('fs/promises') -const { join } = require('path') - -const readdirScoped = async (dir) => { - const results = [] - - for (const item of await readdir(dir)) { - if (item.startsWith('@')) { - for (const scopedItem of await readdir(join(dir, item))) { - results.push(join(item, scopedItem)) - } - } else { - results.push(item) - } - } - - return results -} - -module.exports = readdirScoped diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js deleted file mode 100644 index 0738ac4f29e1be..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js +++ /dev/null @@ -1,39 +0,0 @@ -const { join, sep } = require('path') - -const getOptions = require('./common/get-options.js') -const { mkdir, mkdtemp, rm } = require('fs/promises') - -// create a temp directory, ensure its permissions match its parent, then call -// the supplied function passing it the path to the directory. clean up after -// the function finishes, whether it throws or not -const withTempDir = async (root, fn, opts) => { - const options = getOptions(opts, { - copy: ['tmpPrefix'], - }) - // create the directory - await mkdir(root, { recursive: true }) - - const target = await mkdtemp(join(`${root}${sep}`, options.tmpPrefix || '')) - let err - let result - - try { - result = await fn(target) - } catch (_err) { - err = _err - } - - try { - await rm(target, { force: true, recursive: true }) - } catch { - // ignore errors - } - - if (err) { - throw err - } - - return result -} - -module.exports = withTempDir diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json deleted file mode 100644 index 5261a11b78000e..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "@npmcli/fs", - "version": "3.1.1", - "description": "filesystem utilities for the npm cli", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "snap": "tap", - "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/fs.git" - }, - "keywords": [ - "npm", - "oss" - ], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.22.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md deleted file mode 100644 index 8d28acf866d932..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js deleted file mode 100644 index ad5a76a4f73f26..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const contentVer = require('../../package.json')['cache-version'].content -const hashToSegments = require('../util/hash-to-segments') -const path = require('path') -const ssri = require('ssri') - -// Current format of content file path: -// -// sha512-BaSE64Hex= -> -// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee -// -module.exports = contentPath - -function contentPath (cache, integrity) { - const sri = ssri.parse(integrity, { single: true }) - // contentPath is the *strongest* algo given - return path.join( - contentDir(cache), - sri.algorithm, - ...hashToSegments(sri.hexDigest()) - ) -} - -module.exports.contentDir = contentDir - -function contentDir (cache) { - return path.join(cache, `content-v${contentVer}`) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js deleted file mode 100644 index 5f6192c3cec566..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js +++ /dev/null @@ -1,165 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const fsm = require('fs-minipass') -const ssri = require('ssri') -const contentPath = require('./path') -const Pipeline = require('minipass-pipeline') - -module.exports = read - -const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024 -async function read (cache, integrity, opts = {}) { - const { size } = opts - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? 
{ size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - if (stat.size > MAX_SINGLE_READ_SIZE) { - return readPipeline(cpath, stat.size, sri, new Pipeline()).concat() - } - - const data = await fs.readFile(cpath, { encoding: null }) - - if (stat.size !== data.length) { - throw sizeError(stat.size, data.length) - } - - if (!ssri.checkData(data, sri)) { - throw integrityError(sri, cpath) - } - - return data -} - -const readPipeline = (cpath, size, sri, stream) => { - stream.push( - new fsm.ReadStream(cpath, { - size, - readSize: MAX_SINGLE_READ_SIZE, - }), - ssri.integrityStream({ - integrity: sri, - size, - }) - ) - return stream -} - -module.exports.stream = readStream -module.exports.readStream = readStream - -function readStream (cache, integrity, opts = {}) { - const { size } = opts - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? { size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - return readPipeline(cpath, stat.size, sri, stream) - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.copy = copy - -function copy (cache, integrity, dest) { - return withContentSri(cache, integrity, (cpath) => { - return fs.copyFile(cpath, dest) - }) -} - -module.exports.hasContent = hasContent - -async function hasContent (cache, integrity) { - if (!integrity) { - return false - } - - try { - return await withContentSri(cache, integrity, async (cpath, sri) => { - const stat = await fs.stat(cpath) - return { size: stat.size, sri, stat } - }) - } catch (err) { - if (err.code === 'ENOENT') { - return false - } - - if (err.code === 'EPERM') { - /* istanbul ignore else */ - if (process.platform !== 'win32') { - throw err - } else { - return false - } - } - } -} - -async function withContentSri (cache, integrity, fn) { - const sri = ssri.parse(integrity) - // If `integrity` has multiple entries, pick the first digest - // with available local data. 
- const algo = sri.pickAlgorithm() - const digests = sri[algo] - - if (digests.length <= 1) { - const cpath = contentPath(cache, digests[0]) - return fn(cpath, digests[0]) - } else { - // Can't use race here because a generic error can happen before - // a ENOENT error, and can happen before a valid result - const results = await Promise.all(digests.map(async (meta) => { - try { - return await withContentSri(cache, meta, fn) - } catch (err) { - if (err.code === 'ENOENT') { - return Object.assign( - new Error('No matching content found for ' + sri.toString()), - { code: 'ENOENT' } - ) - } - return err - } - })) - // Return the first non error if it is found - const result = results.find((r) => !(r instanceof Error)) - if (result) { - return result - } - - // Throw the No matching content found error - const enoentError = results.find((r) => r.code === 'ENOENT') - if (enoentError) { - throw enoentError - } - - // Throw generic error - throw results.find((r) => r instanceof Error) - } -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function integrityError (sri, path) { - const err = new Error(`Integrity verification failed for ${sri} (${path})`) - err.code = 'EINTEGRITY' - err.sri = sri - err.path = path - return err -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js deleted file mode 100644 index ce58d679e4cb25..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const contentPath = require('./path') -const { hasContent } = require('./read') - -module.exports = rm - -async function rm (cache, integrity) { - const content = await hasContent(cache, integrity) - // ~pretty~ sure we can't end up with a content lacking sri, but be safe - if (content && content.sri) { - await fs.rm(contentPath(cache, content.sri), { recursive: true, force: true }) - return true - } else { - return false - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js deleted file mode 100644 index e7187abca8788a..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const events = require('events') - -const contentPath = require('./path') -const fs = require('fs/promises') -const { moveFile } = require('@npmcli/fs') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') -const Flush = require('minipass-flush') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') -const fsm = require('fs-minipass') - -module.exports = write - -// Cache of move operations in process so we don't duplicate -const moveOperations = new Map() - -async function write (cache, data, opts = {}) { - const { algorithms, size, integrity } = opts - - if (typeof size === 'number' && data.length !== size) { - throw sizeError(size, data.length) - } - - const sri = ssri.fromData(data, algorithms ? 
{ algorithms } : {}) - if (integrity && !ssri.checkData(data, integrity, opts)) { - throw checksumError(integrity, sri) - } - - for (const algo in sri) { - const tmp = await makeTmp(cache, opts) - const hash = sri[algo].toString() - try { - await fs.writeFile(tmp.target, data, { flag: 'wx' }) - await moveToDestination(tmp, cache, hash, opts) - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } - } - return { integrity: sri, size: data.length } -} - -module.exports.stream = writeStream - -// writes proxied to the 'inputStream' that is passed to the Promise -// 'end' is deferred until content is handled. -class CacacheWriteStream extends Flush { - constructor (cache, opts) { - super() - this.opts = opts - this.cache = cache - this.inputStream = new Minipass() - this.inputStream.on('error', er => this.emit('error', er)) - this.inputStream.on('drain', () => this.emit('drain')) - this.handleContentP = null - } - - write (chunk, encoding, cb) { - if (!this.handleContentP) { - this.handleContentP = handleContent( - this.inputStream, - this.cache, - this.opts - ) - this.handleContentP.catch(error => this.emit('error', error)) - } - return this.inputStream.write(chunk, encoding, cb) - } - - flush (cb) { - this.inputStream.end(() => { - if (!this.handleContentP) { - const e = new Error('Cache input stream was empty') - e.code = 'ENODATA' - // empty streams are probably emitting end right away. - // defer this one tick by rejecting a promise on it. - return Promise.reject(e).catch(cb) - } - // eslint-disable-next-line promise/catch-or-return - this.handleContentP.then( - (res) => { - res.integrity && this.emit('integrity', res.integrity) - // eslint-disable-next-line promise/always-return - res.size !== null && this.emit('size', res.size) - cb() - }, - (er) => cb(er) - ) - }) - } -} - -function writeStream (cache, opts = {}) { - return new CacacheWriteStream(cache, opts) -} - -async function handleContent (inputStream, cache, opts) { - const tmp = await makeTmp(cache, opts) - try { - const res = await pipeToTmp(inputStream, cache, tmp.target, opts) - await moveToDestination( - tmp, - cache, - res.integrity, - opts - ) - return res - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } -} - -async function pipeToTmp (inputStream, cache, tmpTarget, opts) { - const outStream = new fsm.WriteStream(tmpTarget, { - flags: 'wx', - }) - - if (opts.integrityEmitter) { - // we need to create these all simultaneously since they can fire in any order - const [integrity, size] = await Promise.all([ - events.once(opts.integrityEmitter, 'integrity').then(res => res[0]), - events.once(opts.integrityEmitter, 'size').then(res => res[0]), - new Pipeline(inputStream, outStream).promise(), - ]) - return { integrity, size } - } - - let integrity - let size - const hashStream = ssri.integrityStream({ - integrity: opts.integrity, - algorithms: opts.algorithms, - size: opts.size, - }) - hashStream.on('integrity', i => { - integrity = i - }) - hashStream.on('size', s => { - size = s - }) - - const pipeline = new Pipeline(inputStream, hashStream, outStream) - await pipeline.promise() - return { integrity, size } -} - -async function makeTmp (cache, opts) { - const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await fs.mkdir(path.dirname(tmpTarget), { recursive: true }) - return { - target: tmpTarget, - moved: false, - } -} - -async function moveToDestination (tmp, cache, sri) { - const destination = 
contentPath(cache, sri) - const destDir = path.dirname(destination) - if (moveOperations.has(destination)) { - return moveOperations.get(destination) - } - moveOperations.set( - destination, - fs.mkdir(destDir, { recursive: true }) - .then(async () => { - await moveFile(tmp.target, destination, { overwrite: false }) - tmp.moved = true - return tmp.moved - }) - .catch(err => { - if (!err.message.startsWith('The destination file exists')) { - throw Object.assign(err, { code: 'EEXIST' }) - } - }).finally(() => { - moveOperations.delete(destination) - }) - - ) - return moveOperations.get(destination) -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function checksumError (expected, found) { - const err = new Error(`Integrity check failed: - Wanted: ${expected} - Found: ${found}`) - err.code = 'EINTEGRITY' - err.expected = expected - err.found = found - return err -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js deleted file mode 100644 index 89c28f2f257d48..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js +++ /dev/null @@ -1,336 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { - appendFile, - mkdir, - readFile, - readdir, - rm, - writeFile, -} = require('fs/promises') -const { Minipass } = require('minipass') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') - -const contentPath = require('./content/path') -const hashToSegments = require('./util/hash-to-segments') -const indexV = require('../package.json')['cache-version'].index -const { moveFile } = require('@npmcli/fs') - -const pMap = require('p-map') -const lsStreamConcurrency = 5 - -module.exports.NotFoundError = class NotFoundError extends Error { - constructor (cache, key) { - super(`No cache entry for ${key} found in ${cache}`) - this.code = 'ENOENT' - this.cache = cache - this.key = key - } -} - -module.exports.compact = compact - -async function compact (cache, key, matchFn, opts = {}) { - const bucket = bucketPath(cache, key) - const entries = await bucketEntries(bucket) - const newEntries = [] - // we loop backwards because the bottom-most result is the newest - // since we add new entries with appendFile - for (let i = entries.length - 1; i >= 0; --i) { - const entry = entries[i] - // a null integrity could mean either a delete was appended - // or the user has simply stored an index that does not map - // to any content. we determine if the user wants to keep the - // null integrity based on the validateEntry function passed in options. - // if the integrity is null and no validateEntry is provided, we break - // as we consider the null integrity to be a deletion of everything - // that came before it. 
- if (entry.integrity === null && !opts.validateEntry) { - break - } - - // if this entry is valid, and it is either the first entry or - // the newEntries array doesn't already include an entry that - // matches this one based on the provided matchFn, then we add - // it to the beginning of our list - if ((!opts.validateEntry || opts.validateEntry(entry) === true) && - (newEntries.length === 0 || - !newEntries.find((oldEntry) => matchFn(oldEntry, entry)))) { - newEntries.unshift(entry) - } - } - - const newIndex = '\n' + newEntries.map((entry) => { - const stringified = JSON.stringify(entry) - const hash = hashEntry(stringified) - return `${hash}\t${stringified}` - }).join('\n') - - const setup = async () => { - const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await mkdir(path.dirname(target), { recursive: true }) - return { - target, - moved: false, - } - } - - const teardown = async (tmp) => { - if (!tmp.moved) { - return rm(tmp.target, { recursive: true, force: true }) - } - } - - const write = async (tmp) => { - await writeFile(tmp.target, newIndex, { flag: 'wx' }) - await mkdir(path.dirname(bucket), { recursive: true }) - // we use @npmcli/move-file directly here because we - // want to overwrite the existing file - await moveFile(tmp.target, bucket) - tmp.moved = true - } - - // write the file atomically - const tmp = await setup() - try { - await write(tmp) - } finally { - await teardown(tmp) - } - - // we reverse the list we generated such that the newest - // entries come first in order to make looping through them easier - // the true passed to formatEntry tells it to keep null - // integrity values, if they made it this far it's because - // validateEntry returned true, and as such we should return it - return newEntries.reverse().map((entry) => formatEntry(cache, entry, true)) -} - -module.exports.insert = insert - -async function insert (cache, key, integrity, opts = {}) { - const { metadata, size, time } = opts - const bucket = bucketPath(cache, key) - const entry = { - key, - integrity: integrity && ssri.stringify(integrity), - time: time || Date.now(), - size, - metadata, - } - try { - await mkdir(path.dirname(bucket), { recursive: true }) - const stringified = JSON.stringify(entry) - // NOTE - Cleverness ahoy! - // - // This works because it's tremendously unlikely for an entry to corrupt - // another while still preserving the string length of the JSON in - // question. So, we just slap the length in there and verify it on read. - // - // Thanks to @isaacs for the whiteboarding session that ended up with - // this. 
- await appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`) - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - - throw err - } - return formatEntry(cache, entry) -} - -module.exports.find = find - -async function find (cache, key) { - const bucket = bucketPath(cache, key) - try { - const entries = await bucketEntries(bucket) - return entries.reduce((latest, next) => { - if (next && next.key === key) { - return formatEntry(cache, next) - } else { - return latest - } - }, null) - } catch (err) { - if (err.code === 'ENOENT') { - return null - } else { - throw err - } - } -} - -module.exports.delete = del - -function del (cache, key, opts = {}) { - if (!opts.removeFully) { - return insert(cache, key, null, opts) - } - - const bucket = bucketPath(cache, key) - return rm(bucket, { recursive: true, force: true }) -} - -module.exports.lsStream = lsStream - -function lsStream (cache) { - const indexDir = bucketDir(cache) - const stream = new Minipass({ objectMode: true }) - - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const buckets = await readdirOrEmpty(indexDir) - await pMap(buckets, async (bucket) => { - const bucketPath = path.join(indexDir, bucket) - const subbuckets = await readdirOrEmpty(bucketPath) - await pMap(subbuckets, async (subbucket) => { - const subbucketPath = path.join(bucketPath, subbucket) - - // "/cachename//./*" - const subbucketEntries = await readdirOrEmpty(subbucketPath) - await pMap(subbucketEntries, async (entry) => { - const entryPath = path.join(subbucketPath, entry) - try { - const entries = await bucketEntries(entryPath) - // using a Map here prevents duplicate keys from showing up - // twice, I guess? - const reduced = entries.reduce((acc, entry) => { - acc.set(entry.key, entry) - return acc - }, new Map()) - // reduced is a map of key => entry - for (const entry of reduced.values()) { - const formatted = formatEntry(cache, entry) - if (formatted) { - stream.write(formatted) - } - } - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - throw err - } - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - stream.end() - return stream - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.ls = ls - -async function ls (cache) { - const entries = await lsStream(cache).collect() - return entries.reduce((acc, xs) => { - acc[xs.key] = xs - return acc - }, {}) -} - -module.exports.bucketEntries = bucketEntries - -async function bucketEntries (bucket, filter) { - const data = await readFile(bucket, 'utf8') - return _bucketEntries(data, filter) -} - -function _bucketEntries (data) { - const entries = [] - data.split('\n').forEach((entry) => { - if (!entry) { - return - } - - const pieces = entry.split('\t') - if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) { - // Hash is no good! Corruption or malice? Doesn't matter! 
- // EJECT EJECT - return - } - let obj - try { - obj = JSON.parse(pieces[1]) - } catch (_) { - // eslint-ignore-next-line no-empty-block - } - // coverage disabled here, no need to test with an entry that parses to something falsey - // istanbul ignore else - if (obj) { - entries.push(obj) - } - }) - return entries -} - -module.exports.bucketDir = bucketDir - -function bucketDir (cache) { - return path.join(cache, `index-v${indexV}`) -} - -module.exports.bucketPath = bucketPath - -function bucketPath (cache, key) { - const hashed = hashKey(key) - return path.join.apply( - path, - [bucketDir(cache)].concat(hashToSegments(hashed)) - ) -} - -module.exports.hashKey = hashKey - -function hashKey (key) { - return hash(key, 'sha256') -} - -module.exports.hashEntry = hashEntry - -function hashEntry (str) { - return hash(str, 'sha1') -} - -function hash (str, digest) { - return crypto - .createHash(digest) - .update(str) - .digest('hex') -} - -function formatEntry (cache, entry, keepAll) { - // Treat null digests as deletions. They'll shadow any previous entries. - if (!entry.integrity && !keepAll) { - return null - } - - return { - key: entry.key, - integrity: entry.integrity, - path: entry.integrity ? contentPath(cache, entry.integrity) : undefined, - size: entry.size, - time: entry.time, - metadata: entry.metadata, - } -} - -function readdirOrEmpty (dir) { - return readdir(dir).catch((err) => { - if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { - return [] - } - - throw err - }) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js deleted file mode 100644 index 80ec206c7ecaaa..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js +++ /dev/null @@ -1,170 +0,0 @@ -'use strict' - -const Collect = require('minipass-collect') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') - -const index = require('./entry-index') -const memo = require('./memoization') -const read = require('./content/read') - -async function getData (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return { - metadata: memoized.entry.metadata, - data: memoized.data, - integrity: memoized.entry.integrity, - size: memoized.entry.size, - } - } - - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - const data = await read(cache, entry.integrity, { integrity, size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return { - data, - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} -module.exports = getData - -async function getDataByDigest (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get.byDigest(cache, key, opts) - if (memoized && memoize !== false) { - return memoized - } - - const res = await read(cache, key, { integrity, size }) - if (memoize) { - memo.put.byDigest(cache, key, res, opts) - } - return res -} -module.exports.byDigest = getDataByDigest - -const getMemoizedStream = (memoized) => { - const stream = new Minipass() - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(memoized.entry.metadata) - ev === 'integrity' && cb(memoized.entry.integrity) - ev === 'size' && cb(memoized.entry.size) - }) - stream.end(memoized.data) - return stream -} - -function getStream 
(cache, key, opts = {}) { - const { memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return getMemoizedStream(memoized) - } - - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const entry = await index.find(cache, key) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - - stream.emit('metadata', entry.metadata) - stream.emit('integrity', entry.integrity) - stream.emit('size', entry.size) - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(entry.metadata) - ev === 'integrity' && cb(entry.integrity) - ev === 'size' && cb(entry.size) - }) - - const src = read.readStream( - cache, - entry.integrity, - { ...opts, size: typeof size !== 'number' ? entry.size : size } - ) - - if (memoize) { - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put(cache, entry, data, opts)) - stream.unshift(memoStream) - } - stream.unshift(src) - return stream - }).catch((err) => stream.emit('error', err)) - - return stream -} - -module.exports.stream = getStream - -function getStreamDigest (cache, integrity, opts = {}) { - const { memoize } = opts - const memoized = memo.get.byDigest(cache, integrity, opts) - if (memoized && memoize !== false) { - const stream = new Minipass() - stream.end(memoized) - return stream - } else { - const stream = read.readStream(cache, integrity, opts) - if (!memoize) { - return stream - } - - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put.byDigest( - cache, - integrity, - data, - opts - )) - return new Pipeline(stream, memoStream) - } -} - -module.exports.stream.byDigest = getStreamDigest - -function info (cache, key, opts = {}) { - const { memoize } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return Promise.resolve(memoized.entry) - } else { - return index.find(cache, key) - } -} -module.exports.info = info - -async function copy (cache, key, dest, opts = {}) { - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - await read.copy(cache, entry.integrity, dest, opts) - return { - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} - -module.exports.copy = copy - -async function copyByDigest (cache, key, dest, opts = {}) { - await read.copy(cache, key, dest, opts) - return key -} - -module.exports.copy.byDigest = copyByDigest - -module.exports.hasContent = read.hasContent diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js deleted file mode 100644 index c9b0da5f3a271b..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' - -const get = require('./get.js') -const put = require('./put.js') -const rm = require('./rm.js') -const verify = require('./verify.js') -const { clearMemoized } = require('./memoization.js') -const tmp = require('./util/tmp.js') -const index = require('./entry-index.js') - -module.exports.index = {} -module.exports.index.compact = index.compact -module.exports.index.insert = index.insert - -module.exports.ls = index.ls -module.exports.ls.stream = index.lsStream - -module.exports.get = get -module.exports.get.byDigest = get.byDigest -module.exports.get.stream = get.stream 
-module.exports.get.stream.byDigest = get.stream.byDigest -module.exports.get.copy = get.copy -module.exports.get.copy.byDigest = get.copy.byDigest -module.exports.get.info = get.info -module.exports.get.hasContent = get.hasContent - -module.exports.put = put -module.exports.put.stream = put.stream - -module.exports.rm = rm.entry -module.exports.rm.all = rm.all -module.exports.rm.entry = module.exports.rm -module.exports.rm.content = rm.content - -module.exports.clearMemoized = clearMemoized - -module.exports.tmp = {} -module.exports.tmp.mkdir = tmp.mkdir -module.exports.tmp.withTmp = tmp.withTmp - -module.exports.verify = verify -module.exports.verify.lastRun = verify.lastRun diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js deleted file mode 100644 index 2ecc60912e4563..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js +++ /dev/null @@ -1,72 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') - -const MEMOIZED = new LRUCache({ - max: 500, - maxSize: 50 * 1024 * 1024, // 50MB - ttl: 3 * 60 * 1000, // 3 minutes - sizeCalculation: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length, -}) - -module.exports.clearMemoized = clearMemoized - -function clearMemoized () { - const old = {} - MEMOIZED.forEach((v, k) => { - old[k] = v - }) - MEMOIZED.clear() - return old -} - -module.exports.put = put - -function put (cache, entry, data, opts) { - pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data }) - putDigest(cache, entry.integrity, data, opts) -} - -module.exports.put.byDigest = putDigest - -function putDigest (cache, integrity, data, opts) { - pickMem(opts).set(`digest:${cache}:${integrity}`, data) -} - -module.exports.get = get - -function get (cache, key, opts) { - return pickMem(opts).get(`key:${cache}:${key}`) -} - -module.exports.get.byDigest = getDigest - -function getDigest (cache, integrity, opts) { - return pickMem(opts).get(`digest:${cache}:${integrity}`) -} - -class ObjProxy { - constructor (obj) { - this.obj = obj - } - - get (key) { - return this.obj[key] - } - - set (key, val) { - this.obj[key] = val - } -} - -function pickMem (opts) { - if (!opts || !opts.memoize) { - return MEMOIZED - } else if (opts.memoize.get && opts.memoize.set) { - return opts.memoize - } else if (typeof opts.memoize === 'object') { - return new ObjProxy(opts.memoize) - } else { - return MEMOIZED - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js deleted file mode 100644 index 9fc932d5f6dec5..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js +++ /dev/null @@ -1,80 +0,0 @@ -'use strict' - -const index = require('./entry-index') -const memo = require('./memoization') -const write = require('./content/write') -const Flush = require('minipass-flush') -const { PassThrough } = require('minipass-collect') -const Pipeline = require('minipass-pipeline') - -const putOpts = (opts) => ({ - algorithms: ['sha512'], - ...opts, -}) - -module.exports = putData - -async function putData (cache, key, data, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - const res = await write(cache, data, opts) - const entry = await index.insert(cache, key, res.integrity, { ...opts, size: res.size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return 
res.integrity -} - -module.exports.stream = putStream - -function putStream (cache, key, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - let integrity - let size - let error - - let memoData - const pipeline = new Pipeline() - // first item in the pipeline is the memoizer, because we need - // that to end first and get the collected data. - if (memoize) { - const memoizer = new PassThrough().on('collect', data => { - memoData = data - }) - pipeline.push(memoizer) - } - - // contentStream is a write-only, not a passthrough - // no data comes out of it. - const contentStream = write.stream(cache, opts) - .on('integrity', (int) => { - integrity = int - }) - .on('size', (s) => { - size = s - }) - .on('error', (err) => { - error = err - }) - - pipeline.push(contentStream) - - // last but not least, we write the index and emit hash and size, - // and memoize if we're doing that - pipeline.push(new Flush({ - async flush () { - if (!error) { - const entry = await index.insert(cache, key, integrity, { ...opts, size }) - if (memoize && memoData) { - memo.put(cache, entry, memoData, opts) - } - pipeline.emit('integrity', integrity) - pipeline.emit('size', size) - } - }, - })) - - return pipeline -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js deleted file mode 100644 index a94760c7cf2430..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -const { rm } = require('fs/promises') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const memo = require('./memoization') -const path = require('path') -const rmContent = require('./content/rm') - -module.exports = entry -module.exports.entry = entry - -function entry (cache, key, opts) { - memo.clearMemoized() - return index.delete(cache, key, opts) -} - -module.exports.content = content - -function content (cache, integrity) { - memo.clearMemoized() - return rmContent(cache, integrity) -} - -module.exports.all = all - -async function all (cache) { - memo.clearMemoized() - const paths = await glob(path.join(cache, '*(content-*|index-*)'), { silent: true, nosort: true }) - return Promise.all(paths.map((p) => rm(p, { recursive: true, force: true }))) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js deleted file mode 100644 index 8500c1c16a429f..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { glob } = require('glob') -const path = require('path') - -const globify = (pattern) => pattern.split(path.win32.sep).join(path.posix.sep) -module.exports = (path, options) => glob(globify(path), options) diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js deleted file mode 100644 index 445599b5038088..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -module.exports = hashToSegments - -function hashToSegments (hash) { - return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)] -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js 
b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js deleted file mode 100644 index 0bf5302136ebeb..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js +++ /dev/null @@ -1,26 +0,0 @@ -'use strict' - -const { withTempDir } = require('@npmcli/fs') -const fs = require('fs/promises') -const path = require('path') - -module.exports.mkdir = mktmpdir - -async function mktmpdir (cache, opts = {}) { - const { tmpPrefix } = opts - const tmpDir = path.join(cache, 'tmp') - await fs.mkdir(tmpDir, { recursive: true, owner: 'inherit' }) - // do not use path.join(), it drops the trailing / if tmpPrefix is unset - const target = `${tmpDir}${path.sep}${tmpPrefix || ''}` - return fs.mkdtemp(target, { owner: 'inherit' }) -} - -module.exports.withTmp = withTmp - -function withTmp (cache, opts, cb) { - if (!cb) { - cb = opts - opts = {} - } - return withTempDir(path.join(cache, 'tmp'), cb, opts) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js deleted file mode 100644 index d7423da1295b68..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js +++ /dev/null @@ -1,257 +0,0 @@ -'use strict' - -const { - mkdir, - readFile, - rm, - stat, - truncate, - writeFile, -} = require('fs/promises') -const pMap = require('p-map') -const contentPath = require('./content/path') -const fsm = require('fs-minipass') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const path = require('path') -const ssri = require('ssri') - -const hasOwnProperty = (obj, key) => - Object.prototype.hasOwnProperty.call(obj, key) - -const verifyOpts = (opts) => ({ - concurrency: 20, - log: { silly () {} }, - ...opts, -}) - -module.exports = verify - -async function verify (cache, opts) { - opts = verifyOpts(opts) - opts.log.silly('verify', 'verifying cache at', cache) - - const steps = [ - markStartTime, - fixPerms, - garbageCollect, - rebuildIndex, - cleanTmp, - writeVerifile, - markEndTime, - ] - - const stats = {} - for (const step of steps) { - const label = step.name - const start = new Date() - const s = await step(cache, opts) - if (s) { - Object.keys(s).forEach((k) => { - stats[k] = s[k] - }) - } - const end = new Date() - if (!stats.runTime) { - stats.runTime = {} - } - stats.runTime[label] = end - start - } - stats.runTime.total = stats.endTime - stats.startTime - opts.log.silly( - 'verify', - 'verification finished for', - cache, - 'in', - `${stats.runTime.total}ms` - ) - return stats -} - -async function markStartTime () { - return { startTime: new Date() } -} - -async function markEndTime () { - return { endTime: new Date() } -} - -async function fixPerms (cache, opts) { - opts.log.silly('verify', 'fixing cache permissions') - await mkdir(cache, { recursive: true }) - return null -} - -// Implements a naive mark-and-sweep tracing garbage collector. -// -// The algorithm is basically as follows: -// 1. Read (and filter) all index entries ("pointers") -// 2. Mark each integrity value as "live" -// 3. Read entire filesystem tree in `content-vX/` dir -// 4. If content is live, verify its checksum and delete it if it fails -// 5. If content is not marked as live, rm it. 
-// -async function garbageCollect (cache, opts) { - opts.log.silly('verify', 'garbage collecting content') - const indexStream = index.lsStream(cache) - const liveContent = new Set() - indexStream.on('data', (entry) => { - if (opts.filter && !opts.filter(entry)) { - return - } - - // integrity is stringified, re-parse it so we can get each hash - const integrity = ssri.parse(entry.integrity) - for (const algo in integrity) { - liveContent.add(integrity[algo].toString()) - } - }) - await new Promise((resolve, reject) => { - indexStream.on('end', resolve).on('error', reject) - }) - const contentDir = contentPath.contentDir(cache) - const files = await glob(path.join(contentDir, '**'), { - follow: false, - nodir: true, - nosort: true, - }) - const stats = { - verifiedContent: 0, - reclaimedCount: 0, - reclaimedSize: 0, - badContentCount: 0, - keptSize: 0, - } - await pMap( - files, - async (f) => { - const split = f.split(/[/\\]/) - const digest = split.slice(split.length - 3).join('') - const algo = split[split.length - 4] - const integrity = ssri.fromHex(digest, algo) - if (liveContent.has(integrity.toString())) { - const info = await verifyContent(f, integrity) - if (!info.valid) { - stats.reclaimedCount++ - stats.badContentCount++ - stats.reclaimedSize += info.size - } else { - stats.verifiedContent++ - stats.keptSize += info.size - } - } else { - // No entries refer to this content. We can delete. - stats.reclaimedCount++ - const s = await stat(f) - await rm(f, { recursive: true, force: true }) - stats.reclaimedSize += s.size - } - return stats - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function verifyContent (filepath, sri) { - const contentInfo = {} - try { - const { size } = await stat(filepath) - contentInfo.size = size - contentInfo.valid = true - await ssri.checkStream(new fsm.ReadStream(filepath), sri) - } catch (err) { - if (err.code === 'ENOENT') { - return { size: 0, valid: false } - } - if (err.code !== 'EINTEGRITY') { - throw err - } - - await rm(filepath, { recursive: true, force: true }) - contentInfo.valid = false - } - return contentInfo -} - -async function rebuildIndex (cache, opts) { - opts.log.silly('verify', 'rebuilding index') - const entries = await index.ls(cache) - const stats = { - missingContent: 0, - rejectedEntries: 0, - totalEntries: 0, - } - const buckets = {} - for (const k in entries) { - /* istanbul ignore else */ - if (hasOwnProperty(entries, k)) { - const hashed = index.hashKey(k) - const entry = entries[k] - const excluded = opts.filter && !opts.filter(entry) - excluded && stats.rejectedEntries++ - if (buckets[hashed] && !excluded) { - buckets[hashed].push(entry) - } else if (buckets[hashed] && excluded) { - // skip - } else if (excluded) { - buckets[hashed] = [] - buckets[hashed]._path = index.bucketPath(cache, k) - } else { - buckets[hashed] = [entry] - buckets[hashed]._path = index.bucketPath(cache, k) - } - } - } - await pMap( - Object.keys(buckets), - (key) => { - return rebuildBucket(cache, buckets[key], stats, opts) - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function rebuildBucket (cache, bucket, stats) { - await truncate(bucket._path) - // This needs to be serialized because cacache explicitly - // lets very racy bucket conflicts clobber each other. 
- for (const entry of bucket) { - const content = contentPath(cache, entry.integrity) - try { - await stat(content) - await index.insert(cache, entry.key, entry.integrity, { - metadata: entry.metadata, - size: entry.size, - time: entry.time, - }) - stats.totalEntries++ - } catch (err) { - if (err.code === 'ENOENT') { - stats.rejectedEntries++ - stats.missingContent++ - } else { - throw err - } - } - } -} - -function cleanTmp (cache, opts) { - opts.log.silly('verify', 'cleaning tmp directory') - return rm(path.join(cache, 'tmp'), { recursive: true, force: true }) -} - -async function writeVerifile (cache, opts) { - const verifile = path.join(cache, '_lastverified') - opts.log.silly('verify', 'writing verifile to ' + verifile) - return writeFile(verifile, `${Date.now()}`) -} - -module.exports.lastRun = lastRun - -async function lastRun (cache) { - const data = await readFile(path.join(cache, '_lastverified'), { encoding: 'utf8' }) - return new Date(+data) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json deleted file mode 100644 index 6e6219158ed759..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "cacache", - "version": "18.0.4", - "cache-version": { - "content": "2", - "index": "5" - }, - "description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "snap": "tap", - "coverage": "tap", - "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "npmclilint": "npmcli-lint", - "lintfix": "npm run lint -- --fix", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/cacache.git" - }, - "keywords": [ - "cache", - "caching", - "content-addressable", - "sri", - "sri hash", - "subresource integrity", - "cache", - "storage", - "store", - "file store", - "filesystem", - "disk cache", - "disk storage" - ], - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "windowsCI": false, - "version": "4.22.0", - "publish": "true" - }, - "author": "GitHub Inc.", - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE deleted file mode 100644 index 1808eb2844231c..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2017-2022 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js deleted file mode 100644 index bfcfacbcc95e18..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js +++ /dev/null @@ -1,471 +0,0 @@ -const { Request, Response } = require('minipass-fetch') -const { Minipass } = require('minipass') -const MinipassFlush = require('minipass-flush') -const cacache = require('cacache') -const url = require('url') - -const CachingMinipassPipeline = require('../pipeline.js') -const CachePolicy = require('./policy.js') -const cacheKey = require('./key.js') -const remote = require('../remote.js') - -const hasOwnProperty = (obj, prop) => Object.prototype.hasOwnProperty.call(obj, prop) - -// allow list for request headers that will be written to the cache index -// note: we will also store any request headers -// that are named in a response's vary header -const KEEP_REQUEST_HEADERS = [ - 'accept-charset', - 'accept-encoding', - 'accept-language', - 'accept', - 'cache-control', -] - -// allow list for response headers that will be written to the cache index -// note: we must not store the real response's age header, or when we load -// a cache policy based on the metadata it will think the cached response -// is always stale -const KEEP_RESPONSE_HEADERS = [ - 'cache-control', - 'content-encoding', - 'content-language', - 'content-type', - 'date', - 'etag', - 'expires', - 'last-modified', - 'link', - 'location', - 'pragma', - 'vary', -] - -// return an object containing all metadata to be written to the index -const getMetadata = (request, response, options) => { - const metadata = { - time: Date.now(), - url: request.url, - reqHeaders: {}, - resHeaders: {}, - - // options on which we must match the request and vary the response - options: { - compress: options.compress != null ? 
options.compress : request.compress, - }, - } - - // only save the status if it's not a 200 or 304 - if (response.status !== 200 && response.status !== 304) { - metadata.status = response.status - } - - for (const name of KEEP_REQUEST_HEADERS) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - - // if the request's host header differs from the host in the url - // we need to keep it, otherwise it's just noise and we ignore it - const host = request.headers.get('host') - const parsedUrl = new url.URL(request.url) - if (host && parsedUrl.host !== host) { - metadata.reqHeaders.host = host - } - - // if the response has a vary header, make sure - // we store the relevant request headers too - if (response.headers.has('vary')) { - const vary = response.headers.get('vary') - // a vary of "*" means every header causes a different response. - // in that scenario, we do not include any additional headers - // as the freshness check will always fail anyway and we don't - // want to bloat the cache indexes - if (vary !== '*') { - // copy any other request headers that will vary the response - const varyHeaders = vary.trim().toLowerCase().split(/\s*,\s*/) - for (const name of varyHeaders) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - } - } - - for (const name of KEEP_RESPONSE_HEADERS) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - for (const name of options.cacheAdditionalHeaders) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - return metadata -} - -// symbols used to hide objects that may be lazily evaluated in a getter -const _request = Symbol('request') -const _response = Symbol('response') -const _policy = Symbol('policy') - -class CacheEntry { - constructor ({ entry, request, response, options }) { - if (entry) { - this.key = entry.key - this.entry = entry - // previous versions of this module didn't write an explicit timestamp in - // the metadata, so fall back to the entry's timestamp. 
we can't use the - // entry timestamp to determine staleness because cacache will update it - // when it verifies its data - this.entry.metadata.time = this.entry.metadata.time || this.entry.time - } else { - this.key = cacheKey(request) - } - - this.options = options - - // these properties are behind getters that lazily evaluate - this[_request] = request - this[_response] = response - this[_policy] = null - } - - // returns a CacheEntry instance that satisfies the given request - // or undefined if no existing entry satisfies - static async find (request, options) { - try { - // compacts the index and returns an array of unique entries - var matches = await cacache.index.compact(options.cachePath, cacheKey(request), (A, B) => { - const entryA = new CacheEntry({ entry: A, options }) - const entryB = new CacheEntry({ entry: B, options }) - return entryA.policy.satisfies(entryB.request) - }, { - validateEntry: (entry) => { - // clean out entries with a buggy content-encoding value - if (entry.metadata && - entry.metadata.resHeaders && - entry.metadata.resHeaders['content-encoding'] === null) { - return false - } - - // if an integrity is null, it needs to have a status specified - if (entry.integrity === null) { - return !!(entry.metadata && entry.metadata.status) - } - - return true - }, - }) - } catch (err) { - // if the compact request fails, ignore the error and return - return - } - - // a cache mode of 'reload' means to behave as though we have no cache - // on the way to the network. return undefined to allow cacheFetch to - // create a brand new request no matter what. - if (options.cache === 'reload') { - return - } - - // find the specific entry that satisfies the request - let match - for (const entry of matches) { - const _entry = new CacheEntry({ - entry, - options, - }) - - if (_entry.policy.satisfies(request)) { - match = _entry - break - } - } - - return match - } - - // if the user made a PUT/POST/PATCH then we invalidate our - // cache for the same url by deleting the index entirely - static async invalidate (request, options) { - const key = cacheKey(request) - try { - await cacache.rm.entry(options.cachePath, key, { removeFully: true }) - } catch (err) { - // ignore errors - } - } - - get request () { - if (!this[_request]) { - this[_request] = new Request(this.entry.metadata.url, { - method: 'GET', - headers: this.entry.metadata.reqHeaders, - ...this.entry.metadata.options, - }) - } - - return this[_request] - } - - get response () { - if (!this[_response]) { - this[_response] = new Response(null, { - url: this.entry.metadata.url, - counter: this.options.counter, - status: this.entry.metadata.status || 200, - headers: { - ...this.entry.metadata.resHeaders, - 'content-length': this.entry.size, - }, - }) - } - - return this[_response] - } - - get policy () { - if (!this[_policy]) { - this[_policy] = new CachePolicy({ - entry: this.entry, - request: this.request, - response: this.response, - options: this.options, - }) - } - - return this[_policy] - } - - // wraps the response in a pipeline that stores the data - // in the cache while the user consumes it - async store (status) { - // if we got a status other than 200, 301, or 308, - // or the CachePolicy forbid storage, append the - // cache status header and return it untouched - if ( - this.request.method !== 'GET' || - ![200, 301, 308].includes(this.response.status) || - !this.policy.storable() - ) { - this.response.headers.set('x-local-cache-status', 'skip') - return this.response - } - - const size = 
this.response.headers.get('content-length') - const cacheOpts = { - algorithms: this.options.algorithms, - metadata: getMetadata(this.request, this.response, this.options), - size, - integrity: this.options.integrity, - integrityEmitter: this.response.body.hasIntegrityEmitter && this.response.body, - } - - let body = null - // we only set a body if the status is a 200, redirects are - // stored as metadata only - if (this.response.status === 200) { - let cacheWriteResolve, cacheWriteReject - const cacheWritePromise = new Promise((resolve, reject) => { - cacheWriteResolve = resolve - cacheWriteReject = reject - }).catch((err) => { - body.emit('error', err) - }) - - body = new CachingMinipassPipeline({ events: ['integrity', 'size'] }, new MinipassFlush({ - flush () { - return cacheWritePromise - }, - })) - // this is always true since if we aren't reusing the one from the remote fetch, we - // are using the one from cacache - body.hasIntegrityEmitter = true - - const onResume = () => { - const tee = new Minipass() - const cacheStream = cacache.put.stream(this.options.cachePath, this.key, cacheOpts) - // re-emit the integrity and size events on our new response body so they can be reused - cacheStream.on('integrity', i => body.emit('integrity', i)) - cacheStream.on('size', s => body.emit('size', s)) - // stick a flag on here so downstream users will know if they can expect integrity events - tee.pipe(cacheStream) - // TODO if the cache write fails, log a warning but return the response anyway - // eslint-disable-next-line promise/catch-or-return - cacheStream.promise().then(cacheWriteResolve, cacheWriteReject) - body.unshift(tee) - body.unshift(this.response.body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - } else { - await cacache.index.insert(this.options.cachePath, this.key, null, cacheOpts) - } - - // note: we do not set the x-local-cache-hash header because we do not know - // the hash value until after the write to the cache completes, which doesn't - // happen until after the response has been sent and it's too late to write - // the header anyway - this.response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - this.response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - this.response.headers.set('x-local-cache-mode', 'stream') - this.response.headers.set('x-local-cache-status', status) - this.response.headers.set('x-local-cache-time', new Date().toISOString()) - const newResponse = new Response(body, { - url: this.response.url, - status: this.response.status, - headers: this.response.headers, - counter: this.options.counter, - }) - return newResponse - } - - // use the cached data to create a response and return it - async respond (method, options, status) { - let response - if (method === 'HEAD' || [301, 308].includes(this.response.status)) { - // if the request is a HEAD, or the response is a redirect, - // then the metadata in the entry already includes everything - // we need to build a response - response = this.response - } else { - // we're responding with a full cached response, so create a body - // that reads from cacache and attach it to a new Response - const body = new Minipass() - const headers = { ...this.policy.responseHeaders() } - - const onResume = () => { - const cacheStream = cacache.get.stream.byDigest( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - cacheStream.on('error', async (err) => { - cacheStream.pause() - if 
(err.code === 'EINTEGRITY') { - await cacache.rm.content( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - } - if (err.code === 'ENOENT' || err.code === 'EINTEGRITY') { - await CacheEntry.invalidate(this.request, this.options) - } - body.emit('error', err) - cacheStream.resume() - }) - // emit the integrity and size events based on our metadata so we're consistent - body.emit('integrity', this.entry.integrity) - body.emit('size', Number(headers['content-length'])) - cacheStream.pipe(body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - response = new Response(body, { - url: this.entry.metadata.url, - counter: options.counter, - status: 200, - headers, - }) - } - - response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - response.headers.set('x-local-cache-hash', encodeURIComponent(this.entry.integrity)) - response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - response.headers.set('x-local-cache-mode', 'stream') - response.headers.set('x-local-cache-status', status) - response.headers.set('x-local-cache-time', new Date(this.entry.metadata.time).toUTCString()) - return response - } - - // use the provided request along with this cache entry to - // revalidate the stored response. returns a response, either - // from the cache or from the update - async revalidate (request, options) { - const revalidateRequest = new Request(request, { - headers: this.policy.revalidationHeaders(request), - }) - - try { - // NOTE: be sure to remove the headers property from the - // user supplied options, since we have already defined - // them on the new request object. if they're still in the - // options then those will overwrite the ones from the policy - var response = await remote(revalidateRequest, { - ...options, - headers: undefined, - }) - } catch (err) { - // if the network fetch fails, return the stale - // cached response unless it has a cache-control - // of 'must-revalidate' - if (!this.policy.mustRevalidate) { - return this.respond(request.method, options, 'stale') - } - - throw err - } - - if (this.policy.revalidated(revalidateRequest, response)) { - // we got a 304, write a new index to the cache and respond from cache - const metadata = getMetadata(request, response, options) - // 304 responses do not include headers that are specific to the response data - // since they do not include a body, so we copy values for headers that were - // in the old cache entry to the new one, if the new metadata does not already - // include that header - for (const name of KEEP_RESPONSE_HEADERS) { - if ( - !hasOwnProperty(metadata.resHeaders, name) && - hasOwnProperty(this.entry.metadata.resHeaders, name) - ) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - } - - for (const name of options.cacheAdditionalHeaders) { - const inMeta = hasOwnProperty(metadata.resHeaders, name) - const inEntry = hasOwnProperty(this.entry.metadata.resHeaders, name) - const inPolicy = hasOwnProperty(this.policy.response.headers, name) - - // if the header is in the existing entry, but it is not in the metadata - // then we need to write it to the metadata as this will refresh the on-disk cache - if (!inMeta && inEntry) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - // if the header is in the metadata, but not in the policy, then we need to set - // it in the policy so that it's included in the immediate response. 
future - // responses will load a new cache entry, so we don't need to change that - if (!inPolicy && inMeta) { - this.policy.response.headers[name] = metadata.resHeaders[name] - } - } - - try { - await cacache.index.insert(options.cachePath, this.key, this.entry.integrity, { - size: this.entry.size, - metadata, - }) - } catch (err) { - // if updating the cache index fails, we ignore it and - // respond anyway - } - return this.respond(request.method, options, 'revalidated') - } - - // if we got a modified response, create a new entry based on it - const newEntry = new CacheEntry({ - request, - response, - options, - }) - - // respond with the new entry while writing it to the cache - return newEntry.store('updated') - } -} - -module.exports = CacheEntry diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js deleted file mode 100644 index 67a66573bebe66..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js +++ /dev/null @@ -1,11 +0,0 @@ -class NotCachedError extends Error { - constructor (url) { - /* eslint-disable-next-line max-len */ - super(`request to ${url} failed: cache mode is 'only-if-cached' but no cached response is available.`) - this.code = 'ENOTCACHED' - } -} - -module.exports = { - NotCachedError, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js deleted file mode 100644 index 0de49d23fb9336..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js +++ /dev/null @@ -1,49 +0,0 @@ -const { NotCachedError } = require('./errors.js') -const CacheEntry = require('./entry.js') -const remote = require('../remote.js') - -// do whatever is necessary to get a Response and return it -const cacheFetch = async (request, options) => { - // try to find a cached entry that satisfies this request - const entry = await CacheEntry.find(request, options) - if (!entry) { - // no cached result, if the cache mode is 'only-if-cached' that's a failure - if (options.cache === 'only-if-cached') { - throw new NotCachedError(request.url) - } - - // otherwise, we make a request, store it and return it - const response = await remote(request, options) - const newEntry = new CacheEntry({ request, response, options }) - return newEntry.store('miss') - } - - // we have a cached response that satisfies this request, however if the cache - // mode is 'no-cache' then we send the revalidation request no matter what - if (options.cache === 'no-cache') { - return entry.revalidate(request, options) - } - - // if the cached entry is not stale, or if the cache mode is 'force-cache' or - // 'only-if-cached' we can respond with the cached entry. set the status - // based on the result of needsRevalidation and respond - const _needsRevalidation = entry.policy.needsRevalidation(request) - if (options.cache === 'force-cache' || - options.cache === 'only-if-cached' || - !_needsRevalidation) { - return entry.respond(request.method, options, _needsRevalidation ? 
'stale' : 'hit') - } - - // if we got here, the cache entry is stale so revalidate it - return entry.revalidate(request, options) -} - -cacheFetch.invalidate = async (request, options) => { - if (!options.cachePath) { - return - } - - return CacheEntry.invalidate(request, options) -} - -module.exports = cacheFetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js deleted file mode 100644 index f7684d562b7fae..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js +++ /dev/null @@ -1,17 +0,0 @@ -const { URL, format } = require('url') - -// options passed to url.format() when generating a key -const formatOptions = { - auth: false, - fragment: false, - search: true, - unicode: false, -} - -// returns a string to be used as the cache key for the Request -const cacheKey = (request) => { - const parsed = new URL(request.url) - return `make-fetch-happen:request-cache:${format(parsed, formatOptions)}` -} - -module.exports = cacheKey diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js deleted file mode 100644 index ada3c8600dae92..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js +++ /dev/null @@ -1,161 +0,0 @@ -const CacheSemantics = require('http-cache-semantics') -const Negotiator = require('negotiator') -const ssri = require('ssri') - -// options passed to http-cache-semantics constructor -const policyOptions = { - shared: false, - ignoreCargoCult: true, -} - -// a fake empty response, used when only testing the -// request for storability -const emptyResponse = { status: 200, headers: {} } - -// returns a plain object representation of the Request -const requestObject = (request) => { - const _obj = { - method: request.method, - url: request.url, - headers: {}, - compress: request.compress, - } - - request.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -// returns a plain object representation of the Response -const responseObject = (response) => { - const _obj = { - status: response.status, - headers: {}, - } - - response.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -class CachePolicy { - constructor ({ entry, request, response, options }) { - this.entry = entry - this.request = requestObject(request) - this.response = responseObject(response) - this.options = options - this.policy = new CacheSemantics(this.request, this.response, policyOptions) - - if (this.entry) { - // if we have an entry, copy the timestamp to the _responseTime - // this is necessary because the CacheSemantics constructor forces - // the value to Date.now() which means a policy created from a - // cache entry is likely to always identify itself as stale - this.policy._responseTime = this.entry.metadata.time - } - } - - // static method to quickly determine if a request alone is storable - static storable (request, options) { - // no cachePath means no caching - if (!options.cachePath) { - return false - } - - // user explicitly asked not to cache - if (options.cache === 'no-store') { - return false - } - - // we only cache GET and HEAD requests - if (!['GET', 'HEAD'].includes(request.method)) { - return false - } - - // otherwise, let http-cache-semantics make 
the decision - // based on the request's headers - const policy = new CacheSemantics(requestObject(request), emptyResponse, policyOptions) - return policy.storable() - } - - // returns true if the policy satisfies the request - satisfies (request) { - const _req = requestObject(request) - if (this.request.headers.host !== _req.headers.host) { - return false - } - - if (this.request.compress !== _req.compress) { - return false - } - - const negotiatorA = new Negotiator(this.request) - const negotiatorB = new Negotiator(_req) - - if (JSON.stringify(negotiatorA.mediaTypes()) !== JSON.stringify(negotiatorB.mediaTypes())) { - return false - } - - if (JSON.stringify(negotiatorA.languages()) !== JSON.stringify(negotiatorB.languages())) { - return false - } - - if (JSON.stringify(negotiatorA.encodings()) !== JSON.stringify(negotiatorB.encodings())) { - return false - } - - if (this.options.integrity) { - return ssri.parse(this.options.integrity).match(this.entry.integrity) - } - - return true - } - - // returns true if the request and response allow caching - storable () { - return this.policy.storable() - } - - // NOTE: this is a hack to avoid parsing the cache-control - // header ourselves, it returns true if the response's - // cache-control contains must-revalidate - get mustRevalidate () { - return !!this.policy._rescc['must-revalidate'] - } - - // returns true if the cached response requires revalidation - // for the given request - needsRevalidation (request) { - const _req = requestObject(request) - // force method to GET because we only cache GETs - // but can serve a HEAD from a cached GET - _req.method = 'GET' - return !this.policy.satisfiesWithoutRevalidation(_req) - } - - responseHeaders () { - return this.policy.responseHeaders() - } - - // returns a new object containing the appropriate headers - // to send a revalidation request - revalidationHeaders (request) { - const _req = requestObject(request) - return this.policy.revalidationHeaders(_req) - } - - // returns true if the request/response was revalidated - // successfully. returns false if a new response was received - revalidated (request, response) { - const _req = requestObject(request) - const _res = responseObject(response) - const policy = this.policy.revalidatedPolicy(_req, _res) - return !policy.modified - } -} - -module.exports = CachePolicy diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js deleted file mode 100644 index 233ba67e165502..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js +++ /dev/null @@ -1,118 +0,0 @@ -'use strict' - -const { FetchError, Request, isRedirect } = require('minipass-fetch') -const url = require('url') - -const CachePolicy = require('./cache/policy.js') -const cache = require('./cache/index.js') -const remote = require('./remote.js') - -// given a Request, a Response and user options -// return true if the response is a redirect that -// can be followed. 
we throw errors that will result -// in the fetch being rejected if the redirect is -// possible but invalid for some reason -const canFollowRedirect = (request, response, options) => { - if (!isRedirect(response.status)) { - return false - } - - if (options.redirect === 'manual') { - return false - } - - if (options.redirect === 'error') { - throw new FetchError(`redirect mode is set to error: ${request.url}`, - 'no-redirect', { code: 'ENOREDIRECT' }) - } - - if (!response.headers.has('location')) { - throw new FetchError(`redirect location header missing for: ${request.url}`, - 'no-location', { code: 'EINVALIDREDIRECT' }) - } - - if (request.counter >= request.follow) { - throw new FetchError(`maximum redirect reached at: ${request.url}`, - 'max-redirect', { code: 'EMAXREDIRECT' }) - } - - return true -} - -// given a Request, a Response, and the user's options return an object -// with a new Request and a new options object that will be used for -// following the redirect -const getRedirect = (request, response, options) => { - const _opts = { ...options } - const location = response.headers.get('location') - const redirectUrl = new url.URL(location, /^https?:/.test(location) ? undefined : request.url) - // Comment below is used under the following license: - /** - * @license - * Copyright (c) 2010-2012 Mikeal Rogers - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS - * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language - * governing permissions and limitations under the License. - */ - - // Remove authorization if changing hostnames (but not if just - // changing ports or protocols). This matches the behavior of request: - // https://github.com/request/request/blob/b12a6245/lib/redirect.js#L134-L138 - if (new url.URL(request.url).hostname !== redirectUrl.hostname) { - request.headers.delete('authorization') - request.headers.delete('cookie') - } - - // for POST request with 301/302 response, or any request with 303 response, - // use GET when following redirect - if ( - response.status === 303 || - (request.method === 'POST' && [301, 302].includes(response.status)) - ) { - _opts.method = 'GET' - _opts.body = null - request.headers.delete('content-length') - } - - _opts.headers = {} - request.headers.forEach((value, key) => { - _opts.headers[key] = value - }) - - _opts.counter = ++request.counter - const redirectReq = new Request(url.format(redirectUrl), _opts) - return { - request: redirectReq, - options: _opts, - } -} - -const fetch = async (request, options) => { - const response = CachePolicy.storable(request, options) - ? 
await cache(request, options) - : await remote(request, options) - - // if the request wasn't a GET or HEAD, and the response - // status is between 200 and 399 inclusive, invalidate the - // request url - if (!['GET', 'HEAD'].includes(request.method) && - response.status >= 200 && - response.status <= 399) { - await cache.invalidate(request, options) - } - - if (!canFollowRedirect(request, response, options)) { - return response - } - - const redirect = getRedirect(request, response, options) - return fetch(redirect.request, redirect.options) -} - -module.exports = fetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js deleted file mode 100644 index 2f12e8e1b61131..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js +++ /dev/null @@ -1,41 +0,0 @@ -const { FetchError, Headers, Request, Response } = require('minipass-fetch') - -const configureOptions = require('./options.js') -const fetch = require('./fetch.js') - -const makeFetchHappen = (url, opts) => { - const options = configureOptions(opts) - - const request = new Request(url, options) - return fetch(request, options) -} - -makeFetchHappen.defaults = (defaultUrl, defaultOptions = {}, wrappedFetch = makeFetchHappen) => { - if (typeof defaultUrl === 'object') { - defaultOptions = defaultUrl - defaultUrl = null - } - - const defaultedFetch = (url, options = {}) => { - const finalUrl = url || defaultUrl - const finalOptions = { - ...defaultOptions, - ...options, - headers: { - ...defaultOptions.headers, - ...options.headers, - }, - } - return wrappedFetch(finalUrl, finalOptions) - } - - defaultedFetch.defaults = (defaultUrl1, defaultOptions1 = {}) => - makeFetchHappen.defaults(defaultUrl1, defaultOptions1, defaultedFetch) - return defaultedFetch -} - -module.exports = makeFetchHappen -module.exports.FetchError = FetchError -module.exports.Headers = Headers -module.exports.Request = Request -module.exports.Response = Response diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js deleted file mode 100644 index f77511279f831d..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js +++ /dev/null @@ -1,54 +0,0 @@ -const dns = require('dns') - -const conditionalHeaders = [ - 'if-modified-since', - 'if-none-match', - 'if-unmodified-since', - 'if-match', - 'if-range', -] - -const configureOptions = (opts) => { - const { strictSSL, ...options } = { ...opts } - options.method = options.method ? 
options.method.toUpperCase() : 'GET' - options.rejectUnauthorized = strictSSL !== false - - if (!options.retry) { - options.retry = { retries: 0 } - } else if (typeof options.retry === 'string') { - const retries = parseInt(options.retry, 10) - if (isFinite(retries)) { - options.retry = { retries } - } else { - options.retry = { retries: 0 } - } - } else if (typeof options.retry === 'number') { - options.retry = { retries: options.retry } - } else { - options.retry = { retries: 0, ...options.retry } - } - - options.dns = { ttl: 5 * 60 * 1000, lookup: dns.lookup, ...options.dns } - - options.cache = options.cache || 'default' - if (options.cache === 'default') { - const hasConditionalHeader = Object.keys(options.headers || {}).some((name) => { - return conditionalHeaders.includes(name.toLowerCase()) - }) - if (hasConditionalHeader) { - options.cache = 'no-store' - } - } - - options.cacheAdditionalHeaders = options.cacheAdditionalHeaders || [] - - // cacheManager is deprecated, but if it's set and - // cachePath is not we should copy it to the new field - if (options.cacheManager && !options.cachePath) { - options.cachePath = options.cacheManager - } - - return options -} - -module.exports = configureOptions diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js deleted file mode 100644 index b1d221b2d0ce31..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' - -const MinipassPipeline = require('minipass-pipeline') - -class CachingMinipassPipeline extends MinipassPipeline { - #events = [] - #data = new Map() - - constructor (opts, ...streams) { - // CRITICAL: do NOT pass the streams to the call to super(), this will start - // the flow of data and potentially cause the events we need to catch to emit - // before we've finished our own setup. 
instead we call super() with no args, - // finish our setup, and then push the streams into ourselves to start the - // data flow - super() - this.#events = opts.events - - /* istanbul ignore next - coverage disabled because this is pointless to test here */ - if (streams.length) { - this.push(...streams) - } - } - - on (event, handler) { - if (this.#events.includes(event) && this.#data.has(event)) { - return handler(...this.#data.get(event)) - } - - return super.on(event, handler) - } - - emit (event, ...data) { - if (this.#events.includes(event)) { - this.#data.set(event, data) - } - - return super.emit(event, ...data) - } -} - -module.exports = CachingMinipassPipeline diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js deleted file mode 100644 index 8554564074de6e..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js +++ /dev/null @@ -1,131 +0,0 @@ -const { Minipass } = require('minipass') -const fetch = require('minipass-fetch') -const promiseRetry = require('promise-retry') -const ssri = require('ssri') -const { log } = require('proc-log') - -const CachingMinipassPipeline = require('./pipeline.js') -const { getAgent } = require('@npmcli/agent') -const pkg = require('../package.json') - -const USER_AGENT = `${pkg.name}/${pkg.version} (+https://npm.im/${pkg.name})` - -const RETRY_ERRORS = [ - 'ECONNRESET', // remote socket closed on us - 'ECONNREFUSED', // remote host refused to open connection - 'EADDRINUSE', // failed to bind to a local port (proxy?) - 'ETIMEDOUT', // someone in the transaction is WAY TOO SLOW - // from @npmcli/agent - 'ECONNECTIONTIMEOUT', - 'EIDLETIMEOUT', - 'ERESPONSETIMEOUT', - 'ETRANSFERTIMEOUT', - // Known codes we do NOT retry on: - // ENOTFOUND (getaddrinfo failure. Either bad hostname, or offline) - // EINVALIDPROXY // invalid protocol from @npmcli/agent - // EINVALIDRESPONSE // invalid status code from @npmcli/agent -] - -const RETRY_TYPES = [ - 'request-timeout', -] - -// make a request directly to the remote source, -// retrying certain classes of errors as well as -// following redirects (through the cache if necessary) -// and verifying response integrity -const remoteFetch = (request, options) => { - const agent = getAgent(request.url, options) - if (!request.headers.has('connection')) { - request.headers.set('connection', agent ? 
'keep-alive' : 'close') - } - - if (!request.headers.has('user-agent')) { - request.headers.set('user-agent', USER_AGENT) - } - - // keep our own options since we're overriding the agent - // and the redirect mode - const _opts = { - ...options, - agent, - redirect: 'manual', - } - - return promiseRetry(async (retryHandler, attemptNum) => { - const req = new fetch.Request(request, _opts) - try { - let res = await fetch(req, _opts) - if (_opts.integrity && res.status === 200) { - // we got a 200 response and the user has specified an expected - // integrity value, so wrap the response in an ssri stream to verify it - const integrityStream = ssri.integrityStream({ - algorithms: _opts.algorithms, - integrity: _opts.integrity, - size: _opts.size, - }) - const pipeline = new CachingMinipassPipeline({ - events: ['integrity', 'size'], - }, res.body, integrityStream) - // we also propagate the integrity and size events out to the pipeline so we can use - // this new response body as an integrityEmitter for cacache - integrityStream.on('integrity', i => pipeline.emit('integrity', i)) - integrityStream.on('size', s => pipeline.emit('size', s)) - res = new fetch.Response(pipeline, res) - // set an explicit flag so we know if our response body will emit integrity and size - res.body.hasIntegrityEmitter = true - } - - res.headers.set('x-fetch-attempts', attemptNum) - - // do not retry POST requests, or requests with a streaming body - // do retry requests with a 408, 420, 429 or 500+ status in the response - const isStream = Minipass.isStream(req.body) - const isRetriable = req.method !== 'POST' && - !isStream && - ([408, 420, 429].includes(res.status) || res.status >= 500) - - if (isRetriable) { - if (typeof options.onRetry === 'function') { - options.onRetry(res) - } - - /* eslint-disable-next-line max-len */ - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${res.status}`) - return retryHandler(res) - } - - return res - } catch (err) { - const code = (err.code === 'EPROMISERETRY') - ? 
err.retried.code - : err.code - - // err.retried will be the thing that was thrown from above - // if it's a response, we just got a bad status code and we - // can re-throw to allow the retry - const isRetryError = err.retried instanceof fetch.Response || - (RETRY_ERRORS.includes(code) && RETRY_TYPES.includes(err.type)) - - if (req.method === 'POST' || isRetryError) { - throw err - } - - if (typeof options.onRetry === 'function') { - options.onRetry(err) - } - - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${err.code}`) - return retryHandler(err) - } - }, options.retry).catch((err) => { - // don't reject for http errors, just return them - if (err.status >= 400 && err.type !== 'system') { - return err - } - - throw err - }) -} - -module.exports = remoteFetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json deleted file mode 100644 index 7adb4d1e7f9719..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "name": "make-fetch-happen", - "version": "13.0.1", - "description": "Opinionated, caching, retrying fetch client", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "posttest": "npm run lint", - "eslint": "eslint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "postlint": "template-oss-check", - "snap": "tap", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/make-fetch-happen.git" - }, - "keywords": [ - "http", - "request", - "fetch", - "mean girls", - "caching", - "cache", - "subresource integrity" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "@npmcli/agent": "^2.0.0", - "cacache": "^18.0.0", - "http-cache-semantics": "^4.1.1", - "is-lambda": "^1.0.1", - "minipass": "^7.0.2", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "proc-log": "^4.2.0", - "promise-retry": "^2.0.1", - "ssri": "^10.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.4", - "nock": "^13.2.4", - "safe-buffer": "^5.2.1", - "standard-version": "^9.3.2", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "tap": { - "color": 1, - "files": "test/*.js", - "check-coverage": true, - "timeout": 60, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.4", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE deleted file mode 100644 index 3c3410cdc12ee3..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Isaac Z. 
Schlueter and Contributors -Copyright (c) 2016 David Frank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---- - -Note: This is a derivative work based on "node-fetch" by David Frank, -modified and distributed under the terms of the MIT license above. -https://github.com/bitinn/node-fetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js deleted file mode 100644 index b18f643269e375..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' -class AbortError extends Error { - constructor (message) { - super(message) - this.code = 'FETCH_ABORTED' - this.type = 'aborted' - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'AbortError' - } - - // don't allow name to be overridden, but don't throw either - set name (s) {} -} -module.exports = AbortError diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js deleted file mode 100644 index 121b1730102e72..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js +++ /dev/null @@ -1,97 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const TYPE = Symbol('type') -const BUFFER = Symbol('buffer') - -class Blob { - constructor (blobParts, options) { - this[TYPE] = '' - - const buffers = [] - let size = 0 - - if (blobParts) { - const a = blobParts - const length = Number(a.length) - for (let i = 0; i < length; i++) { - const element = a[i] - const buffer = element instanceof Buffer ? element - : ArrayBuffer.isView(element) - ? Buffer.from(element.buffer, element.byteOffset, element.byteLength) - : element instanceof ArrayBuffer ? Buffer.from(element) - : element instanceof Blob ? element[BUFFER] - : typeof element === 'string' ? 
Buffer.from(element) - : Buffer.from(String(element)) - size += buffer.length - buffers.push(buffer) - } - } - - this[BUFFER] = Buffer.concat(buffers, size) - - const type = options && options.type !== undefined - && String(options.type).toLowerCase() - if (type && !/[^\u0020-\u007E]/.test(type)) { - this[TYPE] = type - } - } - - get size () { - return this[BUFFER].length - } - - get type () { - return this[TYPE] - } - - text () { - return Promise.resolve(this[BUFFER].toString()) - } - - arrayBuffer () { - const buf = this[BUFFER] - const off = buf.byteOffset - const len = buf.byteLength - const ab = buf.buffer.slice(off, off + len) - return Promise.resolve(ab) - } - - stream () { - return new Minipass().end(this[BUFFER]) - } - - slice (start, end, type) { - const size = this.size - const relativeStart = start === undefined ? 0 - : start < 0 ? Math.max(size + start, 0) - : Math.min(start, size) - const relativeEnd = end === undefined ? size - : end < 0 ? Math.max(size + end, 0) - : Math.min(end, size) - const span = Math.max(relativeEnd - relativeStart, 0) - - const buffer = this[BUFFER] - const slicedBuffer = buffer.slice( - relativeStart, - relativeStart + span - ) - const blob = new Blob([], { type }) - blob[BUFFER] = slicedBuffer - return blob - } - - get [Symbol.toStringTag] () { - return 'Blob' - } - - static get BUFFER () { - return BUFFER - } -} - -Object.defineProperties(Blob.prototype, { - size: { enumerable: true }, - type: { enumerable: true }, -}) - -module.exports = Blob diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js deleted file mode 100644 index 62286bd1de0d91..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js +++ /dev/null @@ -1,350 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const MinipassSized = require('minipass-sized') - -const Blob = require('./blob.js') -const { BUFFER } = Blob -const FetchError = require('./fetch-error.js') - -// optional dependency on 'encoding' -let convert -try { - convert = require('encoding').convert -} catch (e) { - // defer error until textConverted is called -} - -const INTERNALS = Symbol('Body internals') -const CONSUME_BODY = Symbol('consumeBody') - -class Body { - constructor (bodyArg, options = {}) { - const { size = 0, timeout = 0 } = options - const body = bodyArg === undefined || bodyArg === null ? null - : isURLSearchParams(bodyArg) ? Buffer.from(bodyArg.toString()) - : isBlob(bodyArg) ? bodyArg - : Buffer.isBuffer(bodyArg) ? bodyArg - : Object.prototype.toString.call(bodyArg) === '[object ArrayBuffer]' - ? Buffer.from(bodyArg) - : ArrayBuffer.isView(bodyArg) - ? Buffer.from(bodyArg.buffer, bodyArg.byteOffset, bodyArg.byteLength) - : Minipass.isStream(bodyArg) ? bodyArg - : Buffer.from(String(bodyArg)) - - this[INTERNALS] = { - body, - disturbed: false, - error: null, - } - - this.size = size - this.timeout = timeout - - if (Minipass.isStream(body)) { - body.on('error', er => { - const error = er.name === 'AbortError' ? 
er - : new FetchError(`Invalid response while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - this[INTERNALS].error = error - }) - } - } - - get body () { - return this[INTERNALS].body - } - - get bodyUsed () { - return this[INTERNALS].disturbed - } - - arrayBuffer () { - return this[CONSUME_BODY]().then(buf => - buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength)) - } - - blob () { - const ct = this.headers && this.headers.get('content-type') || '' - return this[CONSUME_BODY]().then(buf => Object.assign( - new Blob([], { type: ct.toLowerCase() }), - { [BUFFER]: buf } - )) - } - - async json () { - const buf = await this[CONSUME_BODY]() - try { - return JSON.parse(buf.toString()) - } catch (er) { - throw new FetchError( - `invalid json response body at ${this.url} reason: ${er.message}`, - 'invalid-json' - ) - } - } - - text () { - return this[CONSUME_BODY]().then(buf => buf.toString()) - } - - buffer () { - return this[CONSUME_BODY]() - } - - textConverted () { - return this[CONSUME_BODY]().then(buf => convertBody(buf, this.headers)) - } - - [CONSUME_BODY] () { - if (this[INTERNALS].disturbed) { - return Promise.reject(new TypeError(`body used already for: ${ - this.url}`)) - } - - this[INTERNALS].disturbed = true - - if (this[INTERNALS].error) { - return Promise.reject(this[INTERNALS].error) - } - - // body is null - if (this.body === null) { - return Promise.resolve(Buffer.alloc(0)) - } - - if (Buffer.isBuffer(this.body)) { - return Promise.resolve(this.body) - } - - const upstream = isBlob(this.body) ? this.body.stream() : this.body - - /* istanbul ignore if: should never happen */ - if (!Minipass.isStream(upstream)) { - return Promise.resolve(Buffer.alloc(0)) - } - - const stream = this.size && upstream instanceof MinipassSized ? upstream - : !this.size && upstream instanceof Minipass && - !(upstream instanceof MinipassSized) ? upstream - : this.size ? new MinipassSized({ size: this.size }) - : new Minipass() - - // allow timeout on slow response body, but only if the stream is still writable. this - // makes the timeout center on the socket stream from lib/index.js rather than the - // intermediary minipass stream we create to receive the data - const resTimeout = this.timeout && stream.writable ? setTimeout(() => { - stream.emit('error', new FetchError( - `Response timeout while trying to fetch ${ - this.url} (over ${this.timeout}ms)`, 'body-timeout')) - }, this.timeout) : null - - // do not keep the process open just for this timeout, even - // though we expect it'll get cleared eventually. - if (resTimeout && resTimeout.unref) { - resTimeout.unref() - } - - // do the pipe in the promise, because the pipe() can send too much - // data through right away and upset the MP Sized object - return new Promise((resolve) => { - // if the stream is some other kind of stream, then pipe through a MP - // so we can collect it more easily. 
- if (stream !== upstream) { - upstream.on('error', er => stream.emit('error', er)) - upstream.pipe(stream) - } - resolve() - }).then(() => stream.concat()).then(buf => { - clearTimeout(resTimeout) - return buf - }).catch(er => { - clearTimeout(resTimeout) - // request was aborted, reject with this Error - if (er.name === 'AbortError' || er.name === 'FetchError') { - throw er - } else if (er.name === 'RangeError') { - throw new FetchError(`Could not create Buffer from response body for ${ - this.url}: ${er.message}`, 'system', er) - } else { - // other errors, such as incorrect content-encoding or content-length - throw new FetchError(`Invalid response body while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - } - }) - } - - static clone (instance) { - if (instance.bodyUsed) { - throw new Error('cannot clone body after it is used') - } - - const body = instance.body - - // check that body is a stream and not form-data object - // NB: can't clone the form-data object without having it as a dependency - if (Minipass.isStream(body) && typeof body.getBoundary !== 'function') { - // create a dedicated tee stream so that we don't lose data - // potentially sitting in the body stream's buffer by writing it - // immediately to p1 and not having it for p2. - const tee = new Minipass() - const p1 = new Minipass() - const p2 = new Minipass() - tee.on('error', er => { - p1.emit('error', er) - p2.emit('error', er) - }) - body.on('error', er => tee.emit('error', er)) - tee.pipe(p1) - tee.pipe(p2) - body.pipe(tee) - // set instance body to one fork, return the other - instance[INTERNALS].body = p1 - return p2 - } else { - return instance.body - } - } - - static extractContentType (body) { - return body === null || body === undefined ? null - : typeof body === 'string' ? 'text/plain;charset=UTF-8' - : isURLSearchParams(body) - ? 'application/x-www-form-urlencoded;charset=UTF-8' - : isBlob(body) ? body.type || null - : Buffer.isBuffer(body) ? null - : Object.prototype.toString.call(body) === '[object ArrayBuffer]' ? null - : ArrayBuffer.isView(body) ? null - : typeof body.getBoundary === 'function' - ? `multipart/form-data;boundary=${body.getBoundary()}` - : Minipass.isStream(body) ? null - : 'text/plain;charset=UTF-8' - } - - static getTotalBytes (instance) { - const { body } = instance - return (body === null || body === undefined) ? 0 - : isBlob(body) ? body.size - : Buffer.isBuffer(body) ? body.length - : body && typeof body.getLengthSync === 'function' && ( - // detect form data input from form-data module - body._lengthRetrievers && - /* istanbul ignore next */ body._lengthRetrievers.length === 0 || // 1.x - body.hasKnownLength && body.hasKnownLength()) // 2.x - ? body.getLengthSync() - : null - } - - static writeToStream (dest, instance) { - const { body } = instance - - if (body === null || body === undefined) { - dest.end() - } else if (Buffer.isBuffer(body) || typeof body === 'string') { - dest.end(body) - } else { - // body is stream or blob - const stream = isBlob(body) ? body.stream() : body - stream.on('error', er => dest.emit('error', er)).pipe(dest) - } - - return dest - } -} - -Object.defineProperties(Body.prototype, { - body: { enumerable: true }, - bodyUsed: { enumerable: true }, - arrayBuffer: { enumerable: true }, - blob: { enumerable: true }, - json: { enumerable: true }, - text: { enumerable: true }, -}) - -const isURLSearchParams = obj => - // Duck-typing as a necessary condition. 
- (typeof obj !== 'object' || - typeof obj.append !== 'function' || - typeof obj.delete !== 'function' || - typeof obj.get !== 'function' || - typeof obj.getAll !== 'function' || - typeof obj.has !== 'function' || - typeof obj.set !== 'function') ? false - // Brand-checking and more duck-typing as optional condition. - : obj.constructor.name === 'URLSearchParams' || - Object.prototype.toString.call(obj) === '[object URLSearchParams]' || - typeof obj.sort === 'function' - -const isBlob = obj => - typeof obj === 'object' && - typeof obj.arrayBuffer === 'function' && - typeof obj.type === 'string' && - typeof obj.stream === 'function' && - typeof obj.constructor === 'function' && - typeof obj.constructor.name === 'string' && - /^(Blob|File)$/.test(obj.constructor.name) && - /^(Blob|File)$/.test(obj[Symbol.toStringTag]) - -const convertBody = (buffer, headers) => { - /* istanbul ignore if */ - if (typeof convert !== 'function') { - throw new Error('The package `encoding` must be installed to use the textConverted() function') - } - - const ct = headers && headers.get('content-type') - let charset = 'utf-8' - let res - - // header - if (ct) { - res = /charset=([^;]*)/i.exec(ct) - } - - // no charset in content type, peek at response body for at most 1024 bytes - const str = buffer.slice(0, 1024).toString() - - // html5 - if (!res && str) { - res = / this.expect - ? 'max-size' : type - this.message = message - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'FetchError' - } - - // don't allow name to be overwritten - set name (n) {} - - get [Symbol.toStringTag] () { - return 'FetchError' - } -} -module.exports = FetchError diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js deleted file mode 100644 index dd6e854d5ba399..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js +++ /dev/null @@ -1,267 +0,0 @@ -'use strict' -const invalidTokenRegex = /[^^_`a-zA-Z\-0-9!#$%&'*+.|~]/ -const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/ - -const validateName = name => { - name = `${name}` - if (invalidTokenRegex.test(name) || name === '') { - throw new TypeError(`${name} is not a legal HTTP header name`) - } -} - -const validateValue = value => { - value = `${value}` - if (invalidHeaderCharRegex.test(value)) { - throw new TypeError(`${value} is not a legal HTTP header value`) - } -} - -const find = (map, name) => { - name = name.toLowerCase() - for (const key in map) { - if (key.toLowerCase() === name) { - return key - } - } - return undefined -} - -const MAP = Symbol('map') -class Headers { - constructor (init = undefined) { - this[MAP] = Object.create(null) - if (init instanceof Headers) { - const rawHeaders = init.raw() - const headerNames = Object.keys(rawHeaders) - for (const headerName of headerNames) { - for (const value of rawHeaders[headerName]) { - this.append(headerName, value) - } - } - return - } - - // no-op - if (init === undefined || init === null) { - return - } - - if (typeof init === 'object') { - const method = init[Symbol.iterator] - if (method !== null && method !== undefined) { - if (typeof method !== 'function') { - throw new TypeError('Header pairs must be iterable') - } - - // sequence> - // Note: per spec we have to first exhaust the lists then process them - const pairs = [] - for (const pair of init) { - if (typeof pair !== 'object' || - typeof pair[Symbol.iterator] !== 
'function') { - throw new TypeError('Each header pair must be iterable') - } - const arrPair = Array.from(pair) - if (arrPair.length !== 2) { - throw new TypeError('Each header pair must be a name/value tuple') - } - pairs.push(arrPair) - } - - for (const pair of pairs) { - this.append(pair[0], pair[1]) - } - } else { - // record - for (const key of Object.keys(init)) { - this.append(key, init[key]) - } - } - } else { - throw new TypeError('Provided initializer must be an object') - } - } - - get (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key === undefined) { - return null - } - - return this[MAP][key].join(', ') - } - - forEach (callback, thisArg = undefined) { - let pairs = getHeaders(this) - for (let i = 0; i < pairs.length; i++) { - const [name, value] = pairs[i] - callback.call(thisArg, value, name, this) - // refresh in case the callback added more headers - pairs = getHeaders(this) - } - } - - set (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - this[MAP][key !== undefined ? key : name] = [value] - } - - append (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - if (key !== undefined) { - this[MAP][key].push(value) - } else { - this[MAP][name] = [value] - } - } - - has (name) { - name = `${name}` - validateName(name) - return find(this[MAP], name) !== undefined - } - - delete (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key !== undefined) { - delete this[MAP][key] - } - } - - raw () { - return this[MAP] - } - - keys () { - return new HeadersIterator(this, 'key') - } - - values () { - return new HeadersIterator(this, 'value') - } - - [Symbol.iterator] () { - return new HeadersIterator(this, 'key+value') - } - - entries () { - return new HeadersIterator(this, 'key+value') - } - - get [Symbol.toStringTag] () { - return 'Headers' - } - - static exportNodeCompatibleHeaders (headers) { - const obj = Object.assign(Object.create(null), headers[MAP]) - - // http.request() only supports string as Host header. This hack makes - // specifying custom Host header possible. - const hostHeaderKey = find(headers[MAP], 'Host') - if (hostHeaderKey !== undefined) { - obj[hostHeaderKey] = obj[hostHeaderKey][0] - } - - return obj - } - - static createHeadersLenient (obj) { - const headers = new Headers() - for (const name of Object.keys(obj)) { - if (invalidTokenRegex.test(name)) { - continue - } - - if (Array.isArray(obj[name])) { - for (const val of obj[name]) { - if (invalidHeaderCharRegex.test(val)) { - continue - } - - if (headers[MAP][name] === undefined) { - headers[MAP][name] = [val] - } else { - headers[MAP][name].push(val) - } - } - } else if (!invalidHeaderCharRegex.test(obj[name])) { - headers[MAP][name] = [obj[name]] - } - } - return headers - } -} - -Object.defineProperties(Headers.prototype, { - get: { enumerable: true }, - forEach: { enumerable: true }, - set: { enumerable: true }, - append: { enumerable: true }, - has: { enumerable: true }, - delete: { enumerable: true }, - keys: { enumerable: true }, - values: { enumerable: true }, - entries: { enumerable: true }, -}) - -const getHeaders = (headers, kind = 'key+value') => - Object.keys(headers[MAP]).sort().map( - kind === 'key' ? k => k.toLowerCase() - : kind === 'value' ? 
k => headers[MAP][k].join(', ') - : k => [k.toLowerCase(), headers[MAP][k].join(', ')] - ) - -const INTERNAL = Symbol('internal') - -class HeadersIterator { - constructor (target, kind) { - this[INTERNAL] = { - target, - kind, - index: 0, - } - } - - get [Symbol.toStringTag] () { - return 'HeadersIterator' - } - - next () { - /* istanbul ignore if: should be impossible */ - if (!this || Object.getPrototypeOf(this) !== HeadersIterator.prototype) { - throw new TypeError('Value of `this` is not a HeadersIterator') - } - - const { target, kind, index } = this[INTERNAL] - const values = getHeaders(target, kind) - const len = values.length - if (index >= len) { - return { - value: undefined, - done: true, - } - } - - this[INTERNAL].index++ - - return { value: values[index], done: false } - } -} - -// manually extend because 'extends' requires a ctor -Object.setPrototypeOf(HeadersIterator.prototype, - Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))) - -module.exports = Headers diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js deleted file mode 100644 index da402161670e65..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js +++ /dev/null @@ -1,377 +0,0 @@ -'use strict' -const { URL } = require('url') -const http = require('http') -const https = require('https') -const zlib = require('minizlib') -const { Minipass } = require('minipass') - -const Body = require('./body.js') -const { writeToStream, getTotalBytes } = Body -const Response = require('./response.js') -const Headers = require('./headers.js') -const { createHeadersLenient } = Headers -const Request = require('./request.js') -const { getNodeRequestOptions } = Request -const FetchError = require('./fetch-error.js') -const AbortError = require('./abort-error.js') - -// XXX this should really be split up and unit-ized for easier testing -// and better DRY implementation of data/http request aborting -const fetch = async (url, opts) => { - if (/^data:/.test(url)) { - const request = new Request(url, opts) - // delay 1 promise tick so that the consumer can abort right away - return Promise.resolve().then(() => new Promise((resolve, reject) => { - let type, data - try { - const { pathname, search } = new URL(url) - const split = pathname.split(',') - if (split.length < 2) { - throw new Error('invalid data: URI') - } - const mime = split.shift() - const base64 = /;base64$/.test(mime) - type = base64 ? mime.slice(0, -1 * ';base64'.length) : mime - const rawData = decodeURIComponent(split.join(',') + search) - data = base64 ? Buffer.from(rawData, 'base64') : Buffer.from(rawData) - } catch (er) { - return reject(new FetchError(`[${request.method}] ${ - request.url} invalid URL, ${er.message}`, 'system', er)) - } - - const { signal } = request - if (signal && signal.aborted) { - return reject(new AbortError('The user aborted a request.')) - } - - const headers = { 'Content-Length': data.length } - if (type) { - headers['Content-Type'] = type - } - return resolve(new Response(data, { headers })) - })) - } - - return new Promise((resolve, reject) => { - // build request object - const request = new Request(url, opts) - let options - try { - options = getNodeRequestOptions(request) - } catch (er) { - return reject(er) - } - - const send = (options.protocol === 'https:' ? 
https : http).request - const { signal } = request - let response = null - const abort = () => { - const error = new AbortError('The user aborted a request.') - reject(error) - if (Minipass.isStream(request.body) && - typeof request.body.destroy === 'function') { - request.body.destroy(error) - } - if (response && response.body) { - response.body.emit('error', error) - } - } - - if (signal && signal.aborted) { - return abort() - } - - const abortAndFinalize = () => { - abort() - finalize() - } - - const finalize = () => { - req.abort() - if (signal) { - signal.removeEventListener('abort', abortAndFinalize) - } - clearTimeout(reqTimeout) - } - - // send request - const req = send(options) - - if (signal) { - signal.addEventListener('abort', abortAndFinalize) - } - - let reqTimeout = null - if (request.timeout) { - req.once('socket', () => { - reqTimeout = setTimeout(() => { - reject(new FetchError(`network timeout at: ${ - request.url}`, 'request-timeout')) - finalize() - }, request.timeout) - }) - } - - req.on('error', er => { - // if a 'response' event is emitted before the 'error' event, then by the - // time this handler is run it's too late to reject the Promise for the - // response. instead, we forward the error event to the response stream - // so that the error will surface to the user when they try to consume - // the body. this is done as a side effect of aborting the request except - // for in windows, where we must forward the event manually, otherwise - // there is no longer a ref'd socket attached to the request and the - // stream never ends so the event loop runs out of work and the process - // exits without warning. - // coverage skipped here due to the difficulty in testing - // istanbul ignore next - if (req.res) { - req.res.emit('error', er) - } - reject(new FetchError(`request to ${request.url} failed, reason: ${ - er.message}`, 'system', er)) - finalize() - }) - - req.on('response', res => { - clearTimeout(reqTimeout) - - const headers = createHeadersLenient(res.headers) - - // HTTP fetch step 5 - if (fetch.isRedirect(res.statusCode)) { - // HTTP fetch step 5.2 - const location = headers.get('Location') - - // HTTP fetch step 5.3 - let locationURL = null - try { - locationURL = location === null ? null : new URL(location, request.url).toString() - } catch { - // error here can only be invalid URL in Location: header - // do not throw when options.redirect == manual - // let the user extract the errorneous redirect URL - if (request.redirect !== 'manual') { - /* eslint-disable-next-line max-len */ - reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')) - finalize() - return - } - } - - // HTTP fetch step 5.5 - if (request.redirect === 'error') { - reject(new FetchError('uri requested responds with a redirect, ' + - `redirect mode is set to error: ${request.url}`, 'no-redirect')) - finalize() - return - } else if (request.redirect === 'manual') { - // node-fetch-specific step: make manual redirect a bit easier to - // use by setting the Location header value to the resolved URL. 
- if (locationURL !== null) { - // handle corrupted header - try { - headers.set('Location', locationURL) - } catch (err) { - /* istanbul ignore next: nodejs server prevent invalid - response headers, we can't test this through normal - request */ - reject(err) - } - } - } else if (request.redirect === 'follow' && locationURL !== null) { - // HTTP-redirect fetch step 5 - if (request.counter >= request.follow) { - reject(new FetchError(`maximum redirect reached at: ${ - request.url}`, 'max-redirect')) - finalize() - return - } - - // HTTP-redirect fetch step 9 - if (res.statusCode !== 303 && - request.body && - getTotalBytes(request) === null) { - reject(new FetchError( - 'Cannot follow redirect with body being a readable stream', - 'unsupported-redirect' - )) - finalize() - return - } - - // Update host due to redirection - request.headers.set('host', (new URL(locationURL)).host) - - // HTTP-redirect fetch step 6 (counter increment) - // Create a new Request object. - const requestOpts = { - headers: new Headers(request.headers), - follow: request.follow, - counter: request.counter + 1, - agent: request.agent, - compress: request.compress, - method: request.method, - body: request.body, - signal: request.signal, - timeout: request.timeout, - } - - // if the redirect is to a new hostname, strip the authorization and cookie headers - const parsedOriginal = new URL(request.url) - const parsedRedirect = new URL(locationURL) - if (parsedOriginal.hostname !== parsedRedirect.hostname) { - requestOpts.headers.delete('authorization') - requestOpts.headers.delete('cookie') - } - - // HTTP-redirect fetch step 11 - if (res.statusCode === 303 || ( - (res.statusCode === 301 || res.statusCode === 302) && - request.method === 'POST' - )) { - requestOpts.method = 'GET' - requestOpts.body = undefined - requestOpts.headers.delete('content-length') - } - - // HTTP-redirect fetch step 15 - resolve(fetch(new Request(locationURL, requestOpts))) - finalize() - return - } - } // end if(isRedirect) - - // prepare response - res.once('end', () => - signal && signal.removeEventListener('abort', abortAndFinalize)) - - const body = new Minipass() - // if an error occurs, either on the response stream itself, on one of the - // decoder streams, or a response length timeout from the Body class, we - // forward the error through to our internal body stream. If we see an - // error event on that, we call finalize to abort the request and ensure - // we don't leave a socket believing a request is in flight. - // this is difficult to test, so lacks specific coverage. - body.on('error', finalize) - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - res.on('error', /* istanbul ignore next */ er => body.emit('error', er)) - res.on('data', (chunk) => body.write(chunk)) - res.on('end', () => body.end()) - - const responseOptions = { - url: request.url, - status: res.statusCode, - statusText: res.statusMessage, - headers: headers, - size: request.size, - timeout: request.timeout, - counter: request.counter, - trailer: new Promise(resolveTrailer => - res.on('end', () => resolveTrailer(createHeadersLenient(res.trailers)))), - } - - // HTTP-network fetch step 12.1.1.3 - const codings = headers.get('Content-Encoding') - - // HTTP-network fetch step 12.1.1.4: handle content codings - - // in following scenarios we ignore compression support - // 1. compression support is disabled - // 2. HEAD request - // 3. no Content-Encoding header - // 4. 
no content response (204) - // 5. content not modified response (304) - if (!request.compress || - request.method === 'HEAD' || - codings === null || - res.statusCode === 204 || - res.statusCode === 304) { - response = new Response(body, responseOptions) - resolve(response) - return - } - - // Be less strict when decoding compressed responses, since sometimes - // servers send slightly invalid responses that are still accepted - // by common browsers. - // Always using Z_SYNC_FLUSH is what cURL does. - const zlibOptions = { - flush: zlib.constants.Z_SYNC_FLUSH, - finishFlush: zlib.constants.Z_SYNC_FLUSH, - } - - // for gzip - if (codings === 'gzip' || codings === 'x-gzip') { - const unzip = new zlib.Gunzip(zlibOptions) - response = new Response( - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => unzip.emit('error', er)).pipe(unzip), - responseOptions - ) - resolve(response) - return - } - - // for deflate - if (codings === 'deflate' || codings === 'x-deflate') { - // handle the infamous raw deflate response from old servers - // a hack for old IIS and Apache servers - const raw = res.pipe(new Minipass()) - raw.once('data', chunk => { - // see http://stackoverflow.com/questions/37519828 - const decoder = (chunk[0] & 0x0F) === 0x08 - ? new zlib.Inflate() - : new zlib.InflateRaw() - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - }) - return - } - - // for br - if (codings === 'br') { - // ignoring coverage so tests don't have to fake support (or lack of) for brotli - // istanbul ignore next - try { - var decoder = new zlib.BrotliDecompress() - } catch (err) { - reject(err) - finalize() - return - } - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. 
- body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - return - } - - // otherwise, use response as-is - response = new Response(body, responseOptions) - resolve(response) - }) - - writeToStream(req, request) - }) -} - -module.exports = fetch - -fetch.isRedirect = code => - code === 301 || - code === 302 || - code === 303 || - code === 307 || - code === 308 - -fetch.Headers = Headers -fetch.Request = Request -fetch.Response = Response -fetch.FetchError = FetchError -fetch.AbortError = AbortError diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js deleted file mode 100644 index 054439e6699107..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js +++ /dev/null @@ -1,282 +0,0 @@ -'use strict' -const { URL } = require('url') -const { Minipass } = require('minipass') -const Headers = require('./headers.js') -const { exportNodeCompatibleHeaders } = Headers -const Body = require('./body.js') -const { clone, extractContentType, getTotalBytes } = Body - -const version = require('../package.json').version -const defaultUserAgent = - `minipass-fetch/${version} (+https://github.com/isaacs/minipass-fetch)` - -const INTERNALS = Symbol('Request internals') - -const isRequest = input => - typeof input === 'object' && typeof input[INTERNALS] === 'object' - -const isAbortSignal = signal => { - const proto = ( - signal - && typeof signal === 'object' - && Object.getPrototypeOf(signal) - ) - return !!(proto && proto.constructor.name === 'AbortSignal') -} - -class Request extends Body { - constructor (input, init = {}) { - const parsedURL = isRequest(input) ? new URL(input.url) - : input && input.href ? new URL(input.href) - : new URL(`${input}`) - - if (isRequest(input)) { - init = { ...input[INTERNALS], ...init } - } else if (!input || typeof input === 'string') { - input = {} - } - - const method = (init.method || input.method || 'GET').toUpperCase() - const isGETHEAD = method === 'GET' || method === 'HEAD' - - if ((init.body !== null && init.body !== undefined || - isRequest(input) && input.body !== null) && isGETHEAD) { - throw new TypeError('Request with GET/HEAD method cannot have body') - } - - const inputBody = init.body !== null && init.body !== undefined ? init.body - : isRequest(input) && input.body !== null ? clone(input) - : null - - super(inputBody, { - timeout: init.timeout || input.timeout || 0, - size: init.size || input.size || 0, - }) - - const headers = new Headers(init.headers || input.headers || {}) - - if (inputBody !== null && inputBody !== undefined && - !headers.has('Content-Type')) { - const contentType = extractContentType(inputBody) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - const signal = 'signal' in init ? 
init.signal - : null - - if (signal !== null && signal !== undefined && !isAbortSignal(signal)) { - throw new TypeError('Expected signal must be an instanceof AbortSignal') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0', - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = init - - this[INTERNALS] = { - method, - redirect: init.redirect || input.redirect || 'follow', - headers, - parsedURL, - signal, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } - - // node-fetch-only options - this.follow = init.follow !== undefined ? init.follow - : input.follow !== undefined ? input.follow - : 20 - this.compress = init.compress !== undefined ? init.compress - : input.compress !== undefined ? input.compress - : true - this.counter = init.counter || input.counter || 0 - this.agent = init.agent || input.agent - } - - get method () { - return this[INTERNALS].method - } - - get url () { - return this[INTERNALS].parsedURL.toString() - } - - get headers () { - return this[INTERNALS].headers - } - - get redirect () { - return this[INTERNALS].redirect - } - - get signal () { - return this[INTERNALS].signal - } - - clone () { - return new Request(this) - } - - get [Symbol.toStringTag] () { - return 'Request' - } - - static getNodeRequestOptions (request) { - const parsedURL = request[INTERNALS].parsedURL - const headers = new Headers(request[INTERNALS].headers) - - // fetch step 1.3 - if (!headers.has('Accept')) { - headers.set('Accept', '*/*') - } - - // Basic fetch - if (!/^https?:$/.test(parsedURL.protocol)) { - throw new TypeError('Only HTTP(S) protocols are supported') - } - - if (request.signal && - Minipass.isStream(request.body) && - typeof request.body.destroy !== 'function') { - throw new Error( - 'Cancellation of streamed requests with AbortSignal is not supported') - } - - // HTTP-network-or-cache fetch steps 2.4-2.7 - const contentLengthValue = - (request.body === null || request.body === undefined) && - /^(POST|PUT)$/i.test(request.method) ? '0' - : request.body !== null && request.body !== undefined - ? getTotalBytes(request) - : null - - if (contentLengthValue) { - headers.set('Content-Length', contentLengthValue + '') - } - - // HTTP-network-or-cache fetch step 2.11 - if (!headers.has('User-Agent')) { - headers.set('User-Agent', defaultUserAgent) - } - - // HTTP-network-or-cache fetch step 2.15 - if (request.compress && !headers.has('Accept-Encoding')) { - headers.set('Accept-Encoding', 'gzip,deflate') - } - - const agent = typeof request.agent === 'function' - ? 
request.agent(parsedURL) - : request.agent - - if (!headers.has('Connection') && !agent) { - headers.set('Connection', 'close') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = request[INTERNALS] - - // HTTP-network fetch step 4.2 - // chunked encoding is handled by Node.js - - // we cannot spread parsedURL directly, so we have to read each property one-by-one - // and map them to the equivalent https?.request() method options - const urlProps = { - auth: parsedURL.username || parsedURL.password - ? `${parsedURL.username}:${parsedURL.password}` - : '', - host: parsedURL.host, - hostname: parsedURL.hostname, - path: `${parsedURL.pathname}${parsedURL.search}`, - port: parsedURL.port, - protocol: parsedURL.protocol, - } - - return { - ...urlProps, - method: request.method, - headers: exportNodeCompatibleHeaders(headers), - agent, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - timeout: request.timeout, - } - } -} - -module.exports = Request - -Object.defineProperties(Request.prototype, { - method: { enumerable: true }, - url: { enumerable: true }, - headers: { enumerable: true }, - redirect: { enumerable: true }, - clone: { enumerable: true }, - signal: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js deleted file mode 100644 index 54cb52db3594a7..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js +++ /dev/null @@ -1,90 +0,0 @@ -'use strict' -const http = require('http') -const { STATUS_CODES } = http - -const Headers = require('./headers.js') -const Body = require('./body.js') -const { clone, extractContentType } = Body - -const INTERNALS = Symbol('Response internals') - -class Response extends Body { - constructor (body = null, opts = {}) { - super(body, opts) - - const status = opts.status || 200 - const headers = new Headers(opts.headers) - - if (body !== null && body !== undefined && !headers.has('Content-Type')) { - const contentType = extractContentType(body) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - this[INTERNALS] = { - url: opts.url, - status, - statusText: opts.statusText || STATUS_CODES[status], - headers, - counter: opts.counter, - trailer: Promise.resolve(opts.trailer || new Headers()), - } - } - - get trailer () { - return this[INTERNALS].trailer - } - - get url () { - return this[INTERNALS].url || '' - } - - get status () { - return this[INTERNALS].status - } - - get ok () { - return this[INTERNALS].status >= 200 && this[INTERNALS].status < 300 - } - - get redirected () { - return this[INTERNALS].counter > 0 - } - - get statusText () { - return this[INTERNALS].statusText - } - - get headers () { - return this[INTERNALS].headers - } - - clone () { - return new Response(clone(this), { - url: this.url, - status: this.status, - statusText: this.statusText, - headers: this.headers, - ok: this.ok, - redirected: this.redirected, - trailer: this.trailer, - }) - } - - get [Symbol.toStringTag] () { - return 'Response' - } -} - 
-module.exports = Response - -Object.defineProperties(Response.prototype, { - url: { enumerable: true }, - status: { enumerable: true }, - ok: { enumerable: true }, - redirected: { enumerable: true }, - statusText: { enumerable: true }, - headers: { enumerable: true }, - clone: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json deleted file mode 100644 index d491a7fba126d0..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "minipass-fetch", - "version": "3.0.5", - "description": "An implementation of window.fetch in Node.js using Minipass streams", - "license": "MIT", - "main": "lib/index.js", - "scripts": { - "test:tls-fixtures": "./test/fixtures/tls/setup.sh", - "test": "tap", - "snap": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "tap": { - "coverage-map": "map.js", - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "@ungap/url-search-params": "^0.2.2", - "abort-controller": "^3.0.0", - "abortcontroller-polyfill": "~1.7.3", - "encoding": "^0.1.13", - "form-data": "^4.0.0", - "nock": "^13.2.4", - "parted": "^0.1.1", - "string-to-arraybuffer": "^1.0.2", - "tap": "^16.0.0" - }, - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/minipass-fetch.git" - }, - "keywords": [ - "fetch", - "minipass", - "node-fetch", - "window.fetch" - ], - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "author": "GitHub Inc.", - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE deleted file mode 100644 index 83837797202b70..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright (c) GitHub, Inc. - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js deleted file mode 100644 index 86d90861078dab..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js +++ /dev/null @@ -1,153 +0,0 @@ -const META = Symbol('proc-log.meta') -module.exports = { - META: META, - output: { - LEVELS: [ - 'standard', - 'error', - 'buffer', - 'flush', - ], - KEYS: { - standard: 'standard', - error: 'error', - buffer: 'buffer', - flush: 'flush', - }, - standard: function (...args) { - return process.emit('output', 'standard', ...args) - }, - error: function (...args) { - return process.emit('output', 'error', ...args) - }, - buffer: function (...args) { - return process.emit('output', 'buffer', ...args) - }, - flush: function (...args) { - return process.emit('output', 'flush', ...args) - }, - }, - log: { - LEVELS: [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'timing', - 'pause', - 'resume', - ], - KEYS: { - notice: 'notice', - error: 'error', - warn: 'warn', - info: 'info', - verbose: 'verbose', - http: 'http', - silly: 'silly', - timing: 'timing', - pause: 'pause', - resume: 'resume', - }, - error: function (...args) { - return process.emit('log', 'error', ...args) - }, - notice: function (...args) { - return process.emit('log', 'notice', ...args) - }, - warn: function (...args) { - return process.emit('log', 'warn', ...args) - }, - info: function (...args) { - return process.emit('log', 'info', ...args) - }, - verbose: function (...args) { - return process.emit('log', 'verbose', ...args) - }, - http: function (...args) { - return process.emit('log', 'http', ...args) - }, - silly: function (...args) { - return process.emit('log', 'silly', ...args) - }, - timing: function (...args) { - return process.emit('log', 'timing', ...args) - }, - pause: function () { - return process.emit('log', 'pause') - }, - resume: function () { - return process.emit('log', 'resume') - }, - }, - time: { - LEVELS: [ - 'start', - 'end', - ], - KEYS: { - start: 'start', - end: 'end', - }, - start: function (name, fn) { - process.emit('time', 'start', name) - function end () { - return process.emit('time', 'end', name) - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function (name) { - return process.emit('time', 'end', name) - }, - }, - input: { - LEVELS: [ - 'start', - 'end', - 'read', - ], - KEYS: { - start: 'start', - end: 'end', - read: 'read', - }, - start: function (fn) { - process.emit('input', 'start') - function end () { - return process.emit('input', 'end') - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function () { - return process.emit('input', 'end') - }, - read: function (...args) { - let resolve, reject - const promise = new Promise((_resolve, _reject) => { - resolve = _resolve - reject = _reject - }) - process.emit('input', 'read', resolve, reject, ...args) - return promise - }, - }, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json deleted file mode 100644 index 4ab89102ecc9b5..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ 
- "name": "proc-log", - "version": "4.2.0", - "files": [ - "bin/", - "lib/" - ], - "main": "lib/index.js", - "description": "just emit 'log' events on the process object", - "repository": { - "type": "git", - "url": "https://github.com/npm/proc-log.git" - }, - "author": "GitHub Inc.", - "license": "ISC", - "scripts": { - "test": "tap", - "snap": "tap", - "posttest": "npm run lint", - "postsnap": "eslint index.js test/*.js --fix", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "template-oss-apply": "template-oss-apply --force" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": true - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md b/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md deleted file mode 100644 index e335388869f50f..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2021 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js deleted file mode 100644 index 7d749ed480fb98..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js +++ /dev/null @@ -1,580 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { Minipass } = require('minipass') - -const SPEC_ALGORITHMS = ['sha512', 'sha384', 'sha256'] -const DEFAULT_ALGORITHMS = ['sha512'] - -// TODO: this should really be a hardcoded list of algorithms we support, -// rather than [a-z0-9]. -const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i -const SRI_REGEX = /^([a-z0-9]+)-([^?]+)([?\S*]*)$/ -const STRICT_SRI_REGEX = /^([a-z0-9]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)?$/ -const VCHAR_REGEX = /^[\x21-\x7E]+$/ - -const getOptString = options => options?.length ? `?${options.join('?')}` : '' - -class IntegrityStream extends Minipass { - #emittedIntegrity - #emittedSize - #emittedVerified - - constructor (opts) { - super() - this.size = 0 - this.opts = opts - - // may be overridden later, but set now for class consistency - this.#getOptions() - - // options used for calculating stream. can't be changed. 
- if (opts?.algorithms) { - this.algorithms = [...opts.algorithms] - } else { - this.algorithms = [...DEFAULT_ALGORITHMS] - } - if (this.algorithm !== null && !this.algorithms.includes(this.algorithm)) { - this.algorithms.push(this.algorithm) - } - - this.hashes = this.algorithms.map(crypto.createHash) - } - - #getOptions () { - // For verification - this.sri = this.opts?.integrity ? parse(this.opts?.integrity, this.opts) : null - this.expectedSize = this.opts?.size - - if (!this.sri) { - this.algorithm = null - } else if (this.sri.isHash) { - this.goodSri = true - this.algorithm = this.sri.algorithm - } else { - this.goodSri = !this.sri.isEmpty() - this.algorithm = this.sri.pickAlgorithm(this.opts) - } - - this.digests = this.goodSri ? this.sri[this.algorithm] : null - this.optString = getOptString(this.opts?.options) - } - - on (ev, handler) { - if (ev === 'size' && this.#emittedSize) { - return handler(this.#emittedSize) - } - - if (ev === 'integrity' && this.#emittedIntegrity) { - return handler(this.#emittedIntegrity) - } - - if (ev === 'verified' && this.#emittedVerified) { - return handler(this.#emittedVerified) - } - - return super.on(ev, handler) - } - - emit (ev, data) { - if (ev === 'end') { - this.#onEnd() - } - return super.emit(ev, data) - } - - write (data) { - this.size += data.length - this.hashes.forEach(h => h.update(data)) - return super.write(data) - } - - #onEnd () { - if (!this.goodSri) { - this.#getOptions() - } - const newSri = parse(this.hashes.map((h, i) => { - return `${this.algorithms[i]}-${h.digest('base64')}${this.optString}` - }).join(' '), this.opts) - // Integrity verification mode - const match = this.goodSri && newSri.match(this.sri, this.opts) - if (typeof this.expectedSize === 'number' && this.size !== this.expectedSize) { - /* eslint-disable-next-line max-len */ - const err = new Error(`stream size mismatch when checking ${this.sri}.\n Wanted: ${this.expectedSize}\n Found: ${this.size}`) - err.code = 'EBADSIZE' - err.found = this.size - err.expected = this.expectedSize - err.sri = this.sri - this.emit('error', err) - } else if (this.sri && !match) { - /* eslint-disable-next-line max-len */ - const err = new Error(`${this.sri} integrity checksum failed when using ${this.algorithm}: wanted ${this.digests} but got ${newSri}. (${this.size} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = this.digests - err.algorithm = this.algorithm - err.sri = this.sri - this.emit('error', err) - } else { - this.#emittedSize = this.size - this.emit('size', this.size) - this.#emittedIntegrity = newSri - this.emit('integrity', newSri) - if (match) { - this.#emittedVerified = match - this.emit('verified', match) - } - } - } -} - -class Hash { - get isHash () { - return true - } - - constructor (hash, opts) { - const strict = opts?.strict - this.source = hash.trim() - - // set default values so that we make V8 happy to - // always see a familiar object template. - this.digest = '' - this.algorithm = '' - this.options = [] - - // 3.1. Integrity metadata (called "Hash" by ssri) - // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description - const match = this.source.match( - strict - ? 
STRICT_SRI_REGEX - : SRI_REGEX - ) - if (!match) { - return - } - if (strict && !SPEC_ALGORITHMS.includes(match[1])) { - return - } - this.algorithm = match[1] - this.digest = match[2] - - const rawOpts = match[3] - if (rawOpts) { - this.options = rawOpts.slice(1).split('?') - } - } - - hexDigest () { - return this.digest && Buffer.from(this.digest, 'base64').toString('hex') - } - - toJSON () { - return this.toString() - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - if (other.isIntegrity) { - const algo = other.pickAlgorithm(opts, [this.algorithm]) - - if (!algo) { - return false - } - - const foundHash = other[algo].find(hash => hash.digest === this.digest) - - if (foundHash) { - return foundHash - } - - return false - } - return other.digest === this.digest ? other : false - } - - toString (opts) { - if (opts?.strict) { - // Strict mode enforces the standard as close to the foot of the - // letter as it can. - if (!( - // The spec has very restricted productions for algorithms. - // https://www.w3.org/TR/CSP2/#source-list-syntax - SPEC_ALGORITHMS.includes(this.algorithm) && - // Usually, if someone insists on using a "different" base64, we - // leave it as-is, since there's multiple standards, and the - // specified is not a URL-safe variant. - // https://www.w3.org/TR/CSP2/#base64_value - this.digest.match(BASE64_REGEX) && - // Option syntax is strictly visual chars. - // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression - // https://tools.ietf.org/html/rfc5234#appendix-B.1 - this.options.every(opt => opt.match(VCHAR_REGEX)) - )) { - return '' - } - } - return `${this.algorithm}-${this.digest}${getOptString(this.options)}` - } -} - -function integrityHashToString (toString, sep, opts, hashes) { - const toStringIsNotEmpty = toString !== '' - - let shouldAddFirstSep = false - let complement = '' - - const lastIndex = hashes.length - 1 - - for (let i = 0; i < lastIndex; i++) { - const hashString = Hash.prototype.toString.call(hashes[i], opts) - - if (hashString) { - shouldAddFirstSep = true - - complement += hashString - complement += sep - } - } - - const finalHashString = Hash.prototype.toString.call(hashes[lastIndex], opts) - - if (finalHashString) { - shouldAddFirstSep = true - complement += finalHashString - } - - if (toStringIsNotEmpty && shouldAddFirstSep) { - return toString + sep + complement - } - - return toString + complement -} - -class Integrity { - get isIntegrity () { - return true - } - - toJSON () { - return this.toString() - } - - isEmpty () { - return Object.keys(this).length === 0 - } - - toString (opts) { - let sep = opts?.sep || ' ' - let toString = '' - - if (opts?.strict) { - // Entries must be separated by whitespace, according to spec. - sep = sep.replace(/\S+/g, ' ') - - for (const hash of SPEC_ALGORITHMS) { - if (this[hash]) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - } else { - for (const hash of Object.keys(this)) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - - return toString - } - - concat (integrity, opts) { - const other = typeof integrity === 'string' - ? integrity - : stringify(integrity, opts) - return parse(`${this.toString(opts)} ${other}`, opts) - } - - hexDigest () { - return parse(this, { single: true }).hexDigest() - } - - // add additional hashes to an integrity value, but prevent - // *changing* an existing integrity hash. 
- merge (integrity, opts) { - const other = parse(integrity, opts) - for (const algo in other) { - if (this[algo]) { - if (!this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest))) { - throw new Error('hashes do not match, cannot update integrity') - } - } else { - this[algo] = other[algo] - } - } - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - const algo = other.pickAlgorithm(opts, Object.keys(this)) - return ( - !!algo && - this[algo] && - other[algo] && - this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest - ) - ) - ) || false - } - - // Pick the highest priority algorithm present, optionally also limited to a - // set of hashes found in another integrity. When limiting it may return - // nothing. - pickAlgorithm (opts, hashes) { - const pickAlgorithm = opts?.pickAlgorithm || getPrioritizedHash - const keys = Object.keys(this).filter(k => { - if (hashes?.length) { - return hashes.includes(k) - } - return true - }) - if (keys.length) { - return keys.reduce((acc, algo) => pickAlgorithm(acc, algo) || acc) - } - // no intersection between this and hashes, - return null - } -} - -module.exports.parse = parse -function parse (sri, opts) { - if (!sri) { - return null - } - if (typeof sri === 'string') { - return _parse(sri, opts) - } else if (sri.algorithm && sri.digest) { - const fullSri = new Integrity() - fullSri[sri.algorithm] = [sri] - return _parse(stringify(fullSri, opts), opts) - } else { - return _parse(stringify(sri, opts), opts) - } -} - -function _parse (integrity, opts) { - // 3.4.3. Parse metadata - // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata - if (opts?.single) { - return new Hash(integrity, opts) - } - const hashes = integrity.trim().split(/\s+/).reduce((acc, string) => { - const hash = new Hash(string, opts) - if (hash.algorithm && hash.digest) { - const algo = hash.algorithm - if (!acc[algo]) { - acc[algo] = [] - } - acc[algo].push(hash) - } - return acc - }, new Integrity()) - return hashes.isEmpty() ? null : hashes -} - -module.exports.stringify = stringify -function stringify (obj, opts) { - if (obj.algorithm && obj.digest) { - return Hash.prototype.toString.call(obj, opts) - } else if (typeof obj === 'string') { - return stringify(parse(obj, opts), opts) - } else { - return Integrity.prototype.toString.call(obj, opts) - } -} - -module.exports.fromHex = fromHex -function fromHex (hexDigest, algorithm, opts) { - const optString = getOptString(opts?.options) - return parse( - `${algorithm}-${ - Buffer.from(hexDigest, 'hex').toString('base64') - }${optString}`, opts - ) -} - -module.exports.fromData = fromData -function fromData (data, opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - return algorithms.reduce((acc, algo) => { - const digest = crypto.createHash(algo).update(data).digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the string we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) -} - -module.exports.fromStream = fromStream -function fromStream (stream, opts) { - const istream = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(istream) - stream.on('error', reject) - istream.on('error', reject) - let sri - istream.on('integrity', s => { - sri = s - }) - istream.on('end', () => resolve(sri)) - istream.resume() - }) -} - -module.exports.checkData = checkData -function checkData (data, sri, opts) { - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - if (opts?.error) { - throw Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - ) - } else { - return false - } - } - const algorithm = sri.pickAlgorithm(opts) - const digest = crypto.createHash(algorithm).update(data).digest('base64') - const newSri = parse({ algorithm, digest }) - const match = newSri.match(sri, opts) - opts = opts || {} - if (match || !(opts.error)) { - return match - } else if (typeof opts.size === 'number' && (data.length !== opts.size)) { - /* eslint-disable-next-line max-len */ - const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`) - err.code = 'EBADSIZE' - err.found = data.length - err.expected = opts.size - err.sri = sri - throw err - } else { - /* eslint-disable-next-line max-len */ - const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = sri - err.algorithm = algorithm - err.sri = sri - throw err - } -} - -module.exports.checkStream = checkStream -function checkStream (stream, sri, opts) { - opts = opts || Object.create(null) - opts.integrity = sri - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - return Promise.reject(Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - )) - } - const checker = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(checker) - stream.on('error', reject) - checker.on('error', reject) - let verified - checker.on('verified', s => { - verified = s - }) - checker.on('end', () => resolve(verified)) - checker.resume() - }) -} - -module.exports.integrityStream = integrityStream -function integrityStream (opts = Object.create(null)) { - return new IntegrityStream(opts) -} - -module.exports.create = createIntegrity -function createIntegrity (opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - - const hashes = algorithms.map(crypto.createHash) - - return { - update: function (chunk, enc) { - hashes.forEach(h => h.update(chunk, enc)) - return this - }, - digest: function () { - const integrity = algorithms.reduce((acc, algo) => { - const digest = hashes.shift().digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the hash we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) - - return integrity - }, - } -} - -const NODE_HASHES = crypto.getHashes() - -// This is a Best Effort™ at a reasonable priority for hash algos -const DEFAULT_PRIORITY = [ - 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', - // TODO - it's unclear _which_ of these Node will actually use as its name - // for the algorithm, so we guesswork it based on the OpenSSL names. - 'sha3', - 'sha3-256', 'sha3-384', 'sha3-512', - 'sha3_256', 'sha3_384', 'sha3_512', -].filter(algo => NODE_HASHES.includes(algo)) - -function getPrioritizedHash (algo1, algo2) { - /* eslint-disable-next-line max-len */ - return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase()) - ? algo1 - : algo2 -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json deleted file mode 100644 index 28395414e4643c..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "ssri", - "version": "10.0.6", - "description": "Standard Subresource Integrity library -- parses, serializes, generates, and verifies integrity metadata according to the SRI spec.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "prerelease": "npm t", - "postrelease": "npm publish", - "posttest": "npm run lint", - "test": "tap", - "coverage": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap" - }, - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/ssri.git" - }, - "keywords": [ - "w3c", - "web", - "security", - "integrity", - "checksum", - "hashing", - "subresource integrity", - "sri", - "sri hash", - "sri string", - "sri generator", - "html" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE deleted file mode 100644 index 69619c125ea7ef..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js deleted file mode 100644 index d067d2e709809a..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js +++ /dev/null @@ -1,7 +0,0 @@ -var path = require('path') - -var uniqueSlug = require('unique-slug') - -module.exports = function (filepath, prefix, uniq) { - return path.join(filepath, (prefix ? prefix + '-' : '') + uniqueSlug(uniq)) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json deleted file mode 100644 index b2fbf0666489a6..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "unique-filename", - "version": "3.0.0", - "description": "Generate a unique filename for use in temporary directories or caches.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-filename.git" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/iarna/unique-filename/issues" - }, - "homepage": "https://github.com/iarna/unique-filename", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "dependencies": { - "unique-slug": "^4.0.0" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE deleted file mode 100644 index 7953647e7760b8..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js deleted file mode 100644 index 1bac84d95d7307..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js +++ /dev/null @@ -1,11 +0,0 @@ -'use strict' -var MurmurHash3 = require('imurmurhash') - -module.exports = function (uniq) { - if (uniq) { - var hash = new MurmurHash3(uniq) - return ('00000000' + hash.result().toString(16)).slice(-8) - } else { - return (Math.random().toString(16) + '0000000').slice(2, 10) - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json deleted file mode 100644 index 33732cdbb42859..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "unique-slug", - "version": "4.0.0", - "description": "Generate a unique character string suitible for use in files and URLs.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^3.1.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-slug.git" - }, - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js b/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js index c9a8ee92b531eb..06a8143e70da2f 100644 --- a/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js +++ b/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js @@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.appDataPath = void 0; +exports.appDataPath = appDataPath; /* Copyright 2023 The Sigstore Authors. 
@@ -41,4 +41,3 @@ function appDataPath(name) { } } } -exports.appDataPath = appDataPath; diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/client.js b/deps/npm/node_modules/@sigstore/tuf/dist/client.js index 2019c1fd30f886..328f49e40dbbd7 100644 --- a/deps/npm/node_modules/@sigstore/tuf/dist/client.js +++ b/deps/npm/node_modules/@sigstore/tuf/dist/client.js @@ -79,7 +79,6 @@ function seedCache({ cachePath, mirrorURL, tufRootPath, forceInit, }) { fs_1.default.copyFileSync(tufRootPath, cachedRootPath); } else { - /* eslint-disable @typescript-eslint/no-var-requires */ const seeds = require('../seeds.json'); const repoSeed = seeds[mirrorURL]; if (!repoSeed) { diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/index.js b/deps/npm/node_modules/@sigstore/tuf/dist/index.js index 678c81d45d21ed..2af5de93ec5d2f 100644 --- a/deps/npm/node_modules/@sigstore/tuf/dist/index.js +++ b/deps/npm/node_modules/@sigstore/tuf/dist/index.js @@ -1,6 +1,8 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.TUFError = exports.initTUF = exports.getTrustedRoot = exports.DEFAULT_MIRROR_URL = void 0; +exports.TUFError = exports.DEFAULT_MIRROR_URL = void 0; +exports.getTrustedRoot = getTrustedRoot; +exports.initTUF = initTUF; /* Copyright 2023 The Sigstore Authors. @@ -31,14 +33,12 @@ options = {}) { const trustedRoot = await client.getTarget(TRUSTED_ROOT_TARGET); return protobuf_specs_1.TrustedRoot.fromJSON(JSON.parse(trustedRoot)); } -exports.getTrustedRoot = getTrustedRoot; async function initTUF( /* istanbul ignore next */ options = {}) { const client = createClient(options); return client.refresh().then(() => client); } -exports.initTUF = initTUF; // Create a TUF client with default options function createClient(options) { /* istanbul ignore next */ diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/target.js b/deps/npm/node_modules/@sigstore/tuf/dist/target.js index 29eaf99a7e721c..5c6675bdfbf5fe 100644 --- a/deps/npm/node_modules/@sigstore/tuf/dist/target.js +++ b/deps/npm/node_modules/@sigstore/tuf/dist/target.js @@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.readTarget = void 0; +exports.readTarget = readTarget; /* Copyright 2023 The Sigstore Authors. @@ -39,7 +39,6 @@ async function readTarget(tuf, targetPath) { }); }); } -exports.readTarget = readTarget; // Returns the local path to the specified target. If the target is not yet // cached locally, the provided TUF Updater will be used to download and // cache the target. 
diff --git a/deps/npm/node_modules/@sigstore/tuf/package.json b/deps/npm/node_modules/@sigstore/tuf/package.json index b7fd34ac9674eb..808689dfddf92f 100644 --- a/deps/npm/node_modules/@sigstore/tuf/package.json +++ b/deps/npm/node_modules/@sigstore/tuf/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/tuf", - "version": "2.3.4", + "version": "3.0.0", "description": "Client for the Sigstore TUF repository", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -28,14 +28,14 @@ }, "devDependencies": { "@sigstore/jest": "^0.0.0", - "@tufjs/repo-mock": "^2.0.1", + "@tufjs/repo-mock": "^3.0.1", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { "@sigstore/protobuf-specs": "^0.3.2", - "tuf-js": "^2.2.1" + "tuf-js": "^3.0.1" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/tuf/seeds.json b/deps/npm/node_modules/@sigstore/tuf/seeds.json index e8d97d5fa7a672..d1d3c6b5c46040 100644 --- a/deps/npm/node_modules/@sigstore/tuf/seeds.json +++ b/deps/npm/node_modules/@sigstore/tuf/seeds.json @@ -1 +1 @@ -{"https://tuf-repo-cdn.sigstore.dev":{"root.json":"{
	"signed": {
		"_type": "root",
		"spec_version": "1.0",
		"version": 9,
		"expires": "2024-09-12T06:53:10Z",
		"keys": {
			"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
				}
			},
			"230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
				}
			},
			"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
				}
			},
			"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
				}
			}
		},
		"roles": {
			"root": {
				"keyids": [
					"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
					"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
					"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
					"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
					"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f"
				],
				"threshold": 3
			},
			"snapshot": {
				"keyids": [
					"230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac"
				],
				"threshold": 1
			},
			"targets": {
				"keyids": [
					"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
					"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
					"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
					"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
					"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f"
				],
				"threshold": 3
			},
			"timestamp": {
				"keyids": [
					"923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d"
				],
				"threshold": 1
			}
		},
		"consistent_snapshot": true
	},
	"signatures": [
		{
			"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
			"sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f"
		},
		{
			"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
			"sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260"
		},
		{
			"keyid": "f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
			"sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736"
		},
		{
			"keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
			"sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f"
		},
		{
			"keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
			"sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260"
		},
		{
			"keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
			"sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3"
		},
		{
			"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
			"sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75"
		},
		{
			"keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
			"sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736"
		},
		{
			"keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f",
			"sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75"
		},
		{
			"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
			"sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3"
		}
	]
}","targets":{"trusted_root.json":"{
  "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1",
  "tlogs": [
    {
      "baseUrl": "https://rekor.sigstore.dev",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-01-12T11:53:27.000Z"
        }
      },
      "logId": {
        "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="
      }
    }
  ],
  "certificateAuthorities": [
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ=="
          }
        ]
      },
      "validFor": {
        "start": "2021-03-07T03:20:29.000Z",
        "end": "2022-12-31T23:59:59.999Z"
      }
    },
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow="
          },
          {
            "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ"
          }
        ]
      },
      "validFor": {
        "start": "2022-04-13T20:06:15.000Z"
      }
    }
  ],
  "ctlogs": [
    {
      "baseUrl": "https://ctfe.sigstore.dev/test",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-03-14T00:00:00.000Z",
          "end": "2022-10-31T23:59:59.999Z"
        }
      },
      "logId": {
        "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I="
      }
    },
    {
      "baseUrl": "https://ctfe.sigstore.dev/2022",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2022-10-20T00:00:00.000Z"
        }
      },
      "logId": {
        "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4="
      }
    }
  ],
  "timestampAuthorities": [
    {
      "subject": {
        "organization": "GitHub, Inc.",
        "commonName": "Internal Services Root"
      },
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB3DCCAWKgAwIBAgIUchkNsH36Xa04b1LqIc+qr9DVecMwCgYIKoZIzj0EAwMwMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMB4XDTIzMDQxNDAwMDAwMFoXDTI0MDQxMzAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgVGltZXN0YW1waW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUD5ZNbSqYMd6r8qpOOEX9ibGnZT9GsuXOhr/f8U9FJugBGExKYp40OULS0erjZW7xV9xV52NnJf5OeDq4e5ZKqNWMFQwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMIMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUaW1RudOgVt0leqY0WKYbuPr47wAwCgYIKoZIzj0EAwMDaAAwZQIwbUH9HvD4ejCZJOWQnqAlkqURllvu9M8+VqLbiRK+zSfZCZwsiljRn8MQQRSkXEE5AjEAg+VxqtojfVfu8DhzzhCx9GKETbJHb19iV72mMKUbDAFmzZ6bQ8b54Zb8tidy5aWe"
          },
          {
            "rawBytes": "MIICEDCCAZWgAwIBAgIUX8ZO5QXP7vN4dMQ5e9sU3nub8OgwCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTI4MDQxMjAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEvMLY/dTVbvIJYANAuszEwJnQE1llftynyMKIMhh48HmqbVr5ygybzsLRLVKbBWOdZ21aeJz+gZiytZetqcyF9WlER5NEMf6JV7ZNojQpxHq4RHGoGSceQv/qvTiZxEDKo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaW1RudOgVt0leqY0WKYbuPr47wAwHwYDVR0jBBgwFoAU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaQAwZgIxAK1B185ygCrIYFlIs3GjswjnwSMG6LY8woLVdakKDZxVa8f8cqMs1DhcxJ0+09w95QIxAO+tBzZk7vjUJ9iJgD4R6ZWTxQWKqNm74jO99o+o9sv4FI/SZTZTFyMn0IJEHdNmyA=="
          },
          {
            "rawBytes": "MIIB9DCCAXqgAwIBAgIUa/JAkdUjK4JUwsqtaiRJGWhqLSowCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTMzMDQxMTAwMDAwMFowODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf9jFAXxz4kx68AHRMOkFBhflDcMTvzaXz4x/FCcXjJ/1qEKon/qPIGnaURskDtyNbNDOpeJTDDFqt48iMPrnzpx6IZwqemfUJN4xBEZfza+pYt/iyod+9tZr20RRWSv/o0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaAAwZQIxALZLZ8BgRXzKxLMMN9VIlO+e4hrBnNBgF7tz7Hnrowv2NetZErIACKFymBlvWDvtMAIwZO+ki6ssQ1bsZo98O8mEAf2NZ7iiCgDDU0Vwjeco6zyeh0zBTs9/7gV6AHNQ53xD"
          }
        ]
      },
      "validFor": {
        "start": "2023-04-14T00:00:00.000Z"
      }
    }
  ]
}
","registry.npmjs.org%2Fkeys.json":"ewogICAgImtleXMiOiBbCiAgICAgICAgewogICAgICAgICAgICAia2V5SWQiOiAiU0hBMjU2OmpsM2J3c3d1ODBQampva0NnaDBvMnc1YzJVNExoUUFFNTdnajljejFrekEiLAogICAgICAgICAgICAia2V5VXNhZ2UiOiAibnBtOnNpZ25hdHVyZXMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIxOTk5LTAxLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9LAogICAgICAgIHsKICAgICAgICAgICAgImtleUlkIjogIlNIQTI1NjpqbDNid3N3dTgwUGpqb2tDZ2gwbzJ3NWMyVTRMaFFBRTU3Z2o5Y3oxa3pBIiwKICAgICAgICAgICAgImtleVVzYWdlIjogIm5wbTphdHRlc3RhdGlvbnMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIyMDIyLTEyLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9CiAgICBdCn0K"}}} +{"https://tuf-repo-cdn.sigstore.dev":{"root.json":"{
 "signatures": [
  {
   "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
   "sig": "30460221008ab1f6f17d4f9e6d7dcf1c88912b6b53cc10388644ae1f09bc37a082cd06003e022100e145ef4c7b782d4e8107b53437e669d0476892ce999903ae33d14448366996e7"
  },
  {
   "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
   "sig": "3045022100c768b2f86da99569019c160a081da54ae36c34c0a3120d3cb69b53b7d113758e02204f671518f617b20d46537fae6c3b63bae8913f4f1962156105cc4f019ac35c6a"
  },
  {
   "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
   "sig": "3045022100b4434e6995d368d23e74759acd0cb9013c83a5d3511f0f997ec54c456ae4350a022015b0e265d182d2b61dc74e155d98b3c3fbe564ba05286aa14c8df02c9b756516"
  },
  {
   "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
   "sig": "304502210082c58411d989eb9f861410857d42381590ec9424dbdaa51e78ed13515431904e0220118185da6a6c2947131c17797e2bb7620ce26e5f301d1ceac5f2a7e58f9dcf2e"
  },
  {
   "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70",
   "sig": "3046022100c78513854cae9c32eaa6b88e18912f48006c2757a258f917312caba75948eb9e022100d9e1b4ce0adfe9fd2e2148d7fa27a2f40ba1122bd69da7612d8d1776b013c91d"
  },
  {
   "keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f",
   "sig": "3045022056483a2d5d9ea9cec6e11eadfb33c484b614298faca15acf1c431b11ed7f734c022100d0c1d726af92a87e4e66459ca5adf38a05b44e1f94318423f954bae8bca5bb2e"
  },
  {
   "keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
   "sig": "3046022100d004de88024c32dc5653a9f4843cfc5215427048ad9600d2cf9c969e6edff3d2022100d9ebb798f5fc66af10899dece014a8628ccf3c5402cd4a4270207472f8f6e712"
  },
  {
   "keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
   "sig": "3046022100b7b09996c45ca2d4b05603e56baefa29718a0b71147cf8c6e66349baa61477df022100c4da80c717b4fa7bba0fd5c72da8a0499358b01358b2309f41d1456ea1e7e1d9"
  },
  {
   "keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
   "sig": "3046022100be9782c30744e411a82fa85b5138d601ce148bc19258aec64e7ec24478f38812022100caef63dcaf1a4b9a500d3bd0e3f164ec18f1b63d7a9460d9acab1066db0f016d"
  },
  {
   "keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
   "sig": "30450220746ec3f8534ce55531d0d01ff64964ef440d1e7d2c4c142409b8e9769f1ada6f022100e3b929fcd93ea18feaa0825887a7210489879a66780c07a83f4bd46e2f09ab3b"
  }
 ],
 "signed": {
  "_type": "root",
  "consistent_snapshot": true,
  "expires": "2025-02-19T08:04:32Z",
  "keys": {
   "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@santiagotorres"
   },
   "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@bobcallaway"
   },
   "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@dlorenc"
   },
   "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-online-uri": "gcpkms://projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp"
   },
   "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@joshuagl"
   },
   "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@mnm678"
   }
  },
  "roles": {
   "root": {
    "keyids": [
     "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
     "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
     "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
     "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
     "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70"
    ],
    "threshold": 3
   },
   "snapshot": {
    "keyids": [
     "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c"
    ],
    "threshold": 1,
    "x-tuf-on-ci-expiry-period": 3650,
    "x-tuf-on-ci-signing-period": 365
   },
   "targets": {
    "keyids": [
     "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
     "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
     "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
     "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
     "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70"
    ],
    "threshold": 3
   },
   "timestamp": {
    "keyids": [
     "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c"
    ],
    "threshold": 1,
    "x-tuf-on-ci-expiry-period": 7,
    "x-tuf-on-ci-signing-period": 4
   }
  },
  "spec_version": "1.0",
  "version": 10,
  "x-tuf-on-ci-expiry-period": 182,
  "x-tuf-on-ci-signing-period": 31
 }
}","targets":{"trusted_root.json":"{
  "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1",
  "tlogs": [
    {
      "baseUrl": "https://rekor.sigstore.dev",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-01-12T11:53:27.000Z"
        }
      },
      "logId": {
        "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="
      }
    }
  ],
  "certificateAuthorities": [
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ=="
          }
        ]
      },
      "validFor": {
        "start": "2021-03-07T03:20:29.000Z",
        "end": "2022-12-31T23:59:59.999Z"
      }
    },
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow="
          },
          {
            "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ"
          }
        ]
      },
      "validFor": {
        "start": "2022-04-13T20:06:15.000Z"
      }
    }
  ],
  "ctlogs": [
    {
      "baseUrl": "https://ctfe.sigstore.dev/test",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-03-14T00:00:00.000Z",
          "end": "2022-10-31T23:59:59.999Z"
        }
      },
      "logId": {
        "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I="
      }
    },
    {
      "baseUrl": "https://ctfe.sigstore.dev/2022",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2022-10-20T00:00:00.000Z"
        }
      },
      "logId": {
        "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4="
      }
    }
  ],
  "timestampAuthorities": [
    {
      "subject": {
        "organization": "GitHub, Inc.",
        "commonName": "Internal Services Root"
      },
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB3DCCAWKgAwIBAgIUchkNsH36Xa04b1LqIc+qr9DVecMwCgYIKoZIzj0EAwMwMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMB4XDTIzMDQxNDAwMDAwMFoXDTI0MDQxMzAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgVGltZXN0YW1waW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUD5ZNbSqYMd6r8qpOOEX9ibGnZT9GsuXOhr/f8U9FJugBGExKYp40OULS0erjZW7xV9xV52NnJf5OeDq4e5ZKqNWMFQwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMIMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUaW1RudOgVt0leqY0WKYbuPr47wAwCgYIKoZIzj0EAwMDaAAwZQIwbUH9HvD4ejCZJOWQnqAlkqURllvu9M8+VqLbiRK+zSfZCZwsiljRn8MQQRSkXEE5AjEAg+VxqtojfVfu8DhzzhCx9GKETbJHb19iV72mMKUbDAFmzZ6bQ8b54Zb8tidy5aWe"
          },
          {
            "rawBytes": "MIICEDCCAZWgAwIBAgIUX8ZO5QXP7vN4dMQ5e9sU3nub8OgwCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTI4MDQxMjAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEvMLY/dTVbvIJYANAuszEwJnQE1llftynyMKIMhh48HmqbVr5ygybzsLRLVKbBWOdZ21aeJz+gZiytZetqcyF9WlER5NEMf6JV7ZNojQpxHq4RHGoGSceQv/qvTiZxEDKo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaW1RudOgVt0leqY0WKYbuPr47wAwHwYDVR0jBBgwFoAU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaQAwZgIxAK1B185ygCrIYFlIs3GjswjnwSMG6LY8woLVdakKDZxVa8f8cqMs1DhcxJ0+09w95QIxAO+tBzZk7vjUJ9iJgD4R6ZWTxQWKqNm74jO99o+o9sv4FI/SZTZTFyMn0IJEHdNmyA=="
          },
          {
            "rawBytes": "MIIB9DCCAXqgAwIBAgIUa/JAkdUjK4JUwsqtaiRJGWhqLSowCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTMzMDQxMTAwMDAwMFowODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf9jFAXxz4kx68AHRMOkFBhflDcMTvzaXz4x/FCcXjJ/1qEKon/qPIGnaURskDtyNbNDOpeJTDDFqt48iMPrnzpx6IZwqemfUJN4xBEZfza+pYt/iyod+9tZr20RRWSv/o0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaAAwZQIxALZLZ8BgRXzKxLMMN9VIlO+e4hrBnNBgF7tz7Hnrowv2NetZErIACKFymBlvWDvtMAIwZO+ki6ssQ1bsZo98O8mEAf2NZ7iiCgDDU0Vwjeco6zyeh0zBTs9/7gV6AHNQ53xD"
          }
        ]
      },
      "validFor": {
        "start": "2023-04-14T00:00:00.000Z"
      }
    }
  ]
}
","registry.npmjs.org%2Fkeys.json":"ewogICAgImtleXMiOiBbCiAgICAgICAgewogICAgICAgICAgICAia2V5SWQiOiAiU0hBMjU2OmpsM2J3c3d1ODBQampva0NnaDBvMnc1YzJVNExoUUFFNTdnajljejFrekEiLAogICAgICAgICAgICAia2V5VXNhZ2UiOiAibnBtOnNpZ25hdHVyZXMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIxOTk5LTAxLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9LAogICAgICAgIHsKICAgICAgICAgICAgImtleUlkIjogIlNIQTI1NjpqbDNid3N3dTgwUGpqb2tDZ2gwbzJ3NWMyVTRMaFFBRTU3Z2o5Y3oxa3pBIiwKICAgICAgICAgICAgImtleVVzYWdlIjogIm5wbTphdHRlc3RhdGlvbnMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIyMDIyLTEyLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9CiAgICBdCn0K"}}} diff --git a/deps/npm/node_modules/ci-info/index.js b/deps/npm/node_modules/ci-info/index.js index 47907264581eb1..9eba6940c4147e 100644 --- a/deps/npm/node_modules/ci-info/index.js +++ b/deps/npm/node_modules/ci-info/index.js @@ -13,6 +13,7 @@ Object.defineProperty(exports, '_vendors', { exports.name = null exports.isPR = null +exports.id = null vendors.forEach(function (vendor) { const envs = Array.isArray(vendor.env) ? 
vendor.env : [vendor.env] @@ -27,45 +28,23 @@ vendors.forEach(function (vendor) { } exports.name = vendor.name - - switch (typeof vendor.pr) { - case 'string': - // "pr": "CIRRUS_PR" - exports.isPR = !!env[vendor.pr] - break - case 'object': - if ('env' in vendor.pr) { - // "pr": { "env": "BUILDKITE_PULL_REQUEST", "ne": "false" } - exports.isPR = vendor.pr.env in env && env[vendor.pr.env] !== vendor.pr.ne - } else if ('any' in vendor.pr) { - // "pr": { "any": ["ghprbPullId", "CHANGE_ID"] } - exports.isPR = vendor.pr.any.some(function (key) { - return !!env[key] - }) - } else { - // "pr": { "DRONE_BUILD_EVENT": "pull_request" } - exports.isPR = checkEnv(vendor.pr) - } - break - default: - // PR detection not supported for this vendor - exports.isPR = null - } + exports.isPR = checkPR(vendor) + exports.id = vendor.constant }) exports.isCI = !!( env.CI !== 'false' && // Bypass all checks if CI env is explicitly set to 'false' (env.BUILD_ID || // Jenkins, Cloudbees - env.BUILD_NUMBER || // Jenkins, TeamCity - env.CI || // Travis CI, CircleCI, Cirrus CI, Gitlab CI, Appveyor, CodeShip, dsari - env.CI_APP_ID || // Appflow - env.CI_BUILD_ID || // Appflow - env.CI_BUILD_NUMBER || // Appflow - env.CI_NAME || // Codeship and others - env.CONTINUOUS_INTEGRATION || // Travis CI, Cirrus CI - env.RUN_ID || // TaskCluster, dsari - exports.name || - false) + env.BUILD_NUMBER || // Jenkins, TeamCity + env.CI || // Travis CI, CircleCI, Cirrus CI, Gitlab CI, Appveyor, CodeShip, dsari + env.CI_APP_ID || // Appflow + env.CI_BUILD_ID || // Appflow + env.CI_BUILD_NUMBER || // Appflow + env.CI_NAME || // Codeship and others + env.CONTINUOUS_INTEGRATION || // Travis CI, Cirrus CI + env.RUN_ID || // TaskCluster, dsari + exports.name || + false) ) function checkEnv (obj) { @@ -79,12 +58,45 @@ function checkEnv (obj) { return env[obj.env] && env[obj.env].includes(obj.includes) // } } + if ('any' in obj) { return obj.any.some(function (k) { return !!env[k] }) } + return Object.keys(obj).every(function (k) { return env[k] === obj[k] }) } + +function checkPR (vendor) { + switch (typeof vendor.pr) { + case 'string': + // "pr": "CIRRUS_PR" + return !!env[vendor.pr] + case 'object': + if ('env' in vendor.pr) { + if ('any' in vendor.pr) { + // "pr": { "env": "CODEBUILD_WEBHOOK_EVENT", "any": ["PULL_REQUEST_CREATED", "PULL_REQUEST_UPDATED"] } + return vendor.pr.any.some(function (key) { + return env[vendor.pr.env] === key + }) + } else { + // "pr": { "env": "BUILDKITE_PULL_REQUEST", "ne": "false" } + return vendor.pr.env in env && env[vendor.pr.env] !== vendor.pr.ne + } + } else if ('any' in vendor.pr) { + // "pr": { "any": ["ghprbPullId", "CHANGE_ID"] } + return vendor.pr.any.some(function (key) { + return !!env[key] + }) + } else { + // "pr": { "DRONE_BUILD_EVENT": "pull_request" } + return checkEnv(vendor.pr) + } + default: + // PR detection not supported for this vendor + return null + } +} diff --git a/deps/npm/node_modules/ci-info/package.json b/deps/npm/node_modules/ci-info/package.json index 3c6b9e4adac8e8..156329d2ce379c 100644 --- a/deps/npm/node_modules/ci-info/package.json +++ b/deps/npm/node_modules/ci-info/package.json @@ -1,6 +1,6 @@ { "name": "ci-info", - "version": "4.0.0", + "version": "4.1.0", "description": "Get details about the current Continuous Integration environment", "main": "index.js", "typings": "index.d.ts", @@ -9,6 +9,18 @@ "repository": "https://github.com/watson/ci-info.git", "bugs": "https://github.com/watson/ci-info/issues", "homepage": "https://github.com/watson/ci-info", + "contributors": 
[ + { + "name": "Sibiraj", + "url": "https://github.com/sibiraj-s" + } + ], + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], "keywords": [ "ci", "continuous", @@ -22,22 +34,16 @@ "index.d.ts", "CHANGELOG.md" ], - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], "scripts": { "lint:fix": "standard --fix", "test": "standard && node test.js", - "prepare": "husky install" + "prepare": "husky install || true" }, "devDependencies": { "clear-module": "^4.1.2", - "husky": "^8.0.3", - "standard": "^17.1.0", - "tape": "^5.7.0" + "husky": "^9.1.6", + "standard": "^17.1.2", + "tape": "^5.9.0" }, "engines": { "node": ">=8" diff --git a/deps/npm/node_modules/ci-info/vendors.json b/deps/npm/node_modules/ci-info/vendors.json index 6b65e3f9b541f8..64d5924d1a557e 100644 --- a/deps/npm/node_modules/ci-info/vendors.json +++ b/deps/npm/node_modules/ci-info/vendors.json @@ -8,7 +8,11 @@ { "name": "Appcircle", "constant": "APPCIRCLE", - "env": "AC_APPCIRCLE" + "env": "AC_APPCIRCLE", + "pr": { + "env": "AC_GIT_PR", + "ne": "false" + } }, { "name": "AppVeyor", @@ -19,7 +23,15 @@ { "name": "AWS CodeBuild", "constant": "CODEBUILD", - "env": "CODEBUILD_BUILD_ARN" + "env": "CODEBUILD_BUILD_ARN", + "pr": { + "env": "CODEBUILD_WEBHOOK_EVENT", + "any": [ + "PULL_REQUEST_CREATED", + "PULL_REQUEST_UPDATED", + "PULL_REQUEST_REOPENED" + ] + } }, { "name": "Azure Pipelines", diff --git a/deps/npm/node_modules/cross-spawn/lib/enoent.js b/deps/npm/node_modules/cross-spawn/lib/enoent.js index 14df9b623d0a20..da33471369c23f 100644 --- a/deps/npm/node_modules/cross-spawn/lib/enoent.js +++ b/deps/npm/node_modules/cross-spawn/lib/enoent.js @@ -24,7 +24,7 @@ function hookChildProcess(cp, parsed) { // the command exists and emit an "error" instead // See https://github.com/IndigoUnited/node-cross-spawn/issues/16 if (name === 'exit') { - const err = verifyENOENT(arg1, parsed, 'spawn'); + const err = verifyENOENT(arg1, parsed); if (err) { return originalEmit.call(cp, 'error', err); diff --git a/deps/npm/node_modules/cross-spawn/lib/util/escape.js b/deps/npm/node_modules/cross-spawn/lib/util/escape.js index b0bb84c3a14092..7bf2905cd035ad 100644 --- a/deps/npm/node_modules/cross-spawn/lib/util/escape.js +++ b/deps/npm/node_modules/cross-spawn/lib/util/escape.js @@ -15,15 +15,17 @@ function escapeArgument(arg, doubleEscapeMetaChars) { arg = `${arg}`; // Algorithm below is based on https://qntm.org/cmd + // It's slightly altered to disable JS backtracking to avoid hanging on specially crafted input + // Please see https://github.com/moxystudio/node-cross-spawn/pull/160 for more information // Sequence of backslashes followed by a double quote: // double up all the backslashes and escape the double quote - arg = arg.replace(/(\\*)"/g, '$1$1\\"'); + arg = arg.replace(/(?=(\\+?)?)\1"/g, '$1$1\\"'); // Sequence of backslashes followed by the end of the string // (which will become a double quote later): // double up all the backslashes - arg = arg.replace(/(\\*)$/, '$1$1'); + arg = arg.replace(/(?=(\\+?)?)\1$/, '$1$1'); // All other backslashes occur literally diff --git a/deps/npm/node_modules/cross-spawn/package.json b/deps/npm/node_modules/cross-spawn/package.json index 232ff97e04b213..24b2eb4c9900cf 100644 --- a/deps/npm/node_modules/cross-spawn/package.json +++ b/deps/npm/node_modules/cross-spawn/package.json @@ -1,6 +1,6 @@ { "name": "cross-spawn", - "version": "7.0.3", + "version": "7.0.6", "description": "Cross platform child_process#spawn and 
child_process#spawnSync", "keywords": [ "spawn", @@ -65,7 +65,7 @@ "lint-staged": "^9.2.5", "mkdirp": "^0.5.1", "rimraf": "^3.0.0", - "standard-version": "^7.0.0" + "standard-version": "^9.5.0" }, "engines": { "node": ">= 8" diff --git a/deps/npm/node_modules/debug/node_modules/ms/index.js b/deps/npm/node_modules/debug/node_modules/ms/index.js deleted file mode 100644 index c4498bcc212589..00000000000000 --- a/deps/npm/node_modules/debug/node_modules/ms/index.js +++ /dev/null @@ -1,162 +0,0 @@ -/** - * Helpers. - */ - -var s = 1000; -var m = s * 60; -var h = m * 60; -var d = h * 24; -var w = d * 7; -var y = d * 365.25; - -/** - * Parse or format the given `val`. - * - * Options: - * - * - `long` verbose formatting [false] - * - * @param {String|Number} val - * @param {Object} [options] - * @throws {Error} throw an error if val is not a non-empty string or a number - * @return {String|Number} - * @api public - */ - -module.exports = function(val, options) { - options = options || {}; - var type = typeof val; - if (type === 'string' && val.length > 0) { - return parse(val); - } else if (type === 'number' && isFinite(val)) { - return options.long ? fmtLong(val) : fmtShort(val); - } - throw new Error( - 'val is not a non-empty string or a valid number. val=' + - JSON.stringify(val) - ); -}; - -/** - * Parse the given `str` and return milliseconds. - * - * @param {String} str - * @return {Number} - * @api private - */ - -function parse(str) { - str = String(str); - if (str.length > 100) { - return; - } - var match = /^(-?(?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|weeks?|w|years?|yrs?|y)?$/i.exec( - str - ); - if (!match) { - return; - } - var n = parseFloat(match[1]); - var type = (match[2] || 'ms').toLowerCase(); - switch (type) { - case 'years': - case 'year': - case 'yrs': - case 'yr': - case 'y': - return n * y; - case 'weeks': - case 'week': - case 'w': - return n * w; - case 'days': - case 'day': - case 'd': - return n * d; - case 'hours': - case 'hour': - case 'hrs': - case 'hr': - case 'h': - return n * h; - case 'minutes': - case 'minute': - case 'mins': - case 'min': - case 'm': - return n * m; - case 'seconds': - case 'second': - case 'secs': - case 'sec': - case 's': - return n * s; - case 'milliseconds': - case 'millisecond': - case 'msecs': - case 'msec': - case 'ms': - return n; - default: - return undefined; - } -} - -/** - * Short format for `ms`. - * - * @param {Number} ms - * @return {String} - * @api private - */ - -function fmtShort(ms) { - var msAbs = Math.abs(ms); - if (msAbs >= d) { - return Math.round(ms / d) + 'd'; - } - if (msAbs >= h) { - return Math.round(ms / h) + 'h'; - } - if (msAbs >= m) { - return Math.round(ms / m) + 'm'; - } - if (msAbs >= s) { - return Math.round(ms / s) + 's'; - } - return ms + 'ms'; -} - -/** - * Long format for `ms`. - * - * @param {Number} ms - * @return {String} - * @api private - */ - -function fmtLong(ms) { - var msAbs = Math.abs(ms); - if (msAbs >= d) { - return plural(ms, msAbs, d, 'day'); - } - if (msAbs >= h) { - return plural(ms, msAbs, h, 'hour'); - } - if (msAbs >= m) { - return plural(ms, msAbs, m, 'minute'); - } - if (msAbs >= s) { - return plural(ms, msAbs, s, 'second'); - } - return ms + ' ms'; -} - -/** - * Pluralization helper. - */ - -function plural(ms, msAbs, n, name) { - var isPlural = msAbs >= n * 1.5; - return Math.round(ms / n) + ' ' + name + (isPlural ? 
's' : ''); -} diff --git a/deps/npm/node_modules/debug/node_modules/ms/license.md b/deps/npm/node_modules/debug/node_modules/ms/license.md deleted file mode 100644 index 69b61253a38926..00000000000000 --- a/deps/npm/node_modules/debug/node_modules/ms/license.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Zeit, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/deps/npm/node_modules/debug/node_modules/ms/package.json b/deps/npm/node_modules/debug/node_modules/ms/package.json deleted file mode 100644 index eea666e1fb03d6..00000000000000 --- a/deps/npm/node_modules/debug/node_modules/ms/package.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "name": "ms", - "version": "2.1.2", - "description": "Tiny millisecond conversion utility", - "repository": "zeit/ms", - "main": "./index", - "files": [ - "index.js" - ], - "scripts": { - "precommit": "lint-staged", - "lint": "eslint lib/* bin/*", - "test": "mocha tests.js" - }, - "eslintConfig": { - "extends": "eslint:recommended", - "env": { - "node": true, - "es6": true - } - }, - "lint-staged": { - "*.js": [ - "npm run lint", - "prettier --single-quote --write", - "git add" - ] - }, - "license": "MIT", - "devDependencies": { - "eslint": "4.12.1", - "expect.js": "0.3.1", - "husky": "0.14.3", - "lint-staged": "5.0.0", - "mocha": "4.0.1" - } -} diff --git a/deps/npm/node_modules/debug/package.json b/deps/npm/node_modules/debug/package.json index 8eea05520554eb..2f782eb9aef450 100644 --- a/deps/npm/node_modules/debug/package.json +++ b/deps/npm/node_modules/debug/package.json @@ -1,6 +1,6 @@ { "name": "debug", - "version": "4.3.6", + "version": "4.3.7", "repository": { "type": "git", "url": "git://github.com/debug-js/debug.git" @@ -31,7 +31,7 @@ "test:coverage": "cat ./coverage/lcov.info | coveralls" }, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "devDependencies": { "brfs": "^2.0.1", diff --git a/deps/npm/node_modules/hosted-git-info/lib/hosts.js b/deps/npm/node_modules/hosted-git-info/lib/hosts.js index 9a08efd1b2d7e9..2a88e95927772a 100644 --- a/deps/npm/node_modules/hosted-git-info/lib/hosts.js +++ b/deps/npm/node_modules/hosted-git-info/lib/hosts.js @@ -4,7 +4,11 @@ const maybeJoin = (...args) => args.every(arg => arg) ? args.join('') : '' const maybeEncode = (arg) => arg ? encodeURIComponent(arg) : '' -const formatHashFragment = (f) => f.toLowerCase().replace(/^\W+|\/|\W+$/g, '').replace(/\W+/g, '-') +const formatHashFragment = (f) => f.toLowerCase() + .replace(/^\W+/g, '') // strip leading non-characters + .replace(/(? 
diff --git a/deps/npm/node_modules/hosted-git-info/package.json b/deps/npm/node_modules/hosted-git-info/package.json index 3bb8bcd1f49825..78356159af7723 100644 --- a/deps/npm/node_modules/hosted-git-info/package.json +++ b/deps/npm/node_modules/hosted-git-info/package.json @@ -1,6 +1,6 @@ { "name": "hosted-git-info", - "version": "8.0.0", + "version": "8.0.2", "description": "Provides metadata and conversions from repository urls for GitHub, Bitbucket and GitLab", "main": "./lib/index.js", "repository": { @@ -35,7 +35,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "tap": "^16.0.1" }, "files": [ @@ -55,7 +55,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" } } diff --git a/deps/npm/node_modules/libnpmpublish/lib/publish.js b/deps/npm/node_modules/libnpmpublish/lib/publish.js index 93d546efb5f0e1..2bcab4f3ba304e 100644 --- a/deps/npm/node_modules/libnpmpublish/lib/publish.js +++ b/deps/npm/node_modules/libnpmpublish/lib/publish.js @@ -137,7 +137,7 @@ const buildMetadata = async (registry, manifest, tarballData, spec, opts) => { if (provenance === true) { await ensureProvenanceGeneration(registry, spec, opts) - provenanceBundle = await generateProvenance([subject], opts) + provenanceBundle = await generateProvenance([subject], { legacyCompatibility: true, ...opts }) /* eslint-disable-next-line max-len */ log.notice('publish', `Signed provenance statement with source and build information from ${ciInfo.name}`) diff --git a/deps/npm/node_modules/libnpmpublish/package.json b/deps/npm/node_modules/libnpmpublish/package.json index f63d50f4e7b9c1..594f5041480b4a 100644 --- a/deps/npm/node_modules/libnpmpublish/package.json +++ b/deps/npm/node_modules/libnpmpublish/package.json @@ -1,6 +1,6 @@ { "name": "libnpmpublish", - "version": "10.0.0", + "version": "10.0.1", "description": "Programmatic API for the bits behind npm publish and unpublish", "author": "GitHub Inc.", "main": "lib/index.js", @@ -45,7 +45,7 @@ "npm-registry-fetch": "^18.0.1", "proc-log": "^5.0.0", "semver": "^7.3.7", - "sigstore": "^2.2.0", + "sigstore": "^3.0.0", "ssri": "^12.0.0" }, "engines": { diff --git a/deps/npm/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/make-fetch-happen/lib/options.js index f77511279f831d..db51cc63248176 100644 --- a/deps/npm/node_modules/make-fetch-happen/lib/options.js +++ b/deps/npm/node_modules/make-fetch-happen/lib/options.js @@ -11,7 +11,12 @@ const conditionalHeaders = [ const configureOptions = (opts) => { const { strictSSL, ...options } = { ...opts } options.method = options.method ? 
options.method.toUpperCase() : 'GET' - options.rejectUnauthorized = strictSSL !== false + + if (strictSSL === undefined || strictSSL === null) { + options.rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0' + } else { + options.rejectUnauthorized = strictSSL !== false + } if (!options.retry) { options.retry = { retries: 0 } diff --git a/deps/npm/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/make-fetch-happen/lib/remote.js index 8554564074de6e..1d640e5380baaf 100644 --- a/deps/npm/node_modules/make-fetch-happen/lib/remote.js +++ b/deps/npm/node_modules/make-fetch-happen/lib/remote.js @@ -35,7 +35,8 @@ const RETRY_TYPES = [ // following redirects (through the cache if necessary) // and verifying response integrity const remoteFetch = (request, options) => { - const agent = getAgent(request.url, options) + // options.signal is intended for the fetch itself, not the agent. Attaching it to the agent will re-use that signal across multiple requests, which prevents any connections beyond the first one. + const agent = getAgent(request.url, { ...options, signal: undefined }) if (!request.headers.has('connection')) { request.headers.set('connection', agent ? 'keep-alive' : 'close') } diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md new file mode 100644 index 00000000000000..63d537d3f68114 --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md @@ -0,0 +1,114 @@ +1.0.0 / 2024-08-31 +================== + + * Drop support for node <18 + * Added an option preferred encodings array #59 + +0.6.3 / 2022-01-22 +================== + + * Revert "Lazy-load modules from main entry point" + +0.6.2 / 2019-04-29 +================== + + * Fix sorting charset, encoding, and language with extra parameters + +0.6.1 / 2016-05-02 +================== + + * perf: improve `Accept` parsing speed + * perf: improve `Accept-Charset` parsing speed + * perf: improve `Accept-Encoding` parsing speed + * perf: improve `Accept-Language` parsing speed + +0.6.0 / 2015-09-29 +================== + + * Fix including type extensions in parameters in `Accept` parsing + * Fix parsing `Accept` parameters with quoted equals + * Fix parsing `Accept` parameters with quoted semicolons + * Lazy-load modules from main entry point + * perf: delay type concatenation until needed + * perf: enable strict mode + * perf: hoist regular expressions + * perf: remove closures getting spec properties + * perf: remove a closure from media type parsing + * perf: remove property delete from media type parsing + +0.5.3 / 2015-05-10 +================== + + * Fix media type parameter matching to be case-insensitive + +0.5.2 / 2015-05-06 +================== + + * Fix comparing media types with quoted values + * Fix splitting media types with quoted commas + +0.5.1 / 2015-02-14 +================== + + * Fix preference sorting to be stable for long acceptable lists + +0.5.0 / 2014-12-18 +================== + + * Fix list return order when large accepted list + * Fix missing identity encoding when q=0 exists + * Remove dynamic building of Negotiator class + +0.4.9 / 2014-10-14 +================== + + * Fix error when media type has invalid parameter + +0.4.8 / 2014-09-28 +================== + + * Fix all negotiations to be case-insensitive + * Stable sort preferences of same quality according to client order + * Support Node.js 0.6 + +0.4.7 / 2014-06-24 +================== + 
+ * Handle invalid provided languages + * Handle invalid provided media types + +0.4.6 / 2014-06-11 +================== + + * Order by specificity when quality is the same + +0.4.5 / 2014-05-29 +================== + + * Fix regression in empty header handling + +0.4.4 / 2014-05-29 +================== + + * Fix behaviors when headers are not present + +0.4.3 / 2014-04-16 +================== + + * Handle slashes on media params correctly + +0.4.2 / 2014-02-28 +================== + + * Fix media type sorting + * Handle media types params strictly + +0.4.1 / 2014-01-16 +================== + + * Use most specific matches + +0.4.0 / 2014-01-09 +================== + + * Remove preferred prefix from methods diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/LICENSE b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/LICENSE new file mode 100644 index 00000000000000..ea6b9e2e9ac251 --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/LICENSE @@ -0,0 +1,24 @@ +(The MIT License) + +Copyright (c) 2012-2014 Federico Romero +Copyright (c) 2012-2014 Isaac Z. Schlueter +Copyright (c) 2014-2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js new file mode 100644 index 00000000000000..4f51315d6af4bd --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js @@ -0,0 +1,83 @@ +/*! + * negotiator + * Copyright(c) 2012 Federico Romero + * Copyright(c) 2012-2014 Isaac Z. Schlueter + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict'; + +var preferredCharsets = require('./lib/charset') +var preferredEncodings = require('./lib/encoding') +var preferredLanguages = require('./lib/language') +var preferredMediaTypes = require('./lib/mediaType') + +/** + * Module exports. + * @public + */ + +module.exports = Negotiator; +module.exports.Negotiator = Negotiator; + +/** + * Create a Negotiator instance from a request. 
+ * @param {object} request + * @public + */ + +function Negotiator(request) { + if (!(this instanceof Negotiator)) { + return new Negotiator(request); + } + + this.request = request; +} + +Negotiator.prototype.charset = function charset(available) { + var set = this.charsets(available); + return set && set[0]; +}; + +Negotiator.prototype.charsets = function charsets(available) { + return preferredCharsets(this.request.headers['accept-charset'], available); +}; + +Negotiator.prototype.encoding = function encoding(available, opts) { + var set = this.encodings(available, opts); + return set && set[0]; +}; + +Negotiator.prototype.encodings = function encodings(available, options) { + var opts = options || {}; + return preferredEncodings(this.request.headers['accept-encoding'], available, opts.preferred); +}; + +Negotiator.prototype.language = function language(available) { + var set = this.languages(available); + return set && set[0]; +}; + +Negotiator.prototype.languages = function languages(available) { + return preferredLanguages(this.request.headers['accept-language'], available); +}; + +Negotiator.prototype.mediaType = function mediaType(available) { + var set = this.mediaTypes(available); + return set && set[0]; +}; + +Negotiator.prototype.mediaTypes = function mediaTypes(available) { + return preferredMediaTypes(this.request.headers.accept, available); +}; + +// Backwards compatibility +Negotiator.prototype.preferredCharset = Negotiator.prototype.charset; +Negotiator.prototype.preferredCharsets = Negotiator.prototype.charsets; +Negotiator.prototype.preferredEncoding = Negotiator.prototype.encoding; +Negotiator.prototype.preferredEncodings = Negotiator.prototype.encodings; +Negotiator.prototype.preferredLanguage = Negotiator.prototype.language; +Negotiator.prototype.preferredLanguages = Negotiator.prototype.languages; +Negotiator.prototype.preferredMediaType = Negotiator.prototype.mediaType; +Negotiator.prototype.preferredMediaTypes = Negotiator.prototype.mediaTypes; diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/charset.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/charset.js new file mode 100644 index 00000000000000..cdd014803474a4 --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/charset.js @@ -0,0 +1,169 @@ +/** + * negotiator + * Copyright(c) 2012 Isaac Z. Schlueter + * Copyright(c) 2014 Federico Romero + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict'; + +/** + * Module exports. + * @public + */ + +module.exports = preferredCharsets; +module.exports.preferredCharsets = preferredCharsets; + +/** + * Module variables. + * @private + */ + +var simpleCharsetRegExp = /^\s*([^\s;]+)\s*(?:;(.*))?$/; + +/** + * Parse the Accept-Charset header. + * @private + */ + +function parseAcceptCharset(accept) { + var accepts = accept.split(','); + + for (var i = 0, j = 0; i < accepts.length; i++) { + var charset = parseCharset(accepts[i].trim(), i); + + if (charset) { + accepts[j++] = charset; + } + } + + // trim accepts + accepts.length = j; + + return accepts; +} + +/** + * Parse a charset from the Accept-Charset header. 
+ * @private + */ + +function parseCharset(str, i) { + var match = simpleCharsetRegExp.exec(str); + if (!match) return null; + + var charset = match[1]; + var q = 1; + if (match[2]) { + var params = match[2].split(';') + for (var j = 0; j < params.length; j++) { + var p = params[j].trim().split('='); + if (p[0] === 'q') { + q = parseFloat(p[1]); + break; + } + } + } + + return { + charset: charset, + q: q, + i: i + }; +} + +/** + * Get the priority of a charset. + * @private + */ + +function getCharsetPriority(charset, accepted, index) { + var priority = {o: -1, q: 0, s: 0}; + + for (var i = 0; i < accepted.length; i++) { + var spec = specify(charset, accepted[i], index); + + if (spec && (priority.s - spec.s || priority.q - spec.q || priority.o - spec.o) < 0) { + priority = spec; + } + } + + return priority; +} + +/** + * Get the specificity of the charset. + * @private + */ + +function specify(charset, spec, index) { + var s = 0; + if(spec.charset.toLowerCase() === charset.toLowerCase()){ + s |= 1; + } else if (spec.charset !== '*' ) { + return null + } + + return { + i: index, + o: spec.i, + q: spec.q, + s: s + } +} + +/** + * Get the preferred charsets from an Accept-Charset header. + * @public + */ + +function preferredCharsets(accept, provided) { + // RFC 2616 sec 14.2: no header = * + var accepts = parseAcceptCharset(accept === undefined ? '*' : accept || ''); + + if (!provided) { + // sorted list of all charsets + return accepts + .filter(isQuality) + .sort(compareSpecs) + .map(getFullCharset); + } + + var priorities = provided.map(function getPriority(type, index) { + return getCharsetPriority(type, accepts, index); + }); + + // sorted list of accepted charsets + return priorities.filter(isQuality).sort(compareSpecs).map(function getCharset(priority) { + return provided[priorities.indexOf(priority)]; + }); +} + +/** + * Compare two specs. + * @private + */ + +function compareSpecs(a, b) { + return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i) || 0; +} + +/** + * Get full charset string. + * @private + */ + +function getFullCharset(spec) { + return spec.charset; +} + +/** + * Check if a spec has any quality. + * @private + */ + +function isQuality(spec) { + return spec.q > 0; +} diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js new file mode 100644 index 00000000000000..9ebb633d677433 --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js @@ -0,0 +1,205 @@ +/** + * negotiator + * Copyright(c) 2012 Isaac Z. Schlueter + * Copyright(c) 2014 Federico Romero + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict'; + +/** + * Module exports. + * @public + */ + +module.exports = preferredEncodings; +module.exports.preferredEncodings = preferredEncodings; + +/** + * Module variables. + * @private + */ + +var simpleEncodingRegExp = /^\s*([^\s;]+)\s*(?:;(.*))?$/; + +/** + * Parse the Accept-Encoding header. 
+ * @private + */ + +function parseAcceptEncoding(accept) { + var accepts = accept.split(','); + var hasIdentity = false; + var minQuality = 1; + + for (var i = 0, j = 0; i < accepts.length; i++) { + var encoding = parseEncoding(accepts[i].trim(), i); + + if (encoding) { + accepts[j++] = encoding; + hasIdentity = hasIdentity || specify('identity', encoding); + minQuality = Math.min(minQuality, encoding.q || 1); + } + } + + if (!hasIdentity) { + /* + * If identity doesn't explicitly appear in the accept-encoding header, + * it's added to the list of acceptable encoding with the lowest q + */ + accepts[j++] = { + encoding: 'identity', + q: minQuality, + i: i + }; + } + + // trim accepts + accepts.length = j; + + return accepts; +} + +/** + * Parse an encoding from the Accept-Encoding header. + * @private + */ + +function parseEncoding(str, i) { + var match = simpleEncodingRegExp.exec(str); + if (!match) return null; + + var encoding = match[1]; + var q = 1; + if (match[2]) { + var params = match[2].split(';'); + for (var j = 0; j < params.length; j++) { + var p = params[j].trim().split('='); + if (p[0] === 'q') { + q = parseFloat(p[1]); + break; + } + } + } + + return { + encoding: encoding, + q: q, + i: i + }; +} + +/** + * Get the priority of an encoding. + * @private + */ + +function getEncodingPriority(encoding, accepted, index) { + var priority = {encoding: encoding, o: -1, q: 0, s: 0}; + + for (var i = 0; i < accepted.length; i++) { + var spec = specify(encoding, accepted[i], index); + + if (spec && (priority.s - spec.s || priority.q - spec.q || priority.o - spec.o) < 0) { + priority = spec; + } + } + + return priority; +} + +/** + * Get the specificity of the encoding. + * @private + */ + +function specify(encoding, spec, index) { + var s = 0; + if(spec.encoding.toLowerCase() === encoding.toLowerCase()){ + s |= 1; + } else if (spec.encoding !== '*' ) { + return null + } + + return { + encoding: encoding, + i: index, + o: spec.i, + q: spec.q, + s: s + } +}; + +/** + * Get the preferred encodings from an Accept-Encoding header. + * @public + */ + +function preferredEncodings(accept, provided, preferred) { + var accepts = parseAcceptEncoding(accept || ''); + + var comparator = preferred ? function comparator (a, b) { + if (a.q !== b.q) { + return b.q - a.q // higher quality first + } + + var aPreferred = preferred.indexOf(a.encoding) + var bPreferred = preferred.indexOf(b.encoding) + + if (aPreferred === -1 && bPreferred === -1) { + // consider the original specifity/order + return (b.s - a.s) || (a.o - b.o) || (a.i - b.i) + } + + if (aPreferred !== -1 && bPreferred !== -1) { + return aPreferred - bPreferred // consider the preferred order + } + + return aPreferred === -1 ? 1 : -1 // preferred first + } : compareSpecs; + + if (!provided) { + // sorted list of all encodings + return accepts + .filter(isQuality) + .sort(comparator) + .map(getFullEncoding); + } + + var priorities = provided.map(function getPriority(type, index) { + return getEncodingPriority(type, accepts, index); + }); + + // sorted list of accepted encodings + return priorities.filter(isQuality).sort(comparator).map(function getEncoding(priority) { + return provided[priorities.indexOf(priority)]; + }); +} + +/** + * Compare two specs. + * @private + */ + +function compareSpecs(a, b) { + return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i); +} + +/** + * Get full encoding string. + * @private + */ + +function getFullEncoding(spec) { + return spec.encoding; +} + +/** + * Check if a spec has any quality. 
+ * @private + */ + +function isQuality(spec) { + return spec.q > 0; +} diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/language.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/language.js new file mode 100644 index 00000000000000..a23167252719be --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/language.js @@ -0,0 +1,179 @@ +/** + * negotiator + * Copyright(c) 2012 Isaac Z. Schlueter + * Copyright(c) 2014 Federico Romero + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict'; + +/** + * Module exports. + * @public + */ + +module.exports = preferredLanguages; +module.exports.preferredLanguages = preferredLanguages; + +/** + * Module variables. + * @private + */ + +var simpleLanguageRegExp = /^\s*([^\s\-;]+)(?:-([^\s;]+))?\s*(?:;(.*))?$/; + +/** + * Parse the Accept-Language header. + * @private + */ + +function parseAcceptLanguage(accept) { + var accepts = accept.split(','); + + for (var i = 0, j = 0; i < accepts.length; i++) { + var language = parseLanguage(accepts[i].trim(), i); + + if (language) { + accepts[j++] = language; + } + } + + // trim accepts + accepts.length = j; + + return accepts; +} + +/** + * Parse a language from the Accept-Language header. + * @private + */ + +function parseLanguage(str, i) { + var match = simpleLanguageRegExp.exec(str); + if (!match) return null; + + var prefix = match[1] + var suffix = match[2] + var full = prefix + + if (suffix) full += "-" + suffix; + + var q = 1; + if (match[3]) { + var params = match[3].split(';') + for (var j = 0; j < params.length; j++) { + var p = params[j].split('='); + if (p[0] === 'q') q = parseFloat(p[1]); + } + } + + return { + prefix: prefix, + suffix: suffix, + q: q, + i: i, + full: full + }; +} + +/** + * Get the priority of a language. + * @private + */ + +function getLanguagePriority(language, accepted, index) { + var priority = {o: -1, q: 0, s: 0}; + + for (var i = 0; i < accepted.length; i++) { + var spec = specify(language, accepted[i], index); + + if (spec && (priority.s - spec.s || priority.q - spec.q || priority.o - spec.o) < 0) { + priority = spec; + } + } + + return priority; +} + +/** + * Get the specificity of the language. + * @private + */ + +function specify(language, spec, index) { + var p = parseLanguage(language) + if (!p) return null; + var s = 0; + if(spec.full.toLowerCase() === p.full.toLowerCase()){ + s |= 4; + } else if (spec.prefix.toLowerCase() === p.full.toLowerCase()) { + s |= 2; + } else if (spec.full.toLowerCase() === p.prefix.toLowerCase()) { + s |= 1; + } else if (spec.full !== '*' ) { + return null + } + + return { + i: index, + o: spec.i, + q: spec.q, + s: s + } +}; + +/** + * Get the preferred languages from an Accept-Language header. + * @public + */ + +function preferredLanguages(accept, provided) { + // RFC 2616 sec 14.4: no header = * + var accepts = parseAcceptLanguage(accept === undefined ? '*' : accept || ''); + + if (!provided) { + // sorted list of all languages + return accepts + .filter(isQuality) + .sort(compareSpecs) + .map(getFullLanguage); + } + + var priorities = provided.map(function getPriority(type, index) { + return getLanguagePriority(type, accepts, index); + }); + + // sorted list of accepted languages + return priorities.filter(isQuality).sort(compareSpecs).map(function getLanguage(priority) { + return provided[priorities.indexOf(priority)]; + }); +} + +/** + * Compare two specs. 
+ * @private + */ + +function compareSpecs(a, b) { + return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i) || 0; +} + +/** + * Get full language string. + * @private + */ + +function getFullLanguage(spec) { + return spec.full; +} + +/** + * Check if a spec has any quality. + * @private + */ + +function isQuality(spec) { + return spec.q > 0; +} diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js new file mode 100644 index 00000000000000..8e402ea88394c0 --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js @@ -0,0 +1,294 @@ +/** + * negotiator + * Copyright(c) 2012 Isaac Z. Schlueter + * Copyright(c) 2014 Federico Romero + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict'; + +/** + * Module exports. + * @public + */ + +module.exports = preferredMediaTypes; +module.exports.preferredMediaTypes = preferredMediaTypes; + +/** + * Module variables. + * @private + */ + +var simpleMediaTypeRegExp = /^\s*([^\s\/;]+)\/([^;\s]+)\s*(?:;(.*))?$/; + +/** + * Parse the Accept header. + * @private + */ + +function parseAccept(accept) { + var accepts = splitMediaTypes(accept); + + for (var i = 0, j = 0; i < accepts.length; i++) { + var mediaType = parseMediaType(accepts[i].trim(), i); + + if (mediaType) { + accepts[j++] = mediaType; + } + } + + // trim accepts + accepts.length = j; + + return accepts; +} + +/** + * Parse a media type from the Accept header. + * @private + */ + +function parseMediaType(str, i) { + var match = simpleMediaTypeRegExp.exec(str); + if (!match) return null; + + var params = Object.create(null); + var q = 1; + var subtype = match[2]; + var type = match[1]; + + if (match[3]) { + var kvps = splitParameters(match[3]).map(splitKeyValuePair); + + for (var j = 0; j < kvps.length; j++) { + var pair = kvps[j]; + var key = pair[0].toLowerCase(); + var val = pair[1]; + + // get the value, unwrapping quotes + var value = val && val[0] === '"' && val[val.length - 1] === '"' + ? val.slice(1, -1) + : val; + + if (key === 'q') { + q = parseFloat(value); + break; + } + + // store parameter + params[key] = value; + } + } + + return { + type: type, + subtype: subtype, + params: params, + q: q, + i: i + }; +} + +/** + * Get the priority of a media type. + * @private + */ + +function getMediaTypePriority(type, accepted, index) { + var priority = {o: -1, q: 0, s: 0}; + + for (var i = 0; i < accepted.length; i++) { + var spec = specify(type, accepted[i], index); + + if (spec && (priority.s - spec.s || priority.q - spec.q || priority.o - spec.o) < 0) { + priority = spec; + } + } + + return priority; +} + +/** + * Get the specificity of the media type. 
+ * @private + */ + +function specify(type, spec, index) { + var p = parseMediaType(type); + var s = 0; + + if (!p) { + return null; + } + + if(spec.type.toLowerCase() == p.type.toLowerCase()) { + s |= 4 + } else if(spec.type != '*') { + return null; + } + + if(spec.subtype.toLowerCase() == p.subtype.toLowerCase()) { + s |= 2 + } else if(spec.subtype != '*') { + return null; + } + + var keys = Object.keys(spec.params); + if (keys.length > 0) { + if (keys.every(function (k) { + return spec.params[k] == '*' || (spec.params[k] || '').toLowerCase() == (p.params[k] || '').toLowerCase(); + })) { + s |= 1 + } else { + return null + } + } + + return { + i: index, + o: spec.i, + q: spec.q, + s: s, + } +} + +/** + * Get the preferred media types from an Accept header. + * @public + */ + +function preferredMediaTypes(accept, provided) { + // RFC 2616 sec 14.2: no header = */* + var accepts = parseAccept(accept === undefined ? '*/*' : accept || ''); + + if (!provided) { + // sorted list of all types + return accepts + .filter(isQuality) + .sort(compareSpecs) + .map(getFullType); + } + + var priorities = provided.map(function getPriority(type, index) { + return getMediaTypePriority(type, accepts, index); + }); + + // sorted list of accepted types + return priorities.filter(isQuality).sort(compareSpecs).map(function getType(priority) { + return provided[priorities.indexOf(priority)]; + }); +} + +/** + * Compare two specs. + * @private + */ + +function compareSpecs(a, b) { + return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i) || 0; +} + +/** + * Get full type string. + * @private + */ + +function getFullType(spec) { + return spec.type + '/' + spec.subtype; +} + +/** + * Check if a spec has any quality. + * @private + */ + +function isQuality(spec) { + return spec.q > 0; +} + +/** + * Count the number of quotes in a string. + * @private + */ + +function quoteCount(string) { + var count = 0; + var index = 0; + + while ((index = string.indexOf('"', index)) !== -1) { + count++; + index++; + } + + return count; +} + +/** + * Split a key value pair. + * @private + */ + +function splitKeyValuePair(str) { + var index = str.indexOf('='); + var key; + var val; + + if (index === -1) { + key = str; + } else { + key = str.slice(0, index); + val = str.slice(index + 1); + } + + return [key, val]; +} + +/** + * Split an Accept header into media types. + * @private + */ + +function splitMediaTypes(accept) { + var accepts = accept.split(','); + + for (var i = 1, j = 0; i < accepts.length; i++) { + if (quoteCount(accepts[j]) % 2 == 0) { + accepts[++j] = accepts[i]; + } else { + accepts[j] += ',' + accepts[i]; + } + } + + // trim accepts + accepts.length = j + 1; + + return accepts; +} + +/** + * Split a string of parameters. 
+ * @private + */ + +function splitParameters(str) { + var parameters = str.split(';'); + + for (var i = 1, j = 0; i < parameters.length; i++) { + if (quoteCount(parameters[j]) % 2 == 0) { + parameters[++j] = parameters[i]; + } else { + parameters[j] += ';' + parameters[i]; + } + } + + // trim parameters + parameters.length = j + 1; + + for (var i = 0; i < parameters.length; i++) { + parameters[i] = parameters[i].trim(); + } + + return parameters; +} diff --git a/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json new file mode 100644 index 00000000000000..e4bdc1ef4f7481 --- /dev/null +++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json @@ -0,0 +1,43 @@ +{ + "name": "negotiator", + "description": "HTTP content negotiation", + "version": "1.0.0", + "contributors": [ + "Douglas Christopher Wilson ", + "Federico Romero ", + "Isaac Z. Schlueter (http://blog.izs.me/)" + ], + "license": "MIT", + "keywords": [ + "http", + "content negotiation", + "accept", + "accept-language", + "accept-encoding", + "accept-charset" + ], + "repository": "jshttp/negotiator", + "devDependencies": { + "eslint": "7.32.0", + "eslint-plugin-markdown": "2.2.1", + "mocha": "9.1.3", + "nyc": "15.1.0" + }, + "files": [ + "lib/", + "HISTORY.md", + "LICENSE", + "index.js", + "README.md" + ], + "engines": { + "node": ">= 0.6" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --reporter spec --check-leaks --bail test/", + "test:debug": "mocha --reporter spec --check-leaks --inspect --inspect-brk test/", + "test-ci": "nyc --reporter=lcov --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test" + } +} diff --git a/deps/npm/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/make-fetch-happen/package.json index 0868ff6d7efa54..054fe841f13b73 100644 --- a/deps/npm/node_modules/make-fetch-happen/package.json +++ b/deps/npm/node_modules/make-fetch-happen/package.json @@ -1,6 +1,6 @@ { "name": "make-fetch-happen", - "version": "14.0.1", + "version": "14.0.3", "description": "Opinionated, caching, retrying fetch client", "main": "lib/index.js", "files": [ @@ -40,14 +40,14 @@ "minipass-fetch": "^4.0.0", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", + "negotiator": "^1.0.0", "proc-log": "^5.0.0", "promise-retry": "^2.0.1", "ssri": "^12.0.0" }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "nock": "^13.2.4", "safe-buffer": "^5.2.1", "standard-version": "^9.3.2", @@ -68,7 +68,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" } } diff --git a/deps/npm/node_modules/negotiator/HISTORY.md b/deps/npm/node_modules/negotiator/HISTORY.md index a9a544914c43bb..e1929aba8e22ce 100644 --- a/deps/npm/node_modules/negotiator/HISTORY.md +++ b/deps/npm/node_modules/negotiator/HISTORY.md @@ -1,3 +1,8 @@ +unreleased +================== + + * Added an option preferred encodings array #59 + 0.6.3 / 2022-01-22 ================== diff --git a/deps/npm/node_modules/negotiator/index.js b/deps/npm/node_modules/negotiator/index.js index 4788264b16c9f2..7df0b0a5318156 100644 --- a/deps/npm/node_modules/negotiator/index.js +++ b/deps/npm/node_modules/negotiator/index.js @@ -44,13 +44,13 @@ Negotiator.prototype.charsets = function charsets(available) { return preferredCharsets(this.request.headers['accept-charset'], available); }; -Negotiator.prototype.encoding = function encoding(available) { - var set = this.encodings(available); +Negotiator.prototype.encoding = function encoding(available, preferred) { + var set = this.encodings(available, preferred); return set && set[0]; }; -Negotiator.prototype.encodings = function encodings(available) { - return preferredEncodings(this.request.headers['accept-encoding'], available); +Negotiator.prototype.encodings = function encodings(available, preferred) { + return preferredEncodings(this.request.headers['accept-encoding'], available, preferred); }; Negotiator.prototype.language = function language(available) { diff --git a/deps/npm/node_modules/negotiator/lib/encoding.js b/deps/npm/node_modules/negotiator/lib/encoding.js index 8432cd77b8a969..9ebb633d677433 100644 --- a/deps/npm/node_modules/negotiator/lib/encoding.js +++ b/deps/npm/node_modules/negotiator/lib/encoding.js @@ -96,7 +96,7 @@ function parseEncoding(str, i) { */ function getEncodingPriority(encoding, accepted, index) { - var priority = {o: -1, q: 0, s: 0}; + var priority = {encoding: encoding, o: -1, q: 0, s: 0}; for (var i = 0; i < accepted.length; i++) { var spec = specify(encoding, accepted[i], index); @@ -123,6 +123,7 @@ function specify(encoding, spec, index) { } return { + encoding: encoding, i: index, o: spec.i, q: spec.q, @@ -135,14 +136,34 @@ function specify(encoding, spec, index) { * @public */ -function preferredEncodings(accept, provided) { +function preferredEncodings(accept, provided, preferred) { var accepts = parseAcceptEncoding(accept || ''); + var comparator = preferred ? function comparator (a, b) { + if (a.q !== b.q) { + return b.q - a.q // higher quality first + } + + var aPreferred = preferred.indexOf(a.encoding) + var bPreferred = preferred.indexOf(b.encoding) + + if (aPreferred === -1 && bPreferred === -1) { + // consider the original specifity/order + return (b.s - a.s) || (a.o - b.o) || (a.i - b.i) + } + + if (aPreferred !== -1 && bPreferred !== -1) { + return aPreferred - bPreferred // consider the preferred order + } + + return aPreferred === -1 ? 
1 : -1 // preferred first + } : compareSpecs; + if (!provided) { // sorted list of all encodings return accepts .filter(isQuality) - .sort(compareSpecs) + .sort(comparator) .map(getFullEncoding); } @@ -151,7 +172,7 @@ function preferredEncodings(accept, provided) { }); // sorted list of accepted encodings - return priorities.filter(isQuality).sort(compareSpecs).map(function getEncoding(priority) { + return priorities.filter(isQuality).sort(comparator).map(function getEncoding(priority) { return provided[priorities.indexOf(priority)]; }); } @@ -162,7 +183,7 @@ function preferredEncodings(accept, provided) { */ function compareSpecs(a, b) { - return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i) || 0; + return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i); } /** diff --git a/deps/npm/node_modules/negotiator/lib/mediaType.js b/deps/npm/node_modules/negotiator/lib/mediaType.js index 67309dd75f1b62..8e402ea88394c0 100644 --- a/deps/npm/node_modules/negotiator/lib/mediaType.js +++ b/deps/npm/node_modules/negotiator/lib/mediaType.js @@ -69,7 +69,7 @@ function parseMediaType(str, i) { // get the value, unwrapping quotes var value = val && val[0] === '"' && val[val.length - 1] === '"' - ? val.substr(1, val.length - 2) + ? val.slice(1, -1) : val; if (key === 'q') { @@ -238,8 +238,8 @@ function splitKeyValuePair(str) { if (index === -1) { key = str; } else { - key = str.substr(0, index); - val = str.substr(index + 1); + key = str.slice(0, index); + val = str.slice(index + 1); } return [key, val]; diff --git a/deps/npm/node_modules/negotiator/package.json b/deps/npm/node_modules/negotiator/package.json index 297635f6d34177..19b0a8a6ef6041 100644 --- a/deps/npm/node_modules/negotiator/package.json +++ b/deps/npm/node_modules/negotiator/package.json @@ -1,7 +1,7 @@ { "name": "negotiator", "description": "HTTP content negotiation", - "version": "0.6.3", + "version": "0.6.4", "contributors": [ "Douglas Christopher Wilson ", "Federico Romero ", diff --git a/deps/npm/node_modules/npm-install-checks/lib/current-env.js b/deps/npm/node_modules/npm-install-checks/lib/current-env.js index 9babde1f277ff1..31f154aac59b32 100644 --- a/deps/npm/node_modules/npm-install-checks/lib/current-env.js +++ b/deps/npm/node_modules/npm-install-checks/lib/current-env.js @@ -1,5 +1,6 @@ const process = require('node:process') const nodeOs = require('node:os') +const fs = require('node:fs') function isMusl (file) { return file.includes('libc.musl-') || file.includes('ld-musl-') @@ -13,12 +14,23 @@ function cpu () { return process.arch } -function libc (osName) { - // this is to make it faster on non linux machines - if (osName !== 'linux') { +const LDD_PATH = '/usr/bin/ldd' +function getFamilyFromFilesystem () { + try { + const content = fs.readFileSync(LDD_PATH, 'utf-8') + if (content.includes('musl')) { + return 'musl' + } + if (content.includes('GNU C Library')) { + return 'glibc' + } + return null + } catch { return undefined } - let family +} + +function getFamilyFromReport () { const originalExclude = process.report.excludeNetwork process.report.excludeNetwork = true const report = process.report.getReport() @@ -27,6 +39,22 @@ function libc (osName) { family = 'glibc' } else if (Array.isArray(report.sharedObjects) && report.sharedObjects.some(isMusl)) { family = 'musl' + } else { + family = null + } + return family +} + +let family +function libc (osName) { + if (osName !== 'linux') { + return undefined + } + if (family === undefined) { + family = getFamilyFromFilesystem() + if (family === undefined) { 
+ family = getFamilyFromReport() + } } return family } diff --git a/deps/npm/node_modules/npm-install-checks/package.json b/deps/npm/node_modules/npm-install-checks/package.json index e9e69575a6dc6d..967f5f659b2fac 100644 --- a/deps/npm/node_modules/npm-install-checks/package.json +++ b/deps/npm/node_modules/npm-install-checks/package.json @@ -1,6 +1,6 @@ { "name": "npm-install-checks", - "version": "7.1.0", + "version": "7.1.1", "description": "Check the engines and platform fields in package.json", "main": "lib/index.js", "dependencies": { @@ -8,7 +8,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "tap": "^16.0.1" }, "scripts": { @@ -40,7 +40,7 @@ "author": "GitHub Inc.", "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js index 65eea2963b0b4c..2f183082ab2ce2 100644 --- a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js +++ b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js @@ -48,10 +48,18 @@ function logRequest (method, res, startTime) { const cacheStr = cacheStatus ? ` (cache ${cacheStatus})` : '' const urlStr = cleanUrl(res.url) - log.http( - 'fetch', - `${method.toUpperCase()} ${res.status} ${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}` - ) + // If make-fetch-happen reports a cache hit, then there was no fetch + if (cacheStatus === 'hit') { + log.http( + 'cache', + `${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}` + ) + } else { + log.http( + 'fetch', + `${method.toUpperCase()} ${res.status} ${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}` + ) + } } function checkErrors (method, res, startTime, opts) { diff --git a/deps/npm/node_modules/npm-registry-fetch/package.json b/deps/npm/node_modules/npm-registry-fetch/package.json index 559473b964aaa5..bd7a79d35e26ac 100644 --- a/deps/npm/node_modules/npm-registry-fetch/package.json +++ b/deps/npm/node_modules/npm-registry-fetch/package.json @@ -1,6 +1,6 @@ { "name": "npm-registry-fetch", - "version": "18.0.1", + "version": "18.0.2", "description": "Fetch-based http client for use with npm registry APIs", "main": "lib", "files": [ @@ -42,8 +42,8 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", - "cacache": "^18.0.0", + "@npmcli/template-oss": "4.23.4", + "cacache": "^19.0.1", "nock": "^13.2.4", "require-inject": "^1.4.4", "ssri": "^12.0.0", @@ -62,7 +62,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" } } diff --git a/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js b/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js index 5cff210d855cb0..b966ac9fef535b 100644 --- a/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js +++ b/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js @@ -5,6 +5,8 @@ const node_fs_1 = require("node:fs"); const node_path_1 = require("node:path"); const node_url_1 = require("node:url"); const NM = `${node_path_1.sep}node_modules${node_path_1.sep}`; +const STORE = `.store${node_path_1.sep}`; +const PKG = `${node_path_1.sep}package${node_path_1.sep}`; const DIST = `${node_path_1.sep}dist${node_path_1.sep}`; /** * Find the package.json file, either from a TypeScript file somewhere not @@ -59,8 +61,16 @@ const findPackageJson = (from, pathFromSrc = '../package.json') => { // inside of node_modules. find the dist directly under package name. const nm = __dirname.substring(0, nms + NM.length); const pkgDir = __dirname.substring(nms + NM.length); + // affordance for yarn berry, which puts package contents in + // '.../node_modules/.store/${id}-${hash}/package/...' + if (pkgDir.startsWith(STORE)) { + const pkg = pkgDir.indexOf(PKG, STORE.length); + if (pkg) { + return (0, node_path_1.resolve)(nm, pkgDir.substring(0, pkg + PKG.length), 'package.json'); + } + } const pkgName = pkgDir.startsWith('@') ? - pkgDir.split(node_path_1.sep).slice(0, 2).join(node_path_1.sep) + pkgDir.split(node_path_1.sep, 2).join(node_path_1.sep) : String(pkgDir.split(node_path_1.sep)[0]); return (0, node_path_1.resolve)(nm, pkgName, 'package.json'); } diff --git a/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js b/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js index 0627645f9c35a4..426ad3c2d18597 100644 --- a/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js +++ b/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js @@ -2,6 +2,8 @@ import { readFileSync } from 'node:fs'; import { dirname, resolve, sep } from 'node:path'; import { fileURLToPath } from 'node:url'; const NM = `${sep}node_modules${sep}`; +const STORE = `.store${sep}`; +const PKG = `${sep}package${sep}`; const DIST = `${sep}dist${sep}`; /** * Find the package.json file, either from a TypeScript file somewhere not @@ -56,8 +58,16 @@ export const findPackageJson = (from, pathFromSrc = '../package.json') => { // inside of node_modules. find the dist directly under package name. const nm = __dirname.substring(0, nms + NM.length); const pkgDir = __dirname.substring(nms + NM.length); + // affordance for yarn berry, which puts package contents in + // '.../node_modules/.store/${id}-${hash}/package/...' + if (pkgDir.startsWith(STORE)) { + const pkg = pkgDir.indexOf(PKG, STORE.length); + if (pkg) { + return resolve(nm, pkgDir.substring(0, pkg + PKG.length), 'package.json'); + } + } const pkgName = pkgDir.startsWith('@') ? 
- pkgDir.split(sep).slice(0, 2).join(sep) + pkgDir.split(sep, 2).join(sep) : String(pkgDir.split(sep)[0]); return resolve(nm, pkgName, 'package.json'); } diff --git a/deps/npm/node_modules/package-json-from-dist/package.json b/deps/npm/node_modules/package-json-from-dist/package.json index 2d5526e87b7fa0..a2d03c3269d72d 100644 --- a/deps/npm/node_modules/package-json-from-dist/package.json +++ b/deps/npm/node_modules/package-json-from-dist/package.json @@ -1,6 +1,6 @@ { "name": "package-json-from-dist", - "version": "1.0.0", + "version": "1.0.1", "description": "Load the local package.json from either src or dist folder", "main": "./dist/commonjs/index.js", "exports": { @@ -28,7 +28,7 @@ "presnap": "npm run prepare", "test": "tap", "snap": "tap", - "format": "prettier --write . --loglevel warn --ignore-path ../../.prettierignore --cache", + "format": "prettier --write . --log-level warn", "typedoc": "typedoc" }, "author": "Isaac Z. Schlueter (https://izs.me)", diff --git a/deps/npm/node_modules/pacote/lib/dir.js b/deps/npm/node_modules/pacote/lib/dir.js index f3229b34e463ab..4ae97c216fe64f 100644 --- a/deps/npm/node_modules/pacote/lib/dir.js +++ b/deps/npm/node_modules/pacote/lib/dir.js @@ -39,6 +39,8 @@ class DirFetcher extends Fetcher { const stdio = this.opts.foregroundScripts ? 'inherit' : 'pipe' return runScript({ + // this || undefined is because runScript will be unhappy with the default null value + scriptShell: this.opts.scriptShell || undefined, pkg: mani, event: 'prepare', path: this.resolved, diff --git a/deps/npm/node_modules/pacote/lib/fetcher.js b/deps/npm/node_modules/pacote/lib/fetcher.js index cc2c2db70c697d..f2ac97619d3af1 100644 --- a/deps/npm/node_modules/pacote/lib/fetcher.js +++ b/deps/npm/node_modules/pacote/lib/fetcher.js @@ -188,7 +188,15 @@ class FetcherBase { // private // Note: cacache will raise a EINTEGRITY error if the integrity doesn't match #tarballFromCache () { - return cacache.get.stream.byDigest(this.cache, this.integrity, this.opts) + const startTime = Date.now() + const stream = cacache.get.stream.byDigest(this.cache, this.integrity, this.opts) + const elapsedTime = Date.now() - startTime + // cache is good, so log it as a hit in particular since there was no fetch logged + log.http( + 'cache', + `${this.spec} ${elapsedTime}ms (cache hit)` + ) + return stream } get [_.cacheFetches] () { diff --git a/deps/npm/node_modules/pacote/package.json b/deps/npm/node_modules/pacote/package.json index 0eb8261af96e0c..71c9aa1ce32572 100644 --- a/deps/npm/node_modules/pacote/package.json +++ b/deps/npm/node_modules/pacote/package.json @@ -1,6 +1,6 @@ { "name": "pacote", - "version": "19.0.0", + "version": "19.0.1", "description": "JavaScript package downloader", "author": "GitHub Inc.", "bin": { @@ -59,7 +59,7 @@ "npm-registry-fetch": "^18.0.0", "proc-log": "^5.0.0", "promise-retry": "^2.0.1", - "sigstore": "^2.2.0", + "sigstore": "^3.0.0", "ssri": "^12.0.0", "tar": "^6.1.11" }, diff --git a/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js b/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js index 6ce5cfcef9559e..b32a85bb11aa39 100644 --- a/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js +++ b/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js @@ -29,8 +29,8 @@ const os = __importStar(require("node:os")); // cpus() cpus() can return an empty list if /proc is not mounted, use 1 in // this case /* c8 ignore start */ -const defLimit = 'availableParallelism' in os - ? 
Math.max(1, os.availableParallelism() - 1) +const defLimit = 'availableParallelism' in os ? + Math.max(1, os.availableParallelism() - 1) : Math.max(1, os.cpus().length - 1); const callLimit = (queue, { limit = defLimit, rejectLate } = {}) => new Promise((res, rej) => { let active = 0; diff --git a/deps/npm/node_modules/promise-call-limit/dist/esm/index.js b/deps/npm/node_modules/promise-call-limit/dist/esm/index.js index 030099929b3483..fe709db7fc04cc 100644 --- a/deps/npm/node_modules/promise-call-limit/dist/esm/index.js +++ b/deps/npm/node_modules/promise-call-limit/dist/esm/index.js @@ -3,8 +3,8 @@ import * as os from 'node:os'; // cpus() cpus() can return an empty list if /proc is not mounted, use 1 in // this case /* c8 ignore start */ -const defLimit = 'availableParallelism' in os - ? Math.max(1, os.availableParallelism() - 1) +const defLimit = 'availableParallelism' in os ? + Math.max(1, os.availableParallelism() - 1) : Math.max(1, os.cpus().length - 1); export const callLimit = (queue, { limit = defLimit, rejectLate } = {}) => new Promise((res, rej) => { let active = 0; diff --git a/deps/npm/node_modules/promise-call-limit/package.json b/deps/npm/node_modules/promise-call-limit/package.json index a3aa548d6538ac..ab14595366e223 100644 --- a/deps/npm/node_modules/promise-call-limit/package.json +++ b/deps/npm/node_modules/promise-call-limit/package.json @@ -1,6 +1,6 @@ { "name": "promise-call-limit", - "version": "3.0.1", + "version": "3.0.2", "files": [ "dist" ], @@ -18,16 +18,17 @@ "test": "tap", "preversion": "npm test", "postversion": "npm publish", - "prepublishOnly": "git push origin --follow-tags" + "prepublishOnly": "git push origin --follow-tags", + "format": "prettier --write . --log-level warn --cache" }, "devDependencies": { - "prettier": "^3.2.1", - "tap": "^18.6.1", - "tshy": "^1.8.2", - "format": "prettier --write . --loglevel warn --ignore-path ../../.prettierignore --cache", - "typedoc": "typedoc" + "prettier": "^3.3.3", + "tap": "^21.0.1", + "tshy": "^3.0.2", + "typedoc": "^0.26.6" }, "prettier": { + "experimentalTernaries": true, "semi": false, "printWidth": 70, "tabWidth": 2, @@ -62,5 +63,6 @@ }, "main": "./dist/commonjs/index.js", "types": "./dist/commonjs/index.d.ts", - "type": "module" + "type": "module", + "module": "./dist/esm/index.js" } diff --git a/deps/npm/node_modules/sigstore/dist/config.js b/deps/npm/node_modules/sigstore/dist/config.js index b4f0eea74fa4b4..e8b2392f97f236 100644 --- a/deps/npm/node_modules/sigstore/dist/config.js +++ b/deps/npm/node_modules/sigstore/dist/config.js @@ -1,6 +1,9 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.createVerificationPolicy = exports.createKeyFinder = exports.createBundleBuilder = exports.DEFAULT_TIMEOUT = exports.DEFAULT_RETRY = void 0; +exports.DEFAULT_TIMEOUT = exports.DEFAULT_RETRY = void 0; +exports.createBundleBuilder = createBundleBuilder; +exports.createKeyFinder = createKeyFinder; +exports.createVerificationPolicy = createVerificationPolicy; /* Copyright 2023 The Sigstore Authors. 
@@ -30,10 +33,12 @@ function createBundleBuilder(bundleType, options) { case 'messageSignature': return new sign_1.MessageSignatureBundleBuilder(bundlerOptions); case 'dsseEnvelope': - return new sign_1.DSSEBundleBuilder(bundlerOptions); + return new sign_1.DSSEBundleBuilder({ + ...bundlerOptions, + certificateChain: options.legacyCompatibility, + }); } } -exports.createBundleBuilder = createBundleBuilder; // Translates the public KeySelector type into the KeyFinderFunc type needed by // the verifier. function createKeyFinder(keySelector) { @@ -51,7 +56,6 @@ function createKeyFinder(keySelector) { }; }; } -exports.createKeyFinder = createKeyFinder; function createVerificationPolicy(options) { const policy = {}; const san = options.certificateIdentityEmail || options.certificateIdentityURI; @@ -63,7 +67,6 @@ function createVerificationPolicy(options) { } return policy; } -exports.createVerificationPolicy = createVerificationPolicy; // Instantiate the FulcioSigner based on the supplied options. function initSigner(options) { return new sign_1.FulcioSigner({ @@ -92,6 +95,7 @@ function initWitnesses(options) { if (isRekorEnabled(options)) { witnesses.push(new sign_1.RekorWitness({ rekorBaseURL: options.rekorURL, + entryType: options.legacyCompatibility ? 'intoto' : 'dsse', fetchOnConflict: false, retry: options.retry ?? exports.DEFAULT_RETRY, timeout: options.timeout ?? exports.DEFAULT_TIMEOUT, diff --git a/deps/npm/node_modules/sigstore/dist/sigstore.js b/deps/npm/node_modules/sigstore/dist/sigstore.js index 79d3440670cd50..c45524bbe21c22 100644 --- a/deps/npm/node_modules/sigstore/dist/sigstore.js +++ b/deps/npm/node_modules/sigstore/dist/sigstore.js @@ -23,7 +23,10 @@ var __importStar = (this && this.__importStar) || function (mod) { return result; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.createVerifier = exports.verify = exports.attest = exports.sign = void 0; +exports.sign = sign; +exports.attest = attest; +exports.verify = verify; +exports.createVerifier = createVerifier; /* Copyright 2023 The Sigstore Authors. 
@@ -50,7 +53,6 @@ options = {}) { const bundle = await bundler.create({ data: payload }); return (0, bundle_1.bundleToJSON)(bundle); } -exports.sign = sign; async function attest(payload, payloadType, /* istanbul ignore next */ options = {}) { @@ -58,7 +60,6 @@ options = {}) { const bundle = await bundler.create({ data: payload, type: payloadType }); return (0, bundle_1.bundleToJSON)(bundle); } -exports.attest = attest; async function verify(bundle, dataOrOptions, options) { let data; if (Buffer.isBuffer(dataOrOptions)) { @@ -69,7 +70,6 @@ async function verify(bundle, dataOrOptions, options) { } return createVerifier(options).then((verifier) => verifier.verify(bundle, data)); } -exports.verify = verify; async function createVerifier( /* istanbul ignore next */ options = {}) { @@ -100,4 +100,3 @@ options = {}) { }, }; } -exports.createVerifier = createVerifier; diff --git a/deps/npm/node_modules/@sigstore/bundle/LICENSE b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/LICENSE similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/LICENSE rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/LICENSE diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/build.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/build.js similarity index 87% rename from deps/npm/node_modules/@sigstore/bundle/dist/build.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/build.js index 65c71b100ad58f..ade736407554c6 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/build.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/build.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toDSSEBundle = exports.toMessageSignatureBundle = void 0; +exports.toMessageSignatureBundle = toMessageSignatureBundle; +exports.toDSSEBundle = toDSSEBundle; /* Copyright 2023 The Sigstore Authors. @@ -21,9 +22,9 @@ const bundle_1 = require("./bundle"); // Message signature bundle - $case: 'messageSignature' function toMessageSignatureBundle(options) { return { - mediaType: options.singleCertificate - ? bundle_1.BUNDLE_V03_MEDIA_TYPE - : bundle_1.BUNDLE_V02_MEDIA_TYPE, + mediaType: options.certificateChain + ? bundle_1.BUNDLE_V02_MEDIA_TYPE + : bundle_1.BUNDLE_V03_MEDIA_TYPE, content: { $case: 'messageSignature', messageSignature: { @@ -37,13 +38,12 @@ function toMessageSignatureBundle(options) { verificationMaterial: toVerificationMaterial(options), }; } -exports.toMessageSignatureBundle = toMessageSignatureBundle; // DSSE envelope bundle - $case: 'dsseEnvelope' function toDSSEBundle(options) { return { - mediaType: options.singleCertificate - ? bundle_1.BUNDLE_V03_MEDIA_TYPE - : bundle_1.BUNDLE_V02_MEDIA_TYPE, + mediaType: options.certificateChain + ? 
bundle_1.BUNDLE_V02_MEDIA_TYPE + : bundle_1.BUNDLE_V03_MEDIA_TYPE, content: { $case: 'dsseEnvelope', dsseEnvelope: toEnvelope(options), @@ -51,7 +51,6 @@ function toDSSEBundle(options) { verificationMaterial: toVerificationMaterial(options), }; } -exports.toDSSEBundle = toDSSEBundle; function toEnvelope(options) { return { payloadType: options.artifactType, @@ -75,13 +74,7 @@ function toVerificationMaterial(options) { } function toKeyContent(options) { if (options.certificate) { - if (options.singleCertificate) { - return { - $case: 'certificate', - certificate: { rawBytes: options.certificate }, - }; - } - else { + if (options.certificateChain) { return { $case: 'x509CertificateChain', x509CertificateChain: { @@ -89,6 +82,12 @@ function toKeyContent(options) { }, }; } + else { + return { + $case: 'certificate', + certificate: { rawBytes: options.certificate }, + }; + } } else { return { diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/bundle.js similarity index 79% rename from deps/npm/node_modules/@sigstore/bundle/dist/bundle.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/bundle.js index dbd35df2ca2bb3..eb67a0ddc17bbb 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/bundle.js @@ -1,6 +1,10 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.isBundleWithDsseEnvelope = exports.isBundleWithMessageSignature = exports.isBundleWithPublicKey = exports.isBundleWithCertificateChain = exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = void 0; +exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = void 0; +exports.isBundleWithCertificateChain = isBundleWithCertificateChain; +exports.isBundleWithPublicKey = isBundleWithPublicKey; +exports.isBundleWithMessageSignature = isBundleWithMessageSignature; +exports.isBundleWithDsseEnvelope = isBundleWithDsseEnvelope; exports.BUNDLE_V01_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.1'; exports.BUNDLE_V02_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.2'; exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.3'; @@ -9,16 +13,12 @@ exports.BUNDLE_V03_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle.v0.3+json'; function isBundleWithCertificateChain(b) { return b.verificationMaterial.content.$case === 'x509CertificateChain'; } -exports.isBundleWithCertificateChain = isBundleWithCertificateChain; function isBundleWithPublicKey(b) { return b.verificationMaterial.content.$case === 'publicKey'; } -exports.isBundleWithPublicKey = isBundleWithPublicKey; function isBundleWithMessageSignature(b) { return b.content.$case === 'messageSignature'; } -exports.isBundleWithMessageSignature = isBundleWithMessageSignature; function isBundleWithDsseEnvelope(b) { return b.content.$case === 'dsseEnvelope'; } -exports.isBundleWithDsseEnvelope = isBundleWithDsseEnvelope; diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/error.js diff --git 
a/deps/npm/node_modules/@sigstore/bundle/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/serialized.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/serialized.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/serialized.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/serialized.js diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/utility.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/utility.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/utility.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/utility.js diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/validate.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/validate.js similarity index 98% rename from deps/npm/node_modules/@sigstore/bundle/dist/validate.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/validate.js index 67079cd1f680a9..21b8b5ee293ba1 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/validate.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/validate.js @@ -1,6 +1,10 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.assertBundleLatest = exports.assertBundleV02 = exports.isBundleV01 = exports.assertBundleV01 = exports.assertBundle = void 0; +exports.assertBundle = assertBundle; +exports.assertBundleV01 = assertBundleV01; +exports.isBundleV01 = isBundleV01; +exports.assertBundleV02 = assertBundleV02; +exports.assertBundleLatest = assertBundleLatest; /* Copyright 2023 The Sigstore Authors. @@ -27,7 +31,6 @@ function assertBundle(b) { throw new error_1.ValidationError('invalid bundle', invalidValues); } } -exports.assertBundle = assertBundle; // Asserts that the given bundle conforms to the v0.1 bundle format. function assertBundleV01(b) { const invalidValues = []; @@ -37,7 +40,6 @@ function assertBundleV01(b) { throw new error_1.ValidationError('invalid v0.1 bundle', invalidValues); } } -exports.assertBundleV01 = assertBundleV01; // Type guard to determine if Bundle is a v0.1 bundle. function isBundleV01(b) { try { @@ -48,7 +50,6 @@ function isBundleV01(b) { return false; } } -exports.isBundleV01 = isBundleV01; // Asserts that the given bundle conforms to the v0.2 bundle format. function assertBundleV02(b) { const invalidValues = []; @@ -58,7 +59,6 @@ function assertBundleV02(b) { throw new error_1.ValidationError('invalid v0.2 bundle', invalidValues); } } -exports.assertBundleV02 = assertBundleV02; // Asserts that the given bundle conforms to the newest (0.3) bundle format. 
function assertBundleLatest(b) { const invalidValues = []; @@ -69,7 +69,6 @@ function assertBundleLatest(b) { throw new error_1.ValidationError('invalid bundle', invalidValues); } } -exports.assertBundleLatest = assertBundleLatest; function validateBundleBase(b) { const invalidValues = []; // Media type validation @@ -192,6 +191,7 @@ function validateInclusionProof(b) { // Necessary for V03 and later bundles function validateNoCertificateChain(b) { const invalidValues = []; + /* istanbul ignore next */ if (b.verificationMaterial?.content?.$case === 'x509CertificateChain') { invalidValues.push('verificationMaterial.content.$case'); } diff --git a/deps/npm/node_modules/@sigstore/bundle/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/package.json similarity index 92% rename from deps/npm/node_modules/@sigstore/bundle/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/package.json index dd853897226d2f..ee5d2b92b801a5 100644 --- a/deps/npm/node_modules/@sigstore/bundle/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/bundle", - "version": "2.3.2", + "version": "3.0.0", "description": "Sigstore bundle type", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -30,6 +30,6 @@ "@sigstore/protobuf-specs": "^0.3.2" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/core/LICENSE b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/LICENSE similarity index 100% rename from deps/npm/node_modules/@sigstore/core/LICENSE rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/LICENSE diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/error.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/length.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/length.js similarity index 97% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/length.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/length.js index 36fdaf5b9777fd..cb7ebf09dbefa4 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/asn1/length.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/length.js @@ -15,7 +15,8 @@ See the License for the specific language governing permissions and limitations under the License. */ Object.defineProperty(exports, "__esModule", { value: true }); -exports.encodeLength = exports.decodeLength = void 0; +exports.decodeLength = decodeLength; +exports.encodeLength = encodeLength; const error_1 = require("./error"); // Decodes the length of a DER-encoded ANS.1 element from the supplied stream. 
// https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-encoded-length-and-value-bytes @@ -44,7 +45,6 @@ function decodeLength(stream) { } return len; } -exports.decodeLength = decodeLength; // Translates the supplied value to a DER-encoded length. function encodeLength(len) { if (len < 128) { @@ -60,4 +60,3 @@ function encodeLength(len) { } return Buffer.from([0x80 | bytes.length, ...bytes]); } -exports.encodeLength = encodeLength; diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/obj.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/obj.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/obj.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/obj.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/parse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/parse.js similarity index 96% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/parse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/parse.js index 482c7239e83162..7fbb42632c60e8 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/asn1/parse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/parse.js @@ -1,6 +1,11 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.parseBitString = exports.parseBoolean = exports.parseOID = exports.parseTime = exports.parseStringASCII = exports.parseInteger = void 0; +exports.parseInteger = parseInteger; +exports.parseStringASCII = parseStringASCII; +exports.parseTime = parseTime; +exports.parseOID = parseOID; +exports.parseBoolean = parseBoolean; +exports.parseBitString = parseBitString; /* Copyright 2023 The Sigstore Authors. 
@@ -43,13 +48,11 @@ function parseInteger(buf) { } return n; } -exports.parseInteger = parseInteger; // Parse an ASCII string from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-basic-types#boolean function parseStringASCII(buf) { return buf.toString('ascii'); } -exports.parseStringASCII = parseStringASCII; // Parse a Date from the DER-encoded buffer // https://www.rfc-editor.org/rfc/rfc5280#section-4.1.2.5.1 function parseTime(buf, shortYear) { @@ -70,7 +73,6 @@ function parseTime(buf, shortYear) { // Translate to ISO8601 format and parse return new Date(`${m[1]}-${m[2]}-${m[3]}T${m[4]}:${m[5]}:${m[6]}Z`); } -exports.parseTime = parseTime; // Parse an OID from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-object-identifier function parseOID(buf) { @@ -95,13 +97,11 @@ function parseOID(buf) { } return oid; } -exports.parseOID = parseOID; // Parse a boolean from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-basic-types#boolean function parseBoolean(buf) { return buf[0] !== 0; } -exports.parseBoolean = parseBoolean; // Parse a bit string from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-bit-string function parseBitString(buf) { @@ -122,4 +122,3 @@ function parseBitString(buf) { } return bits; } -exports.parseBitString = parseBitString; diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/tag.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/tag.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/tag.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/tag.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/crypto.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/crypto.js similarity index 83% rename from deps/npm/node_modules/@sigstore/core/dist/crypto.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/crypto.js index dbe65b165d3574..296b5ba43e86a0 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/crypto.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/crypto.js @@ -3,7 +3,10 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.bufferEqual = exports.verify = exports.hash = exports.digest = exports.createPublicKey = void 0; +exports.createPublicKey = createPublicKey; +exports.digest = digest; +exports.verify = verify; +exports.bufferEqual = bufferEqual; /* Copyright 2023 The Sigstore Authors. @@ -20,7 +23,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ const crypto_1 = __importDefault(require("crypto")); -const SHA256_ALGORITHM = 'sha256'; function createPublicKey(key, type = 'spki') { if (typeof key === 'string') { return crypto_1.default.createPublicKey(key); @@ -29,7 +31,6 @@ function createPublicKey(key, type = 'spki') { return crypto_1.default.createPublicKey({ key, format: 'der', type: type }); } } -exports.createPublicKey = createPublicKey; function digest(algorithm, ...data) { const hash = crypto_1.default.createHash(algorithm); for (const d of data) { @@ -37,16 +38,6 @@ function digest(algorithm, ...data) { } return hash.digest(); } -exports.digest = digest; -// TODO: deprecate this in favor of digest() -function hash(...data) { - const hash = crypto_1.default.createHash(SHA256_ALGORITHM); - for (const d of data) { - hash.update(d); - } - return hash.digest(); -} -exports.hash = hash; function verify(data, key, signature, algorithm) { // The try/catch is to work around an issue in Node 14.x where verify throws // an error in some scenarios if the signature is invalid. @@ -58,7 +49,6 @@ function verify(data, key, signature, algorithm) { return false; } } -exports.verify = verify; function bufferEqual(a, b) { try { return crypto_1.default.timingSafeEqual(a, b); @@ -68,4 +58,3 @@ function bufferEqual(a, b) { return false; } } -exports.bufferEqual = bufferEqual; diff --git a/deps/npm/node_modules/@sigstore/core/dist/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/dsse.js similarity index 96% rename from deps/npm/node_modules/@sigstore/core/dist/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/dsse.js index a78783c919a256..ca7b63630e2ba9 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/dsse.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.preAuthEncoding = void 0; +exports.preAuthEncoding = preAuthEncoding; /* Copyright 2023 The Sigstore Authors. @@ -28,4 +28,3 @@ function preAuthEncoding(payloadType, payload) { ].join(' '); return Buffer.concat([Buffer.from(prefix, 'ascii'), payload]); } -exports.preAuthEncoding = preAuthEncoding; diff --git a/deps/npm/node_modules/@sigstore/core/dist/encoding.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/encoding.js similarity index 94% rename from deps/npm/node_modules/@sigstore/core/dist/encoding.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/encoding.js index b020ac4d6ecd42..7113af66db4c2d 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/encoding.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/encoding.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.base64Decode = exports.base64Encode = void 0; +exports.base64Encode = base64Encode; +exports.base64Decode = base64Decode; /* Copyright 2023 The Sigstore Authors. 
@@ -21,8 +22,6 @@ const UTF8_ENCODING = 'utf-8'; function base64Encode(str) { return Buffer.from(str, UTF8_ENCODING).toString(BASE64_ENCODING); } -exports.base64Encode = base64Encode; function base64Decode(str) { return Buffer.from(str, BASE64_ENCODING).toString(UTF8_ENCODING); } -exports.base64Decode = base64Decode; diff --git a/deps/npm/node_modules/@sigstore/core/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/json.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/json.js similarity index 98% rename from deps/npm/node_modules/@sigstore/core/dist/json.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/json.js index a50df7233c7c58..7808d033b98cc9 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/json.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/json.js @@ -15,7 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. */ Object.defineProperty(exports, "__esModule", { value: true }); -exports.canonicalize = void 0; +exports.canonicalize = canonicalize; // JSON canonicalization per https://github.com/cyberphone/json-canonicalization // eslint-disable-next-line @typescript-eslint/no-explicit-any function canonicalize(object) { @@ -58,4 +58,3 @@ function canonicalize(object) { } return buffer; } -exports.canonicalize = canonicalize; diff --git a/deps/npm/node_modules/@sigstore/core/dist/oid.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/oid.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/oid.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/oid.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/pem.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/pem.js similarity index 97% rename from deps/npm/node_modules/@sigstore/core/dist/pem.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/pem.js index f35bc3835bbd10..f1241d28d586ec 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/pem.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/pem.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.fromDER = exports.toDER = void 0; +exports.toDER = toDER; +exports.fromDER = fromDER; /* Copyright 2023 The Sigstore Authors. @@ -28,7 +29,6 @@ function toDER(certificate) { }); return Buffer.from(der, 'base64'); } -exports.toDER = toDER; // Translates a DER-encoded buffer into a PEM-encoded string. Standard PEM // encoding dictates that each certificate should have a trailing newline after // the footer. 
@@ -41,4 +41,3 @@ function fromDER(certificate, type = 'CERTIFICATE') { .join('\n') .concat('\n'); } -exports.fromDER = fromDER; diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/error.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/timestamp.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/timestamp.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/timestamp.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/timestamp.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/stream.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/stream.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/stream.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/stream.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/cert.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/cert.js similarity index 96% rename from deps/npm/node_modules/@sigstore/core/dist/x509/cert.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/cert.js index 16c0c40d858d8a..72ea8e0738bc83 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/x509/cert.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/cert.js @@ -97,13 +97,15 @@ class X509Certificate { } get subjectAltName() { const ext = this.extSubjectAltName; - return ext?.uri || ext?.rfc822Name; + return ext?.uri || /* istanbul ignore next */ ext?.rfc822Name; } get extensions() { // The extension list is the first (and only) element of the extensions // context specific tag + /* istanbul ignore next */ const extSeq = this.extensionsObj?.subs[0]; - return extSeq?.subs || /* istanbul ignore next */ []; + /* istanbul ignore next */ + return extSeq?.subs || []; } get extKeyUsage() { const ext = this.findExtension(EXTENSION_OID_KEY_USAGE); @@ -135,8 +137,10 @@ class X509Certificate { const ca = this.extBasicConstraints?.isCA || false; // If the KeyUsage extension is present, keyCertSign must be set if (this.extKeyUsage) { - ca && this.extKeyUsage.keyCertSign; + return ca && this.extKeyUsage.keyCertSign; } + // TODO: test coverage for this case + /* istanbul ignore next */ return ca; } extension(oid) { diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/ext.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/ext.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/x509/ext.js rename to 
deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/ext.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/x509/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/sct.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/sct.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/x509/sct.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/sct.js diff --git a/deps/npm/node_modules/@sigstore/core/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/package.json similarity index 92% rename from deps/npm/node_modules/@sigstore/core/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/package.json index 621ff1715bcd1c..af5dd281ac90e4 100644 --- a/deps/npm/node_modules/@sigstore/core/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/core", - "version": "1.1.0", + "version": "2.0.0", "description": "Base library for Sigstore", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -26,6 +26,6 @@ "provenance": true }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/sign/LICENSE b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/LICENSE similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/LICENSE rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/LICENSE diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/base.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/base.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/base.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/base.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/bundle.js similarity index 93% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/bundle.js index 7c2ca9164f0dfe..ed32286ad88efd 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/bundle.js @@ -23,7 +23,8 @@ var __importStar = (this && this.__importStar) || function (mod) { return result; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toDSSEBundle = exports.toMessageSignatureBundle = void 0; +exports.toMessageSignatureBundle = toMessageSignatureBundle; +exports.toDSSEBundle = toDSSEBundle; /* Copyright 2023 The Sigstore Authors. @@ -44,7 +45,7 @@ const util_1 = require("../util"); // Helper functions for assembling the parts of a Sigstore bundle // Message signature bundle - $case: 'messageSignature' function toMessageSignatureBundle(artifact, signature) { - const digest = util_1.crypto.hash(artifact.data); + const digest = util_1.crypto.digest('sha256', artifact.data); return sigstore.toMessageSignatureBundle({ digest, signature: signature.signature, @@ -52,11 +53,11 @@ function toMessageSignatureBundle(artifact, signature) { ? 
util_1.pem.toDER(signature.key.certificate) : undefined, keyHint: signature.key.$case === 'publicKey' ? signature.key.hint : undefined, + certificateChain: true, }); } -exports.toMessageSignatureBundle = toMessageSignatureBundle; // DSSE envelope bundle - $case: 'dsseEnvelope' -function toDSSEBundle(artifact, signature, singleCertificate) { +function toDSSEBundle(artifact, signature, certificateChain) { return sigstore.toDSSEBundle({ artifact: artifact.data, artifactType: artifact.type, @@ -65,7 +66,6 @@ function toDSSEBundle(artifact, signature, singleCertificate) { ? util_1.pem.toDER(signature.key.certificate) : undefined, keyHint: signature.key.$case === 'publicKey' ? signature.key.hint : undefined, - singleCertificate, + certificateChain, }); } -exports.toDSSEBundle = toDSSEBundle; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/dsse.js similarity index 93% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/dsse.js index 621700df93842a..86046ba8f3013b 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/dsse.js @@ -23,7 +23,7 @@ const bundle_1 = require("./bundle"); class DSSEBundleBuilder extends base_1.BaseBundleBuilder { constructor(options) { super(options); - this.singleCertificate = options.singleCertificate ?? false; + this.certificateChain = options.certificateChain ?? false; } // DSSE requires the artifact to be pre-encoded with the payload type // before the signature is generated. @@ -33,7 +33,7 @@ class DSSEBundleBuilder extends base_1.BaseBundleBuilder { } // Packages the artifact and signature into a DSSE bundle async package(artifact, signature) { - return (0, bundle_1.toDSSEBundle)(artifactDefaults(artifact), signature, this.singleCertificate); + return (0, bundle_1.toDSSEBundle)(artifactDefaults(artifact), signature, this.certificateChain); } } exports.DSSEBundleBuilder = DSSEBundleBuilder; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/message.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/message.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/message.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/message.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/error.js similarity index 95% rename from deps/npm/node_modules/@sigstore/sign/dist/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/error.js index d57e4567fb89ee..d28f1913cc77e9 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/error.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/error.js @@ -15,7 +15,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ Object.defineProperty(exports, "__esModule", { value: true }); -exports.internalError = exports.InternalError = void 0; +exports.InternalError = void 0; +exports.internalError = internalError; const error_1 = require("./external/error"); class InternalError extends Error { constructor({ code, message, cause, }) { @@ -36,4 +37,3 @@ function internalError(err, code, message) { cause: err, }); } -exports.internalError = internalError; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/error.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/fetch.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fetch.js similarity index 95% rename from deps/npm/node_modules/@sigstore/sign/dist/external/fetch.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fetch.js index b2d81bde7be16f..116090f3c641ef 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/external/fetch.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fetch.js @@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.fetchWithRetry = void 0; +exports.fetchWithRetry = fetchWithRetry; /* Copyright 2023 The Sigstore Authors. @@ -58,14 +58,13 @@ async function fetchWithRetry(url, options) { } }, retryOpts(options.retry)); } -exports.fetchWithRetry = fetchWithRetry; // Translate a Response into an HTTPError instance. This will attempt to parse // the response body for a message, but will default to the statusText if none // is found. 
const errorFromResponse = async (response) => { let message = response.statusText; - const location = response.headers?.get(HTTP2_HEADER_LOCATION) || undefined; - const contentType = response.headers?.get(HTTP2_HEADER_CONTENT_TYPE); + const location = response.headers.get(HTTP2_HEADER_LOCATION) || undefined; + const contentType = response.headers.get(HTTP2_HEADER_CONTENT_TYPE); // If response type is JSON, try to parse the body for a message if (contentType?.includes('application/json')) { try { diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/fulcio.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fulcio.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/fulcio.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fulcio.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/rekor.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/rekor.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/rekor.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/rekor.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/tsa.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/tsa.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/tsa.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/tsa.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/identity/ci.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/ci.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/identity/ci.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/ci.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/identity/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/identity/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/identity/provider.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/provider.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/identity/provider.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/provider.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js similarity index 96% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js index 81b421eabadb2e..f01703cfab5645 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js @@ -35,7 +35,6 @@ class CAClient { const cert = resp.signedCertificateEmbeddedSct ? 
resp.signedCertificateEmbeddedSct : resp.signedCertificateDetachedSct; - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion return cert.chain.certificates; } catch (err) { diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/signer.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/signer.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/signer.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/signer.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/types/fetch.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/types/fetch.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/types/fetch.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/types/fetch.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/util/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/util/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/util/oidc.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/oidc.js similarity index 96% rename from deps/npm/node_modules/@sigstore/sign/dist/util/oidc.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/oidc.js index 2f5947d7b6b878..37c5b168ee12e6 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/util/oidc.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/oidc.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.extractJWTSubject = void 0; +exports.extractJWTSubject = extractJWTSubject; /* Copyright 2023 The Sigstore Authors. 
@@ -28,4 +28,3 @@ function extractJWTSubject(jwt) { return payload.sub; } } -exports.extractJWTSubject = extractJWTSubject; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/util/ua.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/ua.js similarity index 95% rename from deps/npm/node_modules/@sigstore/sign/dist/util/ua.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/ua.js index c142330eb8338c..b15ff2070fb9fc 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/util/ua.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/ua.js @@ -23,7 +23,6 @@ const os_1 = __importDefault(require("os")); // Format User-Agent: / () // source: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent const getUserAgent = () => { - // eslint-disable-next-line @typescript-eslint/no-var-requires const packageVersion = require('../../package.json').version; const nodeVersion = process.version; const platformName = os_1.default.platform(); diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/client.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/client.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/client.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/client.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/entry.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/entry.js similarity index 87% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/entry.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/entry.js index f6c165380ba45d..69a3b477e54429 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/entry.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/entry.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toProposedEntry = void 0; +exports.toProposedEntry = toProposedEntry; /* Copyright 2023 The Sigstore Authors. @@ -18,21 +18,21 @@ limitations under the License. 
*/ const bundle_1 = require("@sigstore/bundle"); const util_1 = require("../../util"); +const SHA256_ALGORITHM = 'sha256'; function toProposedEntry(content, publicKey, // TODO: Remove this parameter once have completely switched to 'dsse' entries -entryType = 'intoto') { +entryType = 'dsse') { switch (content.$case) { case 'dsseEnvelope': - // TODO: Remove this conditional once have completely switched to 'dsse' entries - if (entryType === 'dsse') { - return toProposedDSSEEntry(content.dsseEnvelope, publicKey); + // TODO: Remove this conditional once have completely ditched "intoto" entries + if (entryType === 'intoto') { + return toProposedIntotoEntry(content.dsseEnvelope, publicKey); } - return toProposedIntotoEntry(content.dsseEnvelope, publicKey); + return toProposedDSSEEntry(content.dsseEnvelope, publicKey); case 'messageSignature': return toProposedHashedRekordEntry(content.messageSignature, publicKey); } } -exports.toProposedEntry = toProposedEntry; // Returns a properly formatted Rekor "hashedrekord" entry for the given digest // and signature function toProposedHashedRekordEntry(messageSignature, publicKey) { @@ -45,7 +45,7 @@ function toProposedHashedRekordEntry(messageSignature, publicKey) { spec: { data: { hash: { - algorithm: 'sha256', + algorithm: SHA256_ALGORITHM, value: hexDigest, }, }, @@ -78,7 +78,9 @@ function toProposedDSSEEntry(envelope, publicKey) { // envelope and signature function toProposedIntotoEntry(envelope, publicKey) { // Calculate the value for the payloadHash field in the Rekor entry - const payloadHash = util_1.crypto.hash(envelope.payload).toString('hex'); + const payloadHash = util_1.crypto + .digest(SHA256_ALGORITHM, envelope.payload) + .toString('hex'); // Calculate the value for the hash field in the Rekor entry const envelopeHash = calculateDSSEHash(envelope, publicKey); // Collect values for re-creating the DSSE envelope. 
@@ -107,8 +109,8 @@ function toProposedIntotoEntry(envelope, publicKey) { spec: { content: { envelope: dsse, - hash: { algorithm: 'sha256', value: envelopeHash }, - payloadHash: { algorithm: 'sha256', value: payloadHash }, + hash: { algorithm: SHA256_ALGORITHM, value: envelopeHash }, + payloadHash: { algorithm: SHA256_ALGORITHM, value: payloadHash }, }, }, }; @@ -132,5 +134,7 @@ function calculateDSSEHash(envelope, publicKey) { if (envelope.signatures[0].keyid.length > 0) { dsse.signatures[0].keyid = envelope.signatures[0].keyid; } - return util_1.crypto.hash(util_1.json.canonicalize(dsse)).toString('hex'); + return util_1.crypto + .digest(SHA256_ALGORITHM, util_1.json.canonicalize(dsse)) + .toString('hex'); } diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/client.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/client.js similarity index 86% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/client.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/client.js index a334deb00b7756..754de3748dbb36 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/client.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/client.js @@ -19,6 +19,7 @@ limitations under the License. const error_1 = require("../../error"); const tsa_1 = require("../../external/tsa"); const util_1 = require("../../util"); +const SHA256_ALGORITHM = 'sha256'; class TSAClient { constructor(options) { this.tsa = new tsa_1.TimestampAuthority({ @@ -29,8 +30,10 @@ class TSAClient { } async createTimestamp(signature) { const request = { - artifactHash: util_1.crypto.hash(signature).toString('base64'), - hashAlgorithm: 'sha256', + artifactHash: util_1.crypto + .digest(SHA256_ALGORITHM, signature) + .toString('base64'), + hashAlgorithm: SHA256_ALGORITHM, }; try { return await this.tsa.createTimestamp(request); diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/witness.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/witness.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/witness.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/witness.js diff --git a/deps/npm/node_modules/@sigstore/sign/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/package.json similarity index 78% rename from deps/npm/node_modules/@sigstore/sign/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/package.json index 4adb3d24c6fa68..fe05e8dc2d73ad 100644 --- a/deps/npm/node_modules/@sigstore/sign/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/sign", - "version": "2.3.2", + 
"version": "3.0.0", "description": "Sigstore signing library", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,20 +27,20 @@ }, "devDependencies": { "@sigstore/jest": "^0.0.0", - "@sigstore/mock": "^0.7.4", - "@sigstore/rekor-types": "^2.0.0", + "@sigstore/mock": "^0.8.0", + "@sigstore/rekor-types": "^3.0.0", "@types/make-fetch-happen": "^10.0.4", "@types/promise-retry": "^1.1.6" }, "dependencies": { - "@sigstore/bundle": "^2.3.2", - "@sigstore/core": "^1.0.0", + "@sigstore/bundle": "^3.0.0", + "@sigstore/core": "^2.0.0", "@sigstore/protobuf-specs": "^0.3.2", - "make-fetch-happen": "^13.0.1", - "proc-log": "^4.2.0", + "make-fetch-happen": "^14.0.1", + "proc-log": "^5.0.0", "promise-retry": "^2.0.1" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/bundle/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/dsse.js similarity index 93% rename from deps/npm/node_modules/@sigstore/verify/dist/bundle/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/dsse.js index 193f875fd1014e..1033fc422aba09 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/bundle/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/dsse.js @@ -22,7 +22,7 @@ class DSSESignatureContent { this.env = env; } compareDigest(digest) { - return core_1.crypto.bufferEqual(digest, core_1.crypto.hash(this.env.payload)); + return core_1.crypto.bufferEqual(digest, core_1.crypto.digest('sha256', this.env.payload)); } compareSignature(signature) { return core_1.crypto.bufferEqual(signature, this.signature); diff --git a/deps/npm/node_modules/@sigstore/verify/dist/bundle/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/index.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/bundle/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/index.js index 63f8d4c4998811..4287d8032b75f0 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/bundle/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.signatureContent = exports.toSignedEntity = void 0; +exports.toSignedEntity = toSignedEntity; +exports.signatureContent = signatureContent; const core_1 = require("@sigstore/core"); const dsse_1 = require("./dsse"); const message_1 = require("./message"); @@ -26,7 +27,6 @@ function toSignedEntity(bundle, artifact) { timestamps, }; } -exports.toSignedEntity = toSignedEntity; function signatureContent(bundle, artifact) { switch (bundle.content.$case) { case 'dsseEnvelope': @@ -35,7 +35,6 @@ function signatureContent(bundle, artifact) { return new message_1.MessageSignatureContent(bundle.content.messageSignature, artifact); } } -exports.signatureContent = signatureContent; function key(bundle) { switch (bundle.verificationMaterial.content.$case) { case 'publicKey': diff --git a/deps/npm/node_modules/@sigstore/verify/dist/bundle/message.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/message.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/bundle/message.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/message.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/error.js 
b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/error.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/key/certificate.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/certificate.js similarity index 99% rename from deps/npm/node_modules/@sigstore/verify/dist/key/certificate.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/certificate.js index c9140dd98d58a6..a916de0e51e712 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/key/certificate.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/certificate.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.CertificateChainVerifier = exports.verifyCertificateChain = void 0; +exports.CertificateChainVerifier = void 0; +exports.verifyCertificateChain = verifyCertificateChain; const error_1 = require("../error"); const trust_1 = require("../trust"); function verifyCertificateChain(leaf, certificateAuthorities) { @@ -32,7 +33,6 @@ function verifyCertificateChain(leaf, certificateAuthorities) { cause: error, }); } -exports.verifyCertificateChain = verifyCertificateChain; class CertificateChainVerifier { constructor(opts) { this.untrustedCert = opts.untrustedCert; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/key/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/index.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/key/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/index.js index 682a306803a991..cc894aab95a5d5 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/key/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyCertificate = exports.verifyPublicKey = void 0; +exports.verifyPublicKey = verifyPublicKey; +exports.verifyCertificate = verifyCertificate; /* Copyright 2023 The Sigstore Authors. 
@@ -34,7 +35,6 @@ function verifyPublicKey(hint, timestamps, trustMaterial) { }); return { key: key.publicKey }; } -exports.verifyPublicKey = verifyPublicKey; function verifyCertificate(leaf, timestamps, trustMaterial) { // Check that leaf certificate chains to a trusted CA const path = (0, certificate_1.verifyCertificateChain)(leaf, trustMaterial.certificateAuthorities); @@ -51,10 +51,10 @@ function verifyCertificate(leaf, timestamps, trustMaterial) { signer: getSigner(path[0]), }; } -exports.verifyCertificate = verifyCertificate; function getSigner(cert) { let issuer; const issuerExtension = cert.extension(OID_FULCIO_ISSUER_V2); + /* istanbul ignore next */ if (issuerExtension) { issuer = issuerExtension.valueObj.subs?.[0]?.value.toString('ascii'); } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/key/sct.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/sct.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/key/sct.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/sct.js index aea412840e1039..8eca48738096ee 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/key/sct.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/sct.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifySCTs = void 0; +exports.verifySCTs = verifySCTs; /* Copyright 2023 The Sigstore Authors. @@ -52,7 +52,7 @@ function verifySCTs(cert, issuer, ctlogs) { // https://www.rfc-editor.org/rfc/rfc6962#section-3.2 const preCert = new core_1.ByteStream(); // Calculate hash of the issuer's public key - const issuerId = core_1.crypto.hash(issuer.publicKey); + const issuerId = core_1.crypto.digest('sha256', issuer.publicKey); preCert.appendView(issuerId); // Re-encodes the certificate to DER after removing the SCT extension const tbs = clone.tbsCertificate.toDER(); @@ -76,4 +76,3 @@ function verifySCTs(cert, issuer, ctlogs) { return sct.logID; }); } -exports.verifySCTs = verifySCTs; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/policy.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/policy.js similarity index 93% rename from deps/npm/node_modules/@sigstore/verify/dist/policy.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/policy.js index 731e5c83328475..f5960cf047b84b 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/policy.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/policy.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyExtensions = exports.verifySubjectAlternativeName = void 0; +exports.verifySubjectAlternativeName = verifySubjectAlternativeName; +exports.verifyExtensions = verifyExtensions; const error_1 = require("./error"); function verifySubjectAlternativeName(policyIdentity, signerIdentity) { if (signerIdentity === undefined || !signerIdentity.match(policyIdentity)) { @@ -10,7 +11,6 @@ function verifySubjectAlternativeName(policyIdentity, signerIdentity) { }); } } -exports.verifySubjectAlternativeName = verifySubjectAlternativeName; function verifyExtensions(policyExtensions, signerExtensions = {}) { let key; for (key in policyExtensions) { @@ -22,4 +22,3 @@ function verifyExtensions(policyExtensions, signerExtensions = {}) { } } } -exports.verifyExtensions = verifyExtensions; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/shared.types.js 
b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/shared.types.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/shared.types.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/shared.types.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js similarity index 99% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js index 04a87383f0fd17..46619b675f8863 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyCheckpoint = void 0; +exports.verifyCheckpoint = verifyCheckpoint; /* Copyright 2023 The Sigstore Authors. @@ -61,7 +61,6 @@ function verifyCheckpoint(entry, tlogs) { }); } } -exports.verifyCheckpoint = verifyCheckpoint; // Verifies the signatures in the SignedNote. For each signature, the // corresponding transparency log is looked up by the key hint and the // signature is verified against the public key in the transparency log. diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/index.js similarity index 96% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/index.js index 0da554f648d25e..56e948de19338d 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyTLogTimestamp = exports.verifyTSATimestamp = void 0; +exports.verifyTSATimestamp = verifyTSATimestamp; +exports.verifyTLogTimestamp = verifyTLogTimestamp; const error_1 = require("../error"); const checkpoint_1 = require("./checkpoint"); const merkle_1 = require("./merkle"); @@ -14,7 +15,6 @@ function verifyTSATimestamp(timestamp, data, timestampAuthorities) { timestamp: timestamp.signingTime, }; } -exports.verifyTSATimestamp = verifyTSATimestamp; function verifyTLogTimestamp(entry, tlogAuthorities) { let inclusionVerified = false; if (isTLogEntryWithInclusionPromise(entry)) { @@ -38,7 +38,6 @@ function verifyTLogTimestamp(entry, tlogAuthorities) { timestamp: new Date(Number(entry.integratedTime) * 1000), }; } -exports.verifyTLogTimestamp = verifyTLogTimestamp; function isTLogEntryWithInclusionPromise(entry) { return entry.inclusionPromise !== undefined; } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/merkle.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/merkle.js similarity index 95% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/merkle.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/merkle.js index 9895d01b7abc03..f57cae42002bd0 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/merkle.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/merkle.js @@ -1,6 +1,6 @@ "use strict"; 
Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyMerkleInclusion = void 0; +exports.verifyMerkleInclusion = verifyMerkleInclusion; /* Copyright 2023 The Sigstore Authors. @@ -53,7 +53,6 @@ function verifyMerkleInclusion(entry) { }); } } -exports.verifyMerkleInclusion = verifyMerkleInclusion; // Breaks down inclusion proof for a leaf at the specified index in a tree of // the specified size. The split point is where paths to the index leaf and // the (size - 1) leaf diverge. Returns lengths of the bottom and upper proof @@ -98,8 +97,8 @@ function bitLength(n) { // Hashing logic according to RFC6962. // https://datatracker.ietf.org/doc/html/rfc6962#section-2 function hashChildren(left, right) { - return core_1.crypto.hash(RFC6962_NODE_HASH_PREFIX, left, right); + return core_1.crypto.digest('sha256', RFC6962_NODE_HASH_PREFIX, left, right); } function hashLeaf(leaf) { - return core_1.crypto.hash(RFC6962_LEAF_HASH_PREFIX, leaf); + return core_1.crypto.digest('sha256', RFC6962_LEAF_HASH_PREFIX, leaf); } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/set.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/set.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/set.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/set.js index a6357c06999cba..5d3f47bb88746a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/set.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/set.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyTLogSET = void 0; +exports.verifyTLogSET = verifyTLogSET; /* Copyright 2023 The Sigstore Authors. @@ -46,7 +46,6 @@ function verifyTLogSET(entry, tlogs) { }); } } -exports.verifyTLogSET = verifyTLogSET; // Returns a properly formatted "VerificationPayload" for one of the // transaction log entires in the given bundle which can be used for SET // verification. 
diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/tsa.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/tsa.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/tsa.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/tsa.js index 7b095bc3a7f908..70388cd06c52d6 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/tsa.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/tsa.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyRFC3161Timestamp = void 0; +exports.verifyRFC3161Timestamp = verifyRFC3161Timestamp; const core_1 = require("@sigstore/core"); const error_1 = require("../error"); const certificate_1 = require("../key/certificate"); @@ -35,7 +35,6 @@ function verifyRFC3161Timestamp(timestamp, data, timestampAuthorities) { }); } } -exports.verifyRFC3161Timestamp = verifyRFC3161Timestamp; function verifyTimestampForCA(timestamp, data, ca) { const [leaf, ...cas] = ca.certChain; const signingKey = core_1.crypto.createPublicKey(leaf.publicKey); diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/dsse.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/dsse.js index bf430e61dde563..d71ed8c6e7ad9a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/dsse.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyDSSETLogBody = void 0; +exports.verifyDSSETLogBody = verifyDSSETLogBody; /* Copyright 2023 The Sigstore Authors. @@ -29,7 +29,6 @@ function verifyDSSETLogBody(tlogEntry, content) { }); } } -exports.verifyDSSETLogBody = verifyDSSETLogBody; // Compare the given dsse v0.0.1 tlog entry to the given DSSE envelope. function verifyDSSE001TLogBody(tlogEntry, content) { // Ensure the bundle's DSSE only contains a single signature diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js index d1758858f030d8..c4aa345b57ba7a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyHashedRekordTLogBody = void 0; +exports.verifyHashedRekordTLogBody = verifyHashedRekordTLogBody; /* Copyright 2023 The Sigstore Authors. 
@@ -29,7 +29,6 @@ function verifyHashedRekordTLogBody(tlogEntry, content) { }); } } -exports.verifyHashedRekordTLogBody = verifyHashedRekordTLogBody; // Compare the given hashedrekord v0.0.1 tlog entry to the given message // signature function verifyHashedrekord001TLogBody(tlogEntry, content) { diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/index.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/index.js index adfc70ed51ad05..da235360c594a8 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/index.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyTLogBody = void 0; +exports.verifyTLogBody = verifyTLogBody; /* Copyright 2023 The Sigstore Authors. @@ -45,4 +45,3 @@ function verifyTLogBody(entry, sigContent) { }); } } -exports.verifyTLogBody = verifyTLogBody; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/intoto.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/intoto.js index 74c7f50d763e1d..9096ae9418cc30 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/intoto.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyIntotoTLogBody = void 0; +exports.verifyIntotoTLogBody = verifyIntotoTLogBody; /* Copyright 2023 The Sigstore Authors. @@ -29,7 +29,6 @@ function verifyIntotoTLogBody(tlogEntry, content) { }); } } -exports.verifyIntotoTLogBody = verifyIntotoTLogBody; // Compare the given intoto v0.0.2 tlog entry to the given DSSE envelope. function verifyIntoto002TLogBody(tlogEntry, content) { // Ensure the bundle's DSSE contains a single signature diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/filter.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/filter.js similarity index 93% rename from deps/npm/node_modules/@sigstore/verify/dist/trust/filter.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/filter.js index c09d055913c4c7..880a16cf1940ea 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/trust/filter.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/filter.js @@ -1,12 +1,12 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.filterTLogAuthorities = exports.filterCertAuthorities = void 0; +exports.filterCertAuthorities = filterCertAuthorities; +exports.filterTLogAuthorities = filterTLogAuthorities; function filterCertAuthorities(certAuthorities, criteria) { return certAuthorities.filter((ca) => { return (ca.validFor.start <= criteria.start && ca.validFor.end >= criteria.end); }); } -exports.filterCertAuthorities = filterCertAuthorities; // Filter the list of tlog instances to only those which match the given log // ID and have public keys which are valid for the given integrated time. 
function filterTLogAuthorities(tlogAuthorities, criteria) { @@ -21,4 +21,3 @@ function filterTLogAuthorities(tlogAuthorities, criteria) { criteria.targetDate <= tlog.validFor.end); }); } -exports.filterTLogAuthorities = filterTLogAuthorities; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/index.js similarity index 95% rename from deps/npm/node_modules/@sigstore/verify/dist/trust/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/index.js index 954de558415902..bfab2eb4f9975a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toTrustMaterial = exports.filterTLogAuthorities = exports.filterCertAuthorities = void 0; +exports.filterTLogAuthorities = exports.filterCertAuthorities = void 0; +exports.toTrustMaterial = toTrustMaterial; /* Copyright 2023 The Sigstore Authors. @@ -34,7 +35,6 @@ function toTrustMaterial(root, keys) { publicKey: keyFinder, }; } -exports.toTrustMaterial = toTrustMaterial; function createTLogAuthority(tlogInstance) { const keyDetails = tlogInstance.publicKey.keyDetails; const keyType = keyDetails === protobuf_specs_1.PublicKeyDetails.PKCS1_RSA_PKCS1V5 || @@ -54,6 +54,7 @@ function createTLogAuthority(tlogInstance) { }; } function createCertAuthority(ca) { + /* istanbul ignore next */ return { certChain: ca.certChain.certificates.map((cert) => { return core_1.X509Certificate.parse(cert.rawBytes); @@ -76,6 +77,7 @@ function keyLocator(keys) { return { publicKey: core_1.crypto.createPublicKey(key.rawBytes), validFor: (date) => { + /* istanbul ignore next */ return ((key.validFor?.start || BEGINNING_OF_TIME) <= date && (key.validFor?.end || END_OF_TIME) >= date); }, diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/trust.types.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/trust.types.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/trust/trust.types.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/trust.types.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/verifier.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/verifier.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/verifier.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/verifier.js diff --git a/deps/npm/node_modules/@sigstore/verify/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/package.json similarity index 86% rename from deps/npm/node_modules/@sigstore/verify/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/package.json index cd0c845a797e47..edf72b8bfd9680 100644 --- a/deps/npm/node_modules/@sigstore/verify/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/verify", - "version": "1.2.1", + "version": "2.0.0", "description": "Verification of Sigstore signatures", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,10 +27,10 @@ }, "dependencies": { "@sigstore/protobuf-specs": "^0.3.2", - "@sigstore/bundle": "^2.3.2", - "@sigstore/core": "^1.1.0" + "@sigstore/bundle": "^3.0.0", + "@sigstore/core": "^2.0.0" }, "engines": { - 
"node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/sigstore/package.json b/deps/npm/node_modules/sigstore/package.json index fa8744bf304a3f..0f798a263657b4 100644 --- a/deps/npm/node_modules/sigstore/package.json +++ b/deps/npm/node_modules/sigstore/package.json @@ -1,6 +1,6 @@ { "name": "sigstore", - "version": "2.3.1", + "version": "3.0.0", "description": "code-signing for npm packages", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,21 +27,21 @@ "provenance": true }, "devDependencies": { - "@sigstore/rekor-types": "^2.0.0", + "@sigstore/rekor-types": "^3.0.0", "@sigstore/jest": "^0.0.0", - "@sigstore/mock": "^0.7.4", - "@tufjs/repo-mock": "^2.0.1", + "@sigstore/mock": "^0.8.0", + "@tufjs/repo-mock": "^3.0.1", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { - "@sigstore/bundle": "^2.3.2", - "@sigstore/core": "^1.0.0", + "@sigstore/bundle": "^3.0.0", + "@sigstore/core": "^2.0.0", "@sigstore/protobuf-specs": "^0.3.2", - "@sigstore/sign": "^2.3.2", - "@sigstore/tuf": "^2.3.4", - "@sigstore/verify": "^1.2.1" + "@sigstore/sign": "^3.0.0", + "@sigstore/tuf": "^3.0.0", + "@sigstore/verify": "^2.0.0" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/spdx-license-ids/deprecated.json b/deps/npm/node_modules/spdx-license-ids/deprecated.json index 278531e40c613d..4f70a14c7469da 100644 --- a/deps/npm/node_modules/spdx-license-ids/deprecated.json +++ b/deps/npm/node_modules/spdx-license-ids/deprecated.json @@ -19,6 +19,7 @@ "LGPL-2.0", "LGPL-2.1", "LGPL-3.0", + "Net-SNMP", "Nunit", "StandardML-NJ", "bzip2-1.0.5", diff --git a/deps/npm/node_modules/spdx-license-ids/index.json b/deps/npm/node_modules/spdx-license-ids/index.json index c7686a710d61d1..f43d5016bd95ab 100644 --- a/deps/npm/node_modules/spdx-license-ids/index.json +++ b/deps/npm/node_modules/spdx-license-ids/index.json @@ -197,6 +197,8 @@ "DRL-1.0", "DRL-1.1", "DSDP", + "DocBook-Schema", + "DocBook-XML", "Dotseqn", "ECL-1.0", "ECL-2.0", @@ -260,6 +262,7 @@ "Glulxe", "Graphics-Gems", "Gutmann", + "HIDAPI", "HP-1986", "HP-1989", "HPND", @@ -270,6 +273,7 @@ "HPND-Kevlin-Henney", "HPND-MIT-disclaimer", "HPND-Markus-Kuhn", + "HPND-Netrek", "HPND-Pbmplus", "HPND-UC", "HPND-UC-export-US", @@ -403,7 +407,6 @@ "NTP", "NTP-0", "Naumen", - "Net-SNMP", "NetCDF", "Newsletr", "Nokia", @@ -485,6 +488,7 @@ "RSCPL", "Rdisc", "Ruby", + "Ruby-pty", "SAX-PD", "SAX-PD-2.0", "SCEA", @@ -541,6 +545,7 @@ "UMich-Merit", "UPL-1.0", "URT-RLE", + "Ubuntu-font-1.0", "Unicode-3.0", "Unicode-DFS-2015", "Unicode-DFS-2016", @@ -559,6 +564,7 @@ "Wsuipa", "X11", "X11-distribute-modifications-variant", + "X11-swapped", "XFree86-1.1", "XSkat", "Xdebug-1.03", diff --git a/deps/npm/node_modules/spdx-license-ids/package.json b/deps/npm/node_modules/spdx-license-ids/package.json index 5f5ed9554f2579..7ab34aab6b8b1d 100644 --- a/deps/npm/node_modules/spdx-license-ids/package.json +++ b/deps/npm/node_modules/spdx-license-ids/package.json @@ -1,6 +1,6 @@ { "name": "spdx-license-ids", - "version": "3.0.18", + "version": "3.0.20", "description": "A list of SPDX license identifiers", "repository": "jslicense/spdx-license-ids", "author": "Shinnosuke Watanabe (https://github.com/shinnn)", diff --git a/deps/npm/node_modules/tuf-js/dist/config.js b/deps/npm/node_modules/tuf-js/dist/config.js index 6845679942fec5..c66d76af86b98c 100644 --- a/deps/npm/node_modules/tuf-js/dist/config.js +++ b/deps/npm/node_modules/tuf-js/dist/config.js @@ -2,7 +2,7 
@@ Object.defineProperty(exports, "__esModule", { value: true }); exports.defaultConfig = void 0; exports.defaultConfig = { - maxRootRotations: 32, + maxRootRotations: 256, maxDelegations: 32, rootMaxLength: 512000, //bytes timestampMaxLength: 16384, // bytes diff --git a/deps/npm/node_modules/tuf-js/dist/updater.js b/deps/npm/node_modules/tuf-js/dist/updater.js index 5317f7e14659ac..8d5eb4428f044a 100644 --- a/deps/npm/node_modules/tuf-js/dist/updater.js +++ b/deps/npm/node_modules/tuf-js/dist/updater.js @@ -144,7 +144,7 @@ class Updater { const rootVersion = this.trustedSet.root.signed.version; const lowerBound = rootVersion + 1; const upperBound = lowerBound + this.config.maxRootRotations; - for (let version = lowerBound; version <= upperBound; version++) { + for (let version = lowerBound; version < upperBound; version++) { const rootUrl = url.join(this.metadataBaseUrl, `${version}.root.json`); try { // Client workflow 5.3.3: download new root metadata file @@ -155,7 +155,13 @@ class Updater { this.persistMetadata(models_1.MetadataKind.Root, bytesData); } catch (error) { - break; + if (error instanceof error_1.DownloadHTTPError) { + // 404/403 means current root is newest available + if ([403, 404].includes(error.statusCode)) { + break; + } + } + throw error; } } } @@ -247,7 +253,8 @@ class Updater { const version = this.trustedSet.root.signed.consistentSnapshot ? metaInfo.version : undefined; - const metadataUrl = url.join(this.metadataBaseUrl, version ? `${version}.${role}.json` : `${role}.json`); + const encodedRole = encodeURIComponent(role); + const metadataUrl = url.join(this.metadataBaseUrl, version ? `${version}.${encodedRole}.json` : `${encodedRole}.json`); try { // Client workflow 5.6.1: download targets metadata file const bytesData = await this.fetcher.downloadBytes(metadataUrl, maxLength); @@ -280,7 +287,6 @@ class Updater { while (visitedRoleNames.size <= this.config.maxDelegations && delegationsToVisit.length > 0) { // Pop the role name from the top of the stack. - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const { roleName, parentRoleName } = delegationsToVisit.pop(); // Skip any visited current role to prevent cycles. // Client workflow 5.6.7.1: skip already-visited roles @@ -330,13 +336,14 @@ class Updater { return path.join(this.targetDir, filePath); } persistMetadata(metaDataName, bytesData) { + const encodedName = encodeURIComponent(metaDataName); try { - const filePath = path.join(this.dir, `${metaDataName}.json`); + const filePath = path.join(this.dir, `${encodedName}.json`); log('WRITE %s', filePath); fs.writeFileSync(filePath, bytesData.toString('utf8')); } catch (error) { - throw new error_1.PersistError(`Failed to persist metadata ${metaDataName} error: ${error}`); + throw new error_1.PersistError(`Failed to persist metadata ${encodedName} error: ${error}`); } } } diff --git a/deps/npm/node_modules/tuf-js/dist/utils/url.js b/deps/npm/node_modules/tuf-js/dist/utils/url.js index ce67fe2c230535..359d1f3ef385b7 100644 --- a/deps/npm/node_modules/tuf-js/dist/utils/url.js +++ b/deps/npm/node_modules/tuf-js/dist/utils/url.js @@ -1,11 +1,10 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.join = void 0; +exports.join = join; const url_1 = require("url"); function join(base, path) { return new url_1.URL(ensureTrailingSlash(base) + removeLeadingSlash(path)).toString(); } -exports.join = join; function ensureTrailingSlash(path) { return path.endsWith('/') ? 
path : path + '/'; } diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js deleted file mode 100644 index c541b93001517e..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const net = require('net') -const tls = require('tls') -const { once } = require('events') -const timers = require('timers/promises') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, getProxyAgent, proxyCache } = require('./proxy.js') -const Errors = require('./errors.js') -const { Agent: AgentBase } = require('agent-base') - -module.exports = class Agent extends AgentBase { - #options - #timeouts - #proxy - #noProxy - #ProxyAgent - - constructor (options = {}) { - const { timeouts, proxy, noProxy, ...normalizedOptions } = normalizeOptions(options) - - super(normalizedOptions) - - this.#options = normalizedOptions - this.#timeouts = timeouts - - if (proxy) { - this.#proxy = new URL(proxy) - this.#noProxy = noProxy - this.#ProxyAgent = getProxyAgent(proxy) - } - } - - get proxy () { - return this.#proxy ? { url: this.#proxy } : {} - } - - #getProxy (options) { - if (!this.#proxy) { - return - } - - const proxy = getProxy(`${options.protocol}//${options.host}:${options.port}`, { - proxy: this.#proxy, - noProxy: this.#noProxy, - }) - - if (!proxy) { - return - } - - const cacheKey = cacheOptions({ - ...options, - ...this.#options, - timeouts: this.#timeouts, - proxy, - }) - - if (proxyCache.has(cacheKey)) { - return proxyCache.get(cacheKey) - } - - let ProxyAgent = this.#ProxyAgent - if (Array.isArray(ProxyAgent)) { - ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0] - } - - const proxyAgent = new ProxyAgent(proxy, { - ...this.#options, - socketOptions: { family: this.#options.family }, - }) - proxyCache.set(cacheKey, proxyAgent) - - return proxyAgent - } - - // takes an array of promises and races them against the connection timeout - // which will throw the necessary error if it is hit. This will return the - // result of the promise race. - async #timeoutConnection ({ promises, options, timeout }, ac = new AbortController()) { - if (timeout) { - const connectionTimeout = timers.setTimeout(timeout, null, { signal: ac.signal }) - .then(() => { - throw new Errors.ConnectionTimeoutError(`${options.host}:${options.port}`) - }).catch((err) => { - if (err.name === 'AbortError') { - return - } - throw err - }) - promises.push(connectionTimeout) - } - - let result - try { - result = await Promise.race(promises) - ac.abort() - } catch (err) { - ac.abort() - throw err - } - return result - } - - async connect (request, options) { - // if the connection does not have its own lookup function - // set, then use the one from our options - options.lookup ??= this.#options.lookup - - let socket - let timeout = this.#timeouts.connection - const isSecureEndpoint = this.isSecureEndpoint(options) - - const proxy = this.#getProxy(options) - if (proxy) { - // some of the proxies will wait for the socket to fully connect before - // returning so we have to await this while also racing it against the - // connection timeout. 
- const start = Date.now() - socket = await this.#timeoutConnection({ - options, - timeout, - promises: [proxy.connect(request, options)], - }) - // see how much time proxy.connect took and subtract it from - // the timeout - if (timeout) { - timeout = timeout - (Date.now() - start) - } - } else { - socket = (isSecureEndpoint ? tls : net).connect(options) - } - - socket.setKeepAlive(this.keepAlive, this.keepAliveMsecs) - socket.setNoDelay(this.keepAlive) - - const abortController = new AbortController() - const { signal } = abortController - - const connectPromise = socket[isSecureEndpoint ? 'secureConnecting' : 'connecting'] - ? once(socket, isSecureEndpoint ? 'secureConnect' : 'connect', { signal }) - : Promise.resolve() - - await this.#timeoutConnection({ - options, - timeout, - promises: [ - connectPromise, - once(socket, 'error', { signal }).then((err) => { - throw err[0] - }), - ], - }, abortController) - - if (this.#timeouts.idle) { - socket.setTimeout(this.#timeouts.idle, () => { - socket.destroy(new Errors.IdleTimeoutError(`${options.host}:${options.port}`)) - }) - } - - return socket - } - - addRequest (request, options) { - const proxy = this.#getProxy(options) - // it would be better to call proxy.addRequest here but this causes the - // http-proxy-agent to call its super.addRequest which causes the request - // to be added to the agent twice. since we only support 3 agents - // currently (see the required agents in proxy.js) we have manually - // checked that the only public methods we need to call are called in the - // next block. this could change in the future and presumably we would get - // failing tests until we have properly called the necessary methods on - // each of our proxy agents - if (proxy?.setRequestProps) { - proxy.setRequestProps(request, options) - } - - request.setHeader('connection', this.keepAlive ? 'keep-alive' : 'close') - - if (this.#timeouts.response) { - let responseTimeout - request.once('finish', () => { - setTimeout(() => { - request.destroy(new Errors.ResponseTimeoutError(request, this.#proxy)) - }, this.#timeouts.response) - }) - request.once('response', () => { - clearTimeout(responseTimeout) - }) - } - - if (this.#timeouts.transfer) { - let transferTimeout - request.once('response', (res) => { - setTimeout(() => { - res.destroy(new Errors.TransferTimeoutError(request, this.#proxy)) - }, this.#timeouts.transfer) - res.once('close', () => { - clearTimeout(transferTimeout) - }) - }) - } - - return super.addRequest(request, options) - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js deleted file mode 100644 index 3c6946c566d736..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const dns = require('dns') - -// this is a factory so that each request can have its own opts (i.e. ttl) -// while still sharing the cache across all requests -const cache = new LRUCache({ max: 50 }) - -const getOptions = ({ - family = 0, - hints = dns.ADDRCONFIG, - all = false, - verbatim = undefined, - ttl = 5 * 60 * 1000, - lookup = dns.lookup, -}) => ({ - // hints and lookup are returned since both are top level properties to (net|tls).connect - hints, - lookup: (hostname, ...args) => { - const callback = args.pop() // callback is always last arg - const lookupOptions = args[0] ?? 
{} - - const options = { - family, - hints, - all, - verbatim, - ...(typeof lookupOptions === 'number' ? { family: lookupOptions } : lookupOptions), - } - - const key = JSON.stringify({ hostname, ...options }) - - if (cache.has(key)) { - const cached = cache.get(key) - return process.nextTick(callback, null, ...cached) - } - - lookup(hostname, options, (err, ...result) => { - if (err) { - return callback(err) - } - - cache.set(key, result, { ttl }) - return callback(null, ...result) - }) - }, -}) - -module.exports = { - cache, - getOptions, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js deleted file mode 100644 index 70475aec8eb357..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict' - -class InvalidProxyProtocolError extends Error { - constructor (url) { - super(`Invalid protocol \`${url.protocol}\` connecting to proxy \`${url.host}\``) - this.code = 'EINVALIDPROXY' - this.proxy = url - } -} - -class ConnectionTimeoutError extends Error { - constructor (host) { - super(`Timeout connecting to host \`${host}\``) - this.code = 'ECONNECTIONTIMEOUT' - this.host = host - } -} - -class IdleTimeoutError extends Error { - constructor (host) { - super(`Idle timeout reached for host \`${host}\``) - this.code = 'EIDLETIMEOUT' - this.host = host - } -} - -class ResponseTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Response timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `connecting to host \`${request.host}\`` - super(msg) - this.code = 'ERESPONSETIMEOUT' - this.proxy = proxy - this.request = request - } -} - -class TransferTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Transfer timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `for \`${request.host}\`` - super(msg) - this.code = 'ETRANSFERTIMEOUT' - this.proxy = proxy - this.request = request - } -} - -module.exports = { - InvalidProxyProtocolError, - ConnectionTimeoutError, - IdleTimeoutError, - ResponseTimeoutError, - TransferTimeoutError, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js deleted file mode 100644 index b33d6eaef07a21..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, proxyCache } = require('./proxy.js') -const dns = require('./dns.js') -const Agent = require('./agents.js') - -const agentCache = new LRUCache({ max: 20 }) - -const getAgent = (url, { agent, proxy, noProxy, ...options } = {}) => { - // false has meaning so this can't be a simple truthiness check - if (agent != null) { - return agent - } - - url = new URL(url) - - const proxyForUrl = getProxy(url, { proxy, noProxy }) - const normalizedOptions = { - ...normalizeOptions(options), - proxy: proxyForUrl, - } - - const cacheKey = cacheOptions({ - ...normalizedOptions, - secureEndpoint: url.protocol === 'https:', - }) - - if (agentCache.has(cacheKey)) { - return agentCache.get(cacheKey) - } - - const newAgent = new Agent(normalizedOptions) - agentCache.set(cacheKey, newAgent) - - return newAgent -} - -module.exports = { - getAgent, - Agent, - // these are 
exported for backwards compatability - HttpAgent: Agent, - HttpsAgent: Agent, - cache: { - proxy: proxyCache, - agent: agentCache, - dns: dns.cache, - clear: () => { - proxyCache.clear() - agentCache.clear() - dns.cache.clear() - }, - }, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js deleted file mode 100644 index 0bf53f725f0846..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const dns = require('./dns') - -const normalizeOptions = (opts) => { - const family = parseInt(opts.family ?? '0', 10) - const keepAlive = opts.keepAlive ?? true - - const normalized = { - // nodejs http agent options. these are all the defaults - // but kept here to increase the likelihood of cache hits - // https://nodejs.org/api/http.html#new-agentoptions - keepAliveMsecs: keepAlive ? 1000 : undefined, - maxSockets: opts.maxSockets ?? 15, - maxTotalSockets: Infinity, - maxFreeSockets: keepAlive ? 256 : undefined, - scheduling: 'fifo', - // then spread the rest of the options - ...opts, - // we already set these to their defaults that we want - family, - keepAlive, - // our custom timeout options - timeouts: { - // the standard timeout option is mapped to our idle timeout - // and then deleted below - idle: opts.timeout ?? 0, - connection: 0, - response: 0, - transfer: 0, - ...opts.timeouts, - }, - // get the dns options that go at the top level of socket connection - ...dns.getOptions({ family, ...opts.dns }), - } - - // remove timeout since we already used it to set our own idle timeout - delete normalized.timeout - - return normalized -} - -const createKey = (obj) => { - let key = '' - const sorted = Object.entries(obj).sort((a, b) => a[0] - b[0]) - for (let [k, v] of sorted) { - if (v == null) { - v = 'null' - } else if (v instanceof URL) { - v = v.toString() - } else if (typeof v === 'object') { - v = createKey(v) - } - key += `${k}:${v}:` - } - return key -} - -const cacheOptions = ({ secureEndpoint, ...options }) => createKey({ - secureEndpoint: !!secureEndpoint, - // socket connect options - family: options.family, - hints: options.hints, - localAddress: options.localAddress, - // tls specific connect options - strictSsl: secureEndpoint ? !!options.rejectUnauthorized : false, - ca: secureEndpoint ? options.ca : null, - cert: secureEndpoint ? options.cert : null, - key: secureEndpoint ? 
options.key : null, - // http agent options - keepAlive: options.keepAlive, - keepAliveMsecs: options.keepAliveMsecs, - maxSockets: options.maxSockets, - maxTotalSockets: options.maxTotalSockets, - maxFreeSockets: options.maxFreeSockets, - scheduling: options.scheduling, - // timeout options - timeouts: options.timeouts, - // proxy - proxy: options.proxy, -}) - -module.exports = { - normalizeOptions, - cacheOptions, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js deleted file mode 100644 index 6272e929e57bcf..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js +++ /dev/null @@ -1,88 +0,0 @@ -'use strict' - -const { HttpProxyAgent } = require('http-proxy-agent') -const { HttpsProxyAgent } = require('https-proxy-agent') -const { SocksProxyAgent } = require('socks-proxy-agent') -const { LRUCache } = require('lru-cache') -const { InvalidProxyProtocolError } = require('./errors.js') - -const PROXY_CACHE = new LRUCache({ max: 20 }) - -const SOCKS_PROTOCOLS = new Set(SocksProxyAgent.protocols) - -const PROXY_ENV_KEYS = new Set(['https_proxy', 'http_proxy', 'proxy', 'no_proxy']) - -const PROXY_ENV = Object.entries(process.env).reduce((acc, [key, value]) => { - key = key.toLowerCase() - if (PROXY_ENV_KEYS.has(key)) { - acc[key] = value - } - return acc -}, {}) - -const getProxyAgent = (url) => { - url = new URL(url) - - const protocol = url.protocol.slice(0, -1) - if (SOCKS_PROTOCOLS.has(protocol)) { - return SocksProxyAgent - } - if (protocol === 'https' || protocol === 'http') { - return [HttpProxyAgent, HttpsProxyAgent] - } - - throw new InvalidProxyProtocolError(url) -} - -const isNoProxy = (url, noProxy) => { - if (typeof noProxy === 'string') { - noProxy = noProxy.split(',').map((p) => p.trim()).filter(Boolean) - } - - if (!noProxy || !noProxy.length) { - return false - } - - const hostSegments = url.hostname.split('.').reverse() - - return noProxy.some((no) => { - const noSegments = no.split('.').filter(Boolean).reverse() - if (!noSegments.length) { - return false - } - - for (let i = 0; i < noSegments.length; i++) { - if (hostSegments[i] !== noSegments[i]) { - return false - } - } - - return true - }) -} - -const getProxy = (url, { proxy, noProxy }) => { - url = new URL(url) - - if (!proxy) { - proxy = url.protocol === 'https:' - ? 
PROXY_ENV.https_proxy - : PROXY_ENV.https_proxy || PROXY_ENV.http_proxy || PROXY_ENV.proxy - } - - if (!noProxy) { - noProxy = PROXY_ENV.no_proxy - } - - if (!proxy || isNoProxy(url, noProxy)) { - return null - } - - return new URL(proxy) -} - -module.exports = { - getProxyAgent, - getProxy, - proxyCache: PROXY_CACHE, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json deleted file mode 100644 index ef5b4e3228cc46..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "@npmcli/agent", - "version": "2.2.2", - "description": "the http/https agent used by the npm cli", - "main": "lib/index.js", - "scripts": { - "gencerts": "bash scripts/create-cert.sh", - "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/agent/issues" - }, - "homepage": "https://github.com/npm/agent#readme", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": "true" - }, - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "minipass-fetch": "^3.0.3", - "nock": "^13.2.7", - "semver": "^7.5.4", - "simple-socks": "^3.1.0", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/agent.git" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js deleted file mode 100644 index cb5982f79077ac..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js +++ /dev/null @@ -1,20 +0,0 @@ -// given an input that may or may not be an object, return an object that has -// a copy of every defined property listed in 'copy'. if the input is not an -// object, assign it to the property named by 'wrap' -const getOptions = (input, { copy, wrap }) => { - const result = {} - - if (input && typeof input === 'object') { - for (const prop of copy) { - if (input[prop] !== undefined) { - result[prop] = input[prop] - } - } - } else { - result[wrap] = input - } - - return result -} - -module.exports = getOptions diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js deleted file mode 100644 index 4d13bc037359d7..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js +++ /dev/null @@ -1,9 +0,0 @@ -const semver = require('semver') - -const satisfies = (range) => { - return semver.satisfies(process.version, range, { includePrerelease: true }) -} - -module.exports = { - satisfies, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE deleted file mode 100644 index 93546dfb7655bf..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -(The MIT License) - -Copyright (c) 2011-2017 JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js deleted file mode 100644 index 1cd1e05d0c533d..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict' -const { inspect } = require('util') - -// adapted from node's internal/errors -// https://github.com/nodejs/node/blob/c8a04049/lib/internal/errors.js - -// close copy of node's internal SystemError class. 
-class SystemError { - constructor (code, prefix, context) { - // XXX context.code is undefined in all constructors used in cp/polyfill - // that may be a bug copied from node, maybe the constructor should use - // `code` not `errno`? nodejs/node#41104 - let message = `${prefix}: ${context.syscall} returned ` + - `${context.code} (${context.message})` - - if (context.path !== undefined) { - message += ` ${context.path}` - } - if (context.dest !== undefined) { - message += ` => ${context.dest}` - } - - this.code = code - Object.defineProperties(this, { - name: { - value: 'SystemError', - enumerable: false, - writable: true, - configurable: true, - }, - message: { - value: message, - enumerable: false, - writable: true, - configurable: true, - }, - info: { - value: context, - enumerable: true, - configurable: true, - writable: false, - }, - errno: { - get () { - return context.errno - }, - set (value) { - context.errno = value - }, - enumerable: true, - configurable: true, - }, - syscall: { - get () { - return context.syscall - }, - set (value) { - context.syscall = value - }, - enumerable: true, - configurable: true, - }, - }) - - if (context.path !== undefined) { - Object.defineProperty(this, 'path', { - get () { - return context.path - }, - set (value) { - context.path = value - }, - enumerable: true, - configurable: true, - }) - } - - if (context.dest !== undefined) { - Object.defineProperty(this, 'dest', { - get () { - return context.dest - }, - set (value) { - context.dest = value - }, - enumerable: true, - configurable: true, - }) - } - } - - toString () { - return `${this.name} [${this.code}]: ${this.message}` - } - - [Symbol.for('nodejs.util.inspect.custom')] (_recurseTimes, ctx) { - return inspect(this, { - ...ctx, - getters: true, - customInspect: false, - }) - } -} - -function E (code, message) { - module.exports[code] = class NodeError extends SystemError { - constructor (ctx) { - super(code, message, ctx) - } - } -} - -E('ERR_FS_CP_DIR_TO_NON_DIR', 'Cannot overwrite directory with non-directory') -E('ERR_FS_CP_EEXIST', 'Target already exists') -E('ERR_FS_CP_EINVAL', 'Invalid src or dest') -E('ERR_FS_CP_FIFO_PIPE', 'Cannot copy a FIFO pipe') -E('ERR_FS_CP_NON_DIR_TO_DIR', 'Cannot overwrite non-directory with directory') -E('ERR_FS_CP_SOCKET', 'Cannot copy a socket file') -E('ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY', 'Cannot overwrite symlink in subdirectory of self') -E('ERR_FS_CP_UNKNOWN', 'Cannot copy an unknown file type') -E('ERR_FS_EISDIR', 'Path is a directory') - -module.exports.ERR_INVALID_ARG_TYPE = class ERR_INVALID_ARG_TYPE extends Error { - constructor (name, expected, actual) { - super() - this.code = 'ERR_INVALID_ARG_TYPE' - this.message = `The ${name} argument must be ${expected}. 
Received ${typeof actual}` - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js deleted file mode 100644 index 972ce7aa12abef..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js +++ /dev/null @@ -1,22 +0,0 @@ -const fs = require('fs/promises') -const getOptions = require('../common/get-options.js') -const node = require('../common/node.js') -const polyfill = require('./polyfill.js') - -// node 16.7.0 added fs.cp -const useNative = node.satisfies('>=16.7.0') - -const cp = async (src, dest, opts) => { - const options = getOptions(opts, { - copy: ['dereference', 'errorOnExist', 'filter', 'force', 'preserveTimestamps', 'recursive'], - }) - - // the polyfill is tested separately from this module, no need to hack - // process.version to try to trigger it just for coverage - // istanbul ignore next - return useNative - ? fs.cp(src, dest, options) - : polyfill(src, dest, options) -} - -module.exports = cp diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js deleted file mode 100644 index 80eb10de971918..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js +++ /dev/null @@ -1,428 +0,0 @@ -// this file is a modified version of the code in node 17.2.0 -// which is, in turn, a modified version of the fs-extra module on npm -// node core changes: -// - Use of the assert module has been replaced with core's error system. -// - All code related to the glob dependency has been removed. -// - Bring your own custom fs module is not currently supported. -// - Some basic code cleanup. -// changes here: -// - remove all callback related code -// - drop sync support -// - change assertions back to non-internal methods (see options.js) -// - throws ENOTDIR when rmdir gets an ENOENT for a path that exists in Windows -'use strict' - -const { - ERR_FS_CP_DIR_TO_NON_DIR, - ERR_FS_CP_EEXIST, - ERR_FS_CP_EINVAL, - ERR_FS_CP_FIFO_PIPE, - ERR_FS_CP_NON_DIR_TO_DIR, - ERR_FS_CP_SOCKET, - ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY, - ERR_FS_CP_UNKNOWN, - ERR_FS_EISDIR, - ERR_INVALID_ARG_TYPE, -} = require('./errors.js') -const { - constants: { - errno: { - EEXIST, - EISDIR, - EINVAL, - ENOTDIR, - }, - }, -} = require('os') -const { - chmod, - copyFile, - lstat, - mkdir, - readdir, - readlink, - stat, - symlink, - unlink, - utimes, -} = require('fs/promises') -const { - dirname, - isAbsolute, - join, - parse, - resolve, - sep, - toNamespacedPath, -} = require('path') -const { fileURLToPath } = require('url') - -const defaultOptions = { - dereference: false, - errorOnExist: false, - filter: undefined, - force: true, - preserveTimestamps: false, - recursive: false, -} - -async function cp (src, dest, opts) { - if (opts != null && typeof opts !== 'object') { - throw new ERR_INVALID_ARG_TYPE('options', ['Object'], opts) - } - return cpFn( - toNamespacedPath(getValidatedPath(src)), - toNamespacedPath(getValidatedPath(dest)), - { ...defaultOptions, ...opts }) -} - -function getValidatedPath (fileURLOrPath) { - const path = fileURLOrPath != null && fileURLOrPath.href - && fileURLOrPath.origin - ? 
fileURLToPath(fileURLOrPath) - : fileURLOrPath - return path -} - -async function cpFn (src, dest, opts) { - // Warn about using preserveTimestamps on 32-bit node - // istanbul ignore next - if (opts.preserveTimestamps && process.arch === 'ia32') { - const warning = 'Using the preserveTimestamps option in 32-bit ' + - 'node is not recommended' - process.emitWarning(warning, 'TimestampPrecisionWarning') - } - const stats = await checkPaths(src, dest, opts) - const { srcStat, destStat } = stats - await checkParentPaths(src, srcStat, dest) - if (opts.filter) { - return handleFilter(checkParentDir, destStat, src, dest, opts) - } - return checkParentDir(destStat, src, dest, opts) -} - -async function checkPaths (src, dest, opts) { - const { 0: srcStat, 1: destStat } = await getStats(src, dest, opts) - if (destStat) { - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: 'src and dest cannot be the same', - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - if (srcStat.isDirectory() && !destStat.isDirectory()) { - throw new ERR_FS_CP_DIR_TO_NON_DIR({ - message: `cannot overwrite directory ${src} ` + - `with non-directory ${dest}`, - path: dest, - syscall: 'cp', - errno: EISDIR, - }) - } - if (!srcStat.isDirectory() && destStat.isDirectory()) { - throw new ERR_FS_CP_NON_DIR_TO_DIR({ - message: `cannot overwrite non-directory ${src} ` + - `with directory ${dest}`, - path: dest, - syscall: 'cp', - errno: ENOTDIR, - }) - } - } - - if (srcStat.isDirectory() && isSrcSubdir(src, dest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return { srcStat, destStat } -} - -function areIdentical (srcStat, destStat) { - return destStat.ino && destStat.dev && destStat.ino === srcStat.ino && - destStat.dev === srcStat.dev -} - -function getStats (src, dest, opts) { - const statFunc = opts.dereference ? - (file) => stat(file, { bigint: true }) : - (file) => lstat(file, { bigint: true }) - return Promise.all([ - statFunc(src), - statFunc(dest).catch((err) => { - // istanbul ignore next: unsure how to cover. - if (err.code === 'ENOENT') { - return null - } - // istanbul ignore next: unsure how to cover. - throw err - }), - ]) -} - -async function checkParentDir (destStat, src, dest, opts) { - const destParent = dirname(dest) - const dirExists = await pathExists(destParent) - if (dirExists) { - return getStatsForCopy(destStat, src, dest, opts) - } - await mkdir(destParent, { recursive: true }) - return getStatsForCopy(destStat, src, dest, opts) -} - -function pathExists (dest) { - return stat(dest).then( - () => true, - // istanbul ignore next: not sure when this would occur - (err) => (err.code === 'ENOENT' ? false : Promise.reject(err))) -} - -// Recursively check if dest parent is a subdirectory of src. -// It works for all file types including symlinks since it -// checks the src and dest inodes. It starts from the deepest -// parent and stops once it reaches the src parent or the root path. 
-async function checkParentPaths (src, srcStat, dest) { - const srcParent = resolve(dirname(src)) - const destParent = resolve(dirname(dest)) - if (destParent === srcParent || destParent === parse(destParent).root) { - return - } - let destStat - try { - destStat = await stat(destParent, { bigint: true }) - } catch (err) { - // istanbul ignore else: not sure when this would occur - if (err.code === 'ENOENT') { - return - } - // istanbul ignore next: not sure when this would occur - throw err - } - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return checkParentPaths(src, srcStat, destParent) -} - -const normalizePathToArray = (path) => - resolve(path).split(sep).filter(Boolean) - -// Return true if dest is a subdir of src, otherwise false. -// It only checks the path strings. -function isSrcSubdir (src, dest) { - const srcArr = normalizePathToArray(src) - const destArr = normalizePathToArray(dest) - return srcArr.every((cur, i) => destArr[i] === cur) -} - -async function handleFilter (onInclude, destStat, src, dest, opts, cb) { - const include = await opts.filter(src, dest) - if (include) { - return onInclude(destStat, src, dest, opts, cb) - } -} - -function startCopy (destStat, src, dest, opts) { - if (opts.filter) { - return handleFilter(getStatsForCopy, destStat, src, dest, opts) - } - return getStatsForCopy(destStat, src, dest, opts) -} - -async function getStatsForCopy (destStat, src, dest, opts) { - const statFn = opts.dereference ? stat : lstat - const srcStat = await statFn(src) - // istanbul ignore else: can't portably test FIFO - if (srcStat.isDirectory() && opts.recursive) { - return onDir(srcStat, destStat, src, dest, opts) - } else if (srcStat.isDirectory()) { - throw new ERR_FS_EISDIR({ - message: `${src} is a directory (not copied)`, - path: src, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFile() || - srcStat.isCharacterDevice() || - srcStat.isBlockDevice()) { - return onFile(srcStat, destStat, src, dest, opts) - } else if (srcStat.isSymbolicLink()) { - return onLink(destStat, src, dest) - } else if (srcStat.isSocket()) { - throw new ERR_FS_CP_SOCKET({ - message: `cannot copy a socket file: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFIFO()) { - throw new ERR_FS_CP_FIFO_PIPE({ - message: `cannot copy a FIFO pipe: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // istanbul ignore next: should be unreachable - throw new ERR_FS_CP_UNKNOWN({ - message: `cannot copy an unknown file type: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) -} - -function onFile (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return _copyFile(srcStat, src, dest, opts) - } - return mayCopyFile(srcStat, src, dest, opts) -} - -async function mayCopyFile (srcStat, src, dest, opts) { - if (opts.force) { - await unlink(dest) - return _copyFile(srcStat, src, dest, opts) - } else if (opts.errorOnExist) { - throw new ERR_FS_CP_EEXIST({ - message: `${dest} already exists`, - path: dest, - syscall: 'cp', - errno: EEXIST, - }) - } -} - -async function _copyFile (srcStat, src, dest, opts) { - await copyFile(src, dest) - if (opts.preserveTimestamps) { - return handleTimestampsAndMode(srcStat.mode, src, dest) - } - return setDestMode(dest, srcStat.mode) -} - -async function handleTimestampsAndMode (srcMode, src, dest) { - // Make sure the file is writable before 
setting the timestamp - // otherwise open fails with EPERM when invoked with 'r+' - // (through utimes call) - if (fileIsNotWritable(srcMode)) { - await makeFileWritable(dest, srcMode) - return setDestTimestampsAndMode(srcMode, src, dest) - } - return setDestTimestampsAndMode(srcMode, src, dest) -} - -function fileIsNotWritable (srcMode) { - return (srcMode & 0o200) === 0 -} - -function makeFileWritable (dest, srcMode) { - return setDestMode(dest, srcMode | 0o200) -} - -async function setDestTimestampsAndMode (srcMode, src, dest) { - await setDestTimestamps(src, dest) - return setDestMode(dest, srcMode) -} - -function setDestMode (dest, srcMode) { - return chmod(dest, srcMode) -} - -async function setDestTimestamps (src, dest) { - // The initial srcStat.atime cannot be trusted - // because it is modified by the read(2) system call - // (See https://nodejs.org/api/fs.html#fs_stat_time_values) - const updatedSrcStat = await stat(src) - return utimes(dest, updatedSrcStat.atime, updatedSrcStat.mtime) -} - -function onDir (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return mkDirAndCopy(srcStat.mode, src, dest, opts) - } - return copyDir(src, dest, opts) -} - -async function mkDirAndCopy (srcMode, src, dest, opts) { - await mkdir(dest) - await copyDir(src, dest, opts) - return setDestMode(dest, srcMode) -} - -async function copyDir (src, dest, opts) { - const dir = await readdir(src) - for (let i = 0; i < dir.length; i++) { - const item = dir[i] - const srcItem = join(src, item) - const destItem = join(dest, item) - const { destStat } = await checkPaths(srcItem, destItem, opts) - await startCopy(destStat, srcItem, destItem, opts) - } -} - -async function onLink (destStat, src, dest) { - let resolvedSrc = await readlink(src) - if (!isAbsolute(resolvedSrc)) { - resolvedSrc = resolve(dirname(src), resolvedSrc) - } - if (!destStat) { - return symlink(resolvedSrc, dest) - } - let resolvedDest - try { - resolvedDest = await readlink(dest) - } catch (err) { - // Dest exists and is a regular file or directory, - // Windows may throw UNKNOWN error. If dest already exists, - // fs throws error anyway, so no need to guard against it here. - // istanbul ignore next: can only test on windows - if (err.code === 'EINVAL' || err.code === 'UNKNOWN') { - return symlink(resolvedSrc, dest) - } - // istanbul ignore next: should not be possible - throw err - } - if (!isAbsolute(resolvedDest)) { - resolvedDest = resolve(dirname(dest), resolvedDest) - } - if (isSrcSubdir(resolvedSrc, resolvedDest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${resolvedSrc} to a subdirectory of self ` + - `${resolvedDest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // Do not copy if src is a subdir of dest since unlinking - // dest in this case would result in removing src contents - // and therefore a broken symlink would be created. 
- const srcStat = await stat(src) - if (srcStat.isDirectory() && isSrcSubdir(resolvedDest, resolvedSrc)) { - throw new ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY({ - message: `cannot overwrite ${resolvedDest} with ${resolvedSrc}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return copyLink(resolvedSrc, dest) -} - -async function copyLink (resolvedSrc, dest) { - await unlink(dest) - return symlink(resolvedSrc, dest) -} - -module.exports = cp diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js deleted file mode 100644 index 81c746304cc428..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' - -const cp = require('./cp/index.js') -const withTempDir = require('./with-temp-dir.js') -const readdirScoped = require('./readdir-scoped.js') -const moveFile = require('./move-file.js') - -module.exports = { - cp, - withTempDir, - readdirScoped, - moveFile, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js deleted file mode 100644 index d56e06d384659a..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js +++ /dev/null @@ -1,78 +0,0 @@ -const { dirname, join, resolve, relative, isAbsolute } = require('path') -const fs = require('fs/promises') - -const pathExists = async path => { - try { - await fs.access(path) - return true - } catch (er) { - return er.code !== 'ENOENT' - } -} - -const moveFile = async (source, destination, options = {}, root = true, symlinks = []) => { - if (!source || !destination) { - throw new TypeError('`source` and `destination` file required') - } - - options = { - overwrite: true, - ...options, - } - - if (!options.overwrite && await pathExists(destination)) { - throw new Error(`The destination file exists: ${destination}`) - } - - await fs.mkdir(dirname(destination), { recursive: true }) - - try { - await fs.rename(source, destination) - } catch (error) { - if (error.code === 'EXDEV' || error.code === 'EPERM') { - const sourceStat = await fs.lstat(source) - if (sourceStat.isDirectory()) { - const files = await fs.readdir(source) - await Promise.all(files.map((file) => - moveFile(join(source, file), join(destination, file), options, false, symlinks) - )) - } else if (sourceStat.isSymbolicLink()) { - symlinks.push({ source, destination }) - } else { - await fs.copyFile(source, destination) - } - } else { - throw error - } - } - - if (root) { - await Promise.all(symlinks.map(async ({ source: symSource, destination: symDestination }) => { - let target = await fs.readlink(symSource) - // junction symlinks in windows will be absolute paths, so we need to - // make sure they point to the symlink destination - if (isAbsolute(target)) { - target = resolve(symDestination, relative(symSource, target)) - } - // try to determine what the actual file is so we can create the correct - // type of symlink in windows - let targetStat = 'file' - try { - targetStat = await fs.stat(resolve(dirname(symSource), target)) - if (targetStat.isDirectory()) { - targetStat = 'junction' - } - } catch { - // targetStat remains 'file' - } - await fs.symlink( - target, - symDestination, - targetStat - ) - })) - await fs.rm(source, { recursive: true, force: true }) - } -} - -module.exports = moveFile diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js 
b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js deleted file mode 100644 index cd601dfbe7486b..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js +++ /dev/null @@ -1,20 +0,0 @@ -const { readdir } = require('fs/promises') -const { join } = require('path') - -const readdirScoped = async (dir) => { - const results = [] - - for (const item of await readdir(dir)) { - if (item.startsWith('@')) { - for (const scopedItem of await readdir(join(dir, item))) { - results.push(join(item, scopedItem)) - } - } else { - results.push(item) - } - } - - return results -} - -module.exports = readdirScoped diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js deleted file mode 100644 index 0738ac4f29e1be..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js +++ /dev/null @@ -1,39 +0,0 @@ -const { join, sep } = require('path') - -const getOptions = require('./common/get-options.js') -const { mkdir, mkdtemp, rm } = require('fs/promises') - -// create a temp directory, ensure its permissions match its parent, then call -// the supplied function passing it the path to the directory. clean up after -// the function finishes, whether it throws or not -const withTempDir = async (root, fn, opts) => { - const options = getOptions(opts, { - copy: ['tmpPrefix'], - }) - // create the directory - await mkdir(root, { recursive: true }) - - const target = await mkdtemp(join(`${root}${sep}`, options.tmpPrefix || '')) - let err - let result - - try { - result = await fn(target) - } catch (_err) { - err = _err - } - - try { - await rm(target, { force: true, recursive: true }) - } catch { - // ignore errors - } - - if (err) { - throw err - } - - return result -} - -module.exports = withTempDir diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json deleted file mode 100644 index 5261a11b78000e..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "@npmcli/fs", - "version": "3.1.1", - "description": "filesystem utilities for the npm cli", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "snap": "tap", - "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/fs.git" - }, - "keywords": [ - "npm", - "oss" - ], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.22.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@tufjs/models/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/LICENSE similarity index 100% rename from deps/npm/node_modules/@tufjs/models/LICENSE rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/LICENSE diff --git a/deps/npm/node_modules/@tufjs/models/dist/base.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/base.js similarity index 80% rename from deps/npm/node_modules/@tufjs/models/dist/base.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/base.js index 259f6799c13a0d..85e45d8fc1151e 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/base.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/base.js @@ -3,7 +3,8 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.Signed = exports.isMetadataKind = exports.MetadataKind = void 0; +exports.Signed = exports.MetadataKind = void 0; +exports.isMetadataKind = isMetadataKind; const util_1 = __importDefault(require("util")); const error_1 = require("./error"); const utils_1 = require("./utils"); @@ -19,7 +20,6 @@ function isMetadataKind(value) { return (typeof value === 'string' && Object.values(MetadataKind).includes(value)); } -exports.isMetadataKind = isMetadataKind; /*** * A base class for the signed part of TUF metadata. * @@ -39,8 +39,8 @@ class Signed { if (specList[0] != SPECIFICATION_VERSION[0]) { throw new error_1.ValueError('Unsupported specVersion'); } - this.expires = options.expires || new Date().toISOString(); - this.version = options.version || 1; + this.expires = options.expires; + this.version = options.version; this.unrecognizedFields = options.unrecognizedFields || {}; } equals(other) { @@ -60,13 +60,22 @@ class Signed { } static commonFieldsFromJSON(data) { const { spec_version, expires, version, ...rest } = data; - if (utils_1.guard.isDefined(spec_version) && !(typeof spec_version === 'string')) { + if (!utils_1.guard.isDefined(spec_version)) { + throw new error_1.ValueError('spec_version is not defined'); + } + else if (typeof spec_version !== 'string') { throw new TypeError('spec_version must be a string'); } - if (utils_1.guard.isDefined(expires) && !(typeof expires === 'string')) { + if (!utils_1.guard.isDefined(expires)) { + throw new error_1.ValueError('expires is not defined'); + } + else if (!(typeof expires === 'string')) { throw new TypeError('expires must be a string'); } - if (utils_1.guard.isDefined(version) && !(typeof version === 'number')) { + if (!utils_1.guard.isDefined(version)) { + throw new error_1.ValueError('version is not defined'); + } + else if (!(typeof version === 'number')) { throw new TypeError('version must be a number'); } return { diff --git a/deps/npm/node_modules/@tufjs/models/dist/delegations.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/delegations.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/delegations.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/delegations.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/error.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/error.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/error.js rename to 
deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/error.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/file.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/file.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/file.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/file.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/index.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/index.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/index.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/index.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/key.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/key.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/key.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/key.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/metadata.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/metadata.js similarity index 91% rename from deps/npm/node_modules/@tufjs/models/dist/metadata.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/metadata.js index 9668b6f14fa701..389d2504e0b53d 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/metadata.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/metadata.js @@ -125,6 +125,9 @@ class Metadata { if (type !== signed._type) { throw new error_1.ValueError(`expected '${type}', got ${signed['_type']}`); } + if (!utils_1.guard.isObjectArray(signatures)) { + throw new TypeError('signatures is not an array'); + } let signedObj; switch (type) { case base_1.MetadataKind.Root: @@ -142,17 +145,16 @@ class Metadata { default: throw new TypeError('invalid metadata type'); } - const sigMap = signaturesFromJSON(signatures); + const sigMap = {}; + // Ensure that each signature is unique + signatures.forEach((sigData) => { + const sig = signature_1.Signature.fromJSON(sigData); + if (sigMap[sig.keyID]) { + throw new error_1.ValueError(`multiple signatures found for keyid: ${sig.keyID}`); + } + sigMap[sig.keyID] = sig; + }); return new Metadata(signedObj, sigMap, rest); } } exports.Metadata = Metadata; -function signaturesFromJSON(data) { - if (!utils_1.guard.isObjectArray(data)) { - throw new TypeError('signatures is not an array'); - } - return data.reduce((acc, sigData) => { - const signature = signature_1.Signature.fromJSON(sigData); - return { ...acc, [signature.keyID]: signature }; - }, {}); -} diff --git a/deps/npm/node_modules/@tufjs/models/dist/role.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/role.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/role.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/role.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/root.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/root.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/root.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/root.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/signature.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/signature.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/signature.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/signature.js diff --git 
a/deps/npm/node_modules/@tufjs/models/dist/snapshot.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/snapshot.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/snapshot.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/snapshot.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/targets.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/targets.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/targets.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/targets.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/timestamp.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/timestamp.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/timestamp.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/timestamp.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/guard.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/guard.js similarity index 88% rename from deps/npm/node_modules/@tufjs/models/dist/utils/guard.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/guard.js index efe558852303ce..911e8475986bbc 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/utils/guard.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/guard.js @@ -1,33 +1,32 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.isObjectRecord = exports.isStringRecord = exports.isObjectArray = exports.isStringArray = exports.isObject = exports.isDefined = void 0; +exports.isDefined = isDefined; +exports.isObject = isObject; +exports.isStringArray = isStringArray; +exports.isObjectArray = isObjectArray; +exports.isStringRecord = isStringRecord; +exports.isObjectRecord = isObjectRecord; function isDefined(val) { return val !== undefined; } -exports.isDefined = isDefined; function isObject(value) { return typeof value === 'object' && value !== null; } -exports.isObject = isObject; function isStringArray(value) { return Array.isArray(value) && value.every((v) => typeof v === 'string'); } -exports.isStringArray = isStringArray; function isObjectArray(value) { return Array.isArray(value) && value.every(isObject); } -exports.isObjectArray = isObjectArray; function isStringRecord(value) { return (typeof value === 'object' && value !== null && Object.keys(value).every((k) => typeof k === 'string') && Object.values(value).every((v) => typeof v === 'string')); } -exports.isStringRecord = isStringRecord; function isObjectRecord(value) { return (typeof value === 'object' && value !== null && Object.keys(value).every((k) => typeof k === 'string') && Object.values(value).every((v) => typeof v === 'object' && v !== null)); } -exports.isObjectRecord = isObjectRecord; diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/index.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/index.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/utils/index.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/index.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/key.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/key.js similarity index 99% rename from deps/npm/node_modules/@tufjs/models/dist/utils/key.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/key.js index 
1f795ba1a2733f..3c3ec07f1425a7 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/utils/key.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/key.js @@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.getPublicKey = void 0; +exports.getPublicKey = getPublicKey; const crypto_1 = __importDefault(require("crypto")); const error_1 = require("../error"); const oid_1 = require("./oid"); @@ -28,7 +28,6 @@ function getPublicKey(keyInfo) { throw new error_1.UnsupportedAlgorithmError(`Unsupported key type: ${keyInfo.keyType}`); } } -exports.getPublicKey = getPublicKey; function getRSAPublicKey(keyInfo) { // Only support PEM-encoded RSA keys if (!keyInfo.keyVal.startsWith(PEM_HEADER)) { diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/oid.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/oid.js similarity index 96% rename from deps/npm/node_modules/@tufjs/models/dist/utils/oid.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/oid.js index e1bb7af5e54fbf..00b29c3030d1ec 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/utils/oid.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/oid.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.encodeOIDString = void 0; +exports.encodeOIDString = encodeOIDString; const ANS1_TAG_OID = 0x06; function encodeOIDString(oid) { const parts = oid.split('.'); @@ -14,7 +14,6 @@ function encodeOIDString(oid) { const der = Buffer.from([first, ...rest]); return Buffer.from([ANS1_TAG_OID, der.length, ...der]); } -exports.encodeOIDString = encodeOIDString; function encodeVariableLengthInteger(value) { const bytes = []; let mask = 0x00; diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/types.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/types.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/utils/types.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/types.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/verify.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/verify.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/utils/verify.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/verify.js diff --git a/deps/npm/node_modules/@tufjs/models/package.json b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/package.json similarity index 90% rename from deps/npm/node_modules/@tufjs/models/package.json rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/package.json index be581591a0f3a3..8e5132ddf1079c 100644 --- a/deps/npm/node_modules/@tufjs/models/package.json +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/package.json @@ -1,6 +1,6 @@ { "name": "@tufjs/models", - "version": "2.0.1", + "version": "3.0.1", "description": "TUF metadata models", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -29,9 +29,9 @@ "homepage": "https://github.com/theupdateframework/tuf-js/tree/main/packages/models#readme", "dependencies": { "@tufjs/canonical-json": "2.0.0", - "minimatch": "^9.0.4" + "minimatch": "^9.0.5" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git 
a/deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md b/deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md deleted file mode 100644 index 8d28acf866d932..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js deleted file mode 100644 index ad5a76a4f73f26..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const contentVer = require('../../package.json')['cache-version'].content -const hashToSegments = require('../util/hash-to-segments') -const path = require('path') -const ssri = require('ssri') - -// Current format of content file path: -// -// sha512-BaSE64Hex= -> -// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee -// -module.exports = contentPath - -function contentPath (cache, integrity) { - const sri = ssri.parse(integrity, { single: true }) - // contentPath is the *strongest* algo given - return path.join( - contentDir(cache), - sri.algorithm, - ...hashToSegments(sri.hexDigest()) - ) -} - -module.exports.contentDir = contentDir - -function contentDir (cache) { - return path.join(cache, `content-v${contentVer}`) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js deleted file mode 100644 index 5f6192c3cec566..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js +++ /dev/null @@ -1,165 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const fsm = require('fs-minipass') -const ssri = require('ssri') -const contentPath = require('./path') -const Pipeline = require('minipass-pipeline') - -module.exports = read - -const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024 -async function read (cache, integrity, opts = {}) { - const { size } = opts - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? 
{ size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - if (stat.size > MAX_SINGLE_READ_SIZE) { - return readPipeline(cpath, stat.size, sri, new Pipeline()).concat() - } - - const data = await fs.readFile(cpath, { encoding: null }) - - if (stat.size !== data.length) { - throw sizeError(stat.size, data.length) - } - - if (!ssri.checkData(data, sri)) { - throw integrityError(sri, cpath) - } - - return data -} - -const readPipeline = (cpath, size, sri, stream) => { - stream.push( - new fsm.ReadStream(cpath, { - size, - readSize: MAX_SINGLE_READ_SIZE, - }), - ssri.integrityStream({ - integrity: sri, - size, - }) - ) - return stream -} - -module.exports.stream = readStream -module.exports.readStream = readStream - -function readStream (cache, integrity, opts = {}) { - const { size } = opts - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? { size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - return readPipeline(cpath, stat.size, sri, stream) - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.copy = copy - -function copy (cache, integrity, dest) { - return withContentSri(cache, integrity, (cpath) => { - return fs.copyFile(cpath, dest) - }) -} - -module.exports.hasContent = hasContent - -async function hasContent (cache, integrity) { - if (!integrity) { - return false - } - - try { - return await withContentSri(cache, integrity, async (cpath, sri) => { - const stat = await fs.stat(cpath) - return { size: stat.size, sri, stat } - }) - } catch (err) { - if (err.code === 'ENOENT') { - return false - } - - if (err.code === 'EPERM') { - /* istanbul ignore else */ - if (process.platform !== 'win32') { - throw err - } else { - return false - } - } - } -} - -async function withContentSri (cache, integrity, fn) { - const sri = ssri.parse(integrity) - // If `integrity` has multiple entries, pick the first digest - // with available local data. 
- const algo = sri.pickAlgorithm() - const digests = sri[algo] - - if (digests.length <= 1) { - const cpath = contentPath(cache, digests[0]) - return fn(cpath, digests[0]) - } else { - // Can't use race here because a generic error can happen before - // a ENOENT error, and can happen before a valid result - const results = await Promise.all(digests.map(async (meta) => { - try { - return await withContentSri(cache, meta, fn) - } catch (err) { - if (err.code === 'ENOENT') { - return Object.assign( - new Error('No matching content found for ' + sri.toString()), - { code: 'ENOENT' } - ) - } - return err - } - })) - // Return the first non error if it is found - const result = results.find((r) => !(r instanceof Error)) - if (result) { - return result - } - - // Throw the No matching content found error - const enoentError = results.find((r) => r.code === 'ENOENT') - if (enoentError) { - throw enoentError - } - - // Throw generic error - throw results.find((r) => r instanceof Error) - } -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function integrityError (sri, path) { - const err = new Error(`Integrity verification failed for ${sri} (${path})`) - err.code = 'EINTEGRITY' - err.sri = sri - err.path = path - return err -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js deleted file mode 100644 index ce58d679e4cb25..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const contentPath = require('./path') -const { hasContent } = require('./read') - -module.exports = rm - -async function rm (cache, integrity) { - const content = await hasContent(cache, integrity) - // ~pretty~ sure we can't end up with a content lacking sri, but be safe - if (content && content.sri) { - await fs.rm(contentPath(cache, content.sri), { recursive: true, force: true }) - return true - } else { - return false - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js deleted file mode 100644 index e7187abca8788a..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const events = require('events') - -const contentPath = require('./path') -const fs = require('fs/promises') -const { moveFile } = require('@npmcli/fs') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') -const Flush = require('minipass-flush') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') -const fsm = require('fs-minipass') - -module.exports = write - -// Cache of move operations in process so we don't duplicate -const moveOperations = new Map() - -async function write (cache, data, opts = {}) { - const { algorithms, size, integrity } = opts - - if (typeof size === 'number' && data.length !== size) { - throw sizeError(size, data.length) - } - - const sri = ssri.fromData(data, algorithms ? 
{ algorithms } : {}) - if (integrity && !ssri.checkData(data, integrity, opts)) { - throw checksumError(integrity, sri) - } - - for (const algo in sri) { - const tmp = await makeTmp(cache, opts) - const hash = sri[algo].toString() - try { - await fs.writeFile(tmp.target, data, { flag: 'wx' }) - await moveToDestination(tmp, cache, hash, opts) - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } - } - return { integrity: sri, size: data.length } -} - -module.exports.stream = writeStream - -// writes proxied to the 'inputStream' that is passed to the Promise -// 'end' is deferred until content is handled. -class CacacheWriteStream extends Flush { - constructor (cache, opts) { - super() - this.opts = opts - this.cache = cache - this.inputStream = new Minipass() - this.inputStream.on('error', er => this.emit('error', er)) - this.inputStream.on('drain', () => this.emit('drain')) - this.handleContentP = null - } - - write (chunk, encoding, cb) { - if (!this.handleContentP) { - this.handleContentP = handleContent( - this.inputStream, - this.cache, - this.opts - ) - this.handleContentP.catch(error => this.emit('error', error)) - } - return this.inputStream.write(chunk, encoding, cb) - } - - flush (cb) { - this.inputStream.end(() => { - if (!this.handleContentP) { - const e = new Error('Cache input stream was empty') - e.code = 'ENODATA' - // empty streams are probably emitting end right away. - // defer this one tick by rejecting a promise on it. - return Promise.reject(e).catch(cb) - } - // eslint-disable-next-line promise/catch-or-return - this.handleContentP.then( - (res) => { - res.integrity && this.emit('integrity', res.integrity) - // eslint-disable-next-line promise/always-return - res.size !== null && this.emit('size', res.size) - cb() - }, - (er) => cb(er) - ) - }) - } -} - -function writeStream (cache, opts = {}) { - return new CacacheWriteStream(cache, opts) -} - -async function handleContent (inputStream, cache, opts) { - const tmp = await makeTmp(cache, opts) - try { - const res = await pipeToTmp(inputStream, cache, tmp.target, opts) - await moveToDestination( - tmp, - cache, - res.integrity, - opts - ) - return res - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } -} - -async function pipeToTmp (inputStream, cache, tmpTarget, opts) { - const outStream = new fsm.WriteStream(tmpTarget, { - flags: 'wx', - }) - - if (opts.integrityEmitter) { - // we need to create these all simultaneously since they can fire in any order - const [integrity, size] = await Promise.all([ - events.once(opts.integrityEmitter, 'integrity').then(res => res[0]), - events.once(opts.integrityEmitter, 'size').then(res => res[0]), - new Pipeline(inputStream, outStream).promise(), - ]) - return { integrity, size } - } - - let integrity - let size - const hashStream = ssri.integrityStream({ - integrity: opts.integrity, - algorithms: opts.algorithms, - size: opts.size, - }) - hashStream.on('integrity', i => { - integrity = i - }) - hashStream.on('size', s => { - size = s - }) - - const pipeline = new Pipeline(inputStream, hashStream, outStream) - await pipeline.promise() - return { integrity, size } -} - -async function makeTmp (cache, opts) { - const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await fs.mkdir(path.dirname(tmpTarget), { recursive: true }) - return { - target: tmpTarget, - moved: false, - } -} - -async function moveToDestination (tmp, cache, sri) { - const destination = 
contentPath(cache, sri) - const destDir = path.dirname(destination) - if (moveOperations.has(destination)) { - return moveOperations.get(destination) - } - moveOperations.set( - destination, - fs.mkdir(destDir, { recursive: true }) - .then(async () => { - await moveFile(tmp.target, destination, { overwrite: false }) - tmp.moved = true - return tmp.moved - }) - .catch(err => { - if (!err.message.startsWith('The destination file exists')) { - throw Object.assign(err, { code: 'EEXIST' }) - } - }).finally(() => { - moveOperations.delete(destination) - }) - - ) - return moveOperations.get(destination) -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function checksumError (expected, found) { - const err = new Error(`Integrity check failed: - Wanted: ${expected} - Found: ${found}`) - err.code = 'EINTEGRITY' - err.expected = expected - err.found = found - return err -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js deleted file mode 100644 index 89c28f2f257d48..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js +++ /dev/null @@ -1,336 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { - appendFile, - mkdir, - readFile, - readdir, - rm, - writeFile, -} = require('fs/promises') -const { Minipass } = require('minipass') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') - -const contentPath = require('./content/path') -const hashToSegments = require('./util/hash-to-segments') -const indexV = require('../package.json')['cache-version'].index -const { moveFile } = require('@npmcli/fs') - -const pMap = require('p-map') -const lsStreamConcurrency = 5 - -module.exports.NotFoundError = class NotFoundError extends Error { - constructor (cache, key) { - super(`No cache entry for ${key} found in ${cache}`) - this.code = 'ENOENT' - this.cache = cache - this.key = key - } -} - -module.exports.compact = compact - -async function compact (cache, key, matchFn, opts = {}) { - const bucket = bucketPath(cache, key) - const entries = await bucketEntries(bucket) - const newEntries = [] - // we loop backwards because the bottom-most result is the newest - // since we add new entries with appendFile - for (let i = entries.length - 1; i >= 0; --i) { - const entry = entries[i] - // a null integrity could mean either a delete was appended - // or the user has simply stored an index that does not map - // to any content. we determine if the user wants to keep the - // null integrity based on the validateEntry function passed in options. - // if the integrity is null and no validateEntry is provided, we break - // as we consider the null integrity to be a deletion of everything - // that came before it. 
- if (entry.integrity === null && !opts.validateEntry) { - break - } - - // if this entry is valid, and it is either the first entry or - // the newEntries array doesn't already include an entry that - // matches this one based on the provided matchFn, then we add - // it to the beginning of our list - if ((!opts.validateEntry || opts.validateEntry(entry) === true) && - (newEntries.length === 0 || - !newEntries.find((oldEntry) => matchFn(oldEntry, entry)))) { - newEntries.unshift(entry) - } - } - - const newIndex = '\n' + newEntries.map((entry) => { - const stringified = JSON.stringify(entry) - const hash = hashEntry(stringified) - return `${hash}\t${stringified}` - }).join('\n') - - const setup = async () => { - const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await mkdir(path.dirname(target), { recursive: true }) - return { - target, - moved: false, - } - } - - const teardown = async (tmp) => { - if (!tmp.moved) { - return rm(tmp.target, { recursive: true, force: true }) - } - } - - const write = async (tmp) => { - await writeFile(tmp.target, newIndex, { flag: 'wx' }) - await mkdir(path.dirname(bucket), { recursive: true }) - // we use @npmcli/move-file directly here because we - // want to overwrite the existing file - await moveFile(tmp.target, bucket) - tmp.moved = true - } - - // write the file atomically - const tmp = await setup() - try { - await write(tmp) - } finally { - await teardown(tmp) - } - - // we reverse the list we generated such that the newest - // entries come first in order to make looping through them easier - // the true passed to formatEntry tells it to keep null - // integrity values, if they made it this far it's because - // validateEntry returned true, and as such we should return it - return newEntries.reverse().map((entry) => formatEntry(cache, entry, true)) -} - -module.exports.insert = insert - -async function insert (cache, key, integrity, opts = {}) { - const { metadata, size, time } = opts - const bucket = bucketPath(cache, key) - const entry = { - key, - integrity: integrity && ssri.stringify(integrity), - time: time || Date.now(), - size, - metadata, - } - try { - await mkdir(path.dirname(bucket), { recursive: true }) - const stringified = JSON.stringify(entry) - // NOTE - Cleverness ahoy! - // - // This works because it's tremendously unlikely for an entry to corrupt - // another while still preserving the string length of the JSON in - // question. So, we just slap the length in there and verify it on read. - // - // Thanks to @isaacs for the whiteboarding session that ended up with - // this. 
- await appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`) - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - - throw err - } - return formatEntry(cache, entry) -} - -module.exports.find = find - -async function find (cache, key) { - const bucket = bucketPath(cache, key) - try { - const entries = await bucketEntries(bucket) - return entries.reduce((latest, next) => { - if (next && next.key === key) { - return formatEntry(cache, next) - } else { - return latest - } - }, null) - } catch (err) { - if (err.code === 'ENOENT') { - return null - } else { - throw err - } - } -} - -module.exports.delete = del - -function del (cache, key, opts = {}) { - if (!opts.removeFully) { - return insert(cache, key, null, opts) - } - - const bucket = bucketPath(cache, key) - return rm(bucket, { recursive: true, force: true }) -} - -module.exports.lsStream = lsStream - -function lsStream (cache) { - const indexDir = bucketDir(cache) - const stream = new Minipass({ objectMode: true }) - - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const buckets = await readdirOrEmpty(indexDir) - await pMap(buckets, async (bucket) => { - const bucketPath = path.join(indexDir, bucket) - const subbuckets = await readdirOrEmpty(bucketPath) - await pMap(subbuckets, async (subbucket) => { - const subbucketPath = path.join(bucketPath, subbucket) - - // "/cachename//./*" - const subbucketEntries = await readdirOrEmpty(subbucketPath) - await pMap(subbucketEntries, async (entry) => { - const entryPath = path.join(subbucketPath, entry) - try { - const entries = await bucketEntries(entryPath) - // using a Map here prevents duplicate keys from showing up - // twice, I guess? - const reduced = entries.reduce((acc, entry) => { - acc.set(entry.key, entry) - return acc - }, new Map()) - // reduced is a map of key => entry - for (const entry of reduced.values()) { - const formatted = formatEntry(cache, entry) - if (formatted) { - stream.write(formatted) - } - } - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - throw err - } - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - stream.end() - return stream - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.ls = ls - -async function ls (cache) { - const entries = await lsStream(cache).collect() - return entries.reduce((acc, xs) => { - acc[xs.key] = xs - return acc - }, {}) -} - -module.exports.bucketEntries = bucketEntries - -async function bucketEntries (bucket, filter) { - const data = await readFile(bucket, 'utf8') - return _bucketEntries(data, filter) -} - -function _bucketEntries (data) { - const entries = [] - data.split('\n').forEach((entry) => { - if (!entry) { - return - } - - const pieces = entry.split('\t') - if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) { - // Hash is no good! Corruption or malice? Doesn't matter! 
- // EJECT EJECT - return - } - let obj - try { - obj = JSON.parse(pieces[1]) - } catch (_) { - // eslint-ignore-next-line no-empty-block - } - // coverage disabled here, no need to test with an entry that parses to something falsey - // istanbul ignore else - if (obj) { - entries.push(obj) - } - }) - return entries -} - -module.exports.bucketDir = bucketDir - -function bucketDir (cache) { - return path.join(cache, `index-v${indexV}`) -} - -module.exports.bucketPath = bucketPath - -function bucketPath (cache, key) { - const hashed = hashKey(key) - return path.join.apply( - path, - [bucketDir(cache)].concat(hashToSegments(hashed)) - ) -} - -module.exports.hashKey = hashKey - -function hashKey (key) { - return hash(key, 'sha256') -} - -module.exports.hashEntry = hashEntry - -function hashEntry (str) { - return hash(str, 'sha1') -} - -function hash (str, digest) { - return crypto - .createHash(digest) - .update(str) - .digest('hex') -} - -function formatEntry (cache, entry, keepAll) { - // Treat null digests as deletions. They'll shadow any previous entries. - if (!entry.integrity && !keepAll) { - return null - } - - return { - key: entry.key, - integrity: entry.integrity, - path: entry.integrity ? contentPath(cache, entry.integrity) : undefined, - size: entry.size, - time: entry.time, - metadata: entry.metadata, - } -} - -function readdirOrEmpty (dir) { - return readdir(dir).catch((err) => { - if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { - return [] - } - - throw err - }) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js deleted file mode 100644 index 80ec206c7ecaaa..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js +++ /dev/null @@ -1,170 +0,0 @@ -'use strict' - -const Collect = require('minipass-collect') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') - -const index = require('./entry-index') -const memo = require('./memoization') -const read = require('./content/read') - -async function getData (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return { - metadata: memoized.entry.metadata, - data: memoized.data, - integrity: memoized.entry.integrity, - size: memoized.entry.size, - } - } - - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - const data = await read(cache, entry.integrity, { integrity, size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return { - data, - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} -module.exports = getData - -async function getDataByDigest (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get.byDigest(cache, key, opts) - if (memoized && memoize !== false) { - return memoized - } - - const res = await read(cache, key, { integrity, size }) - if (memoize) { - memo.put.byDigest(cache, key, res, opts) - } - return res -} -module.exports.byDigest = getDataByDigest - -const getMemoizedStream = (memoized) => { - const stream = new Minipass() - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(memoized.entry.metadata) - ev === 'integrity' && cb(memoized.entry.integrity) - ev === 'size' && cb(memoized.entry.size) - }) - stream.end(memoized.data) - return stream -} - -function getStream (cache, key, opts = {}) 
{ - const { memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return getMemoizedStream(memoized) - } - - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const entry = await index.find(cache, key) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - - stream.emit('metadata', entry.metadata) - stream.emit('integrity', entry.integrity) - stream.emit('size', entry.size) - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(entry.metadata) - ev === 'integrity' && cb(entry.integrity) - ev === 'size' && cb(entry.size) - }) - - const src = read.readStream( - cache, - entry.integrity, - { ...opts, size: typeof size !== 'number' ? entry.size : size } - ) - - if (memoize) { - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put(cache, entry, data, opts)) - stream.unshift(memoStream) - } - stream.unshift(src) - return stream - }).catch((err) => stream.emit('error', err)) - - return stream -} - -module.exports.stream = getStream - -function getStreamDigest (cache, integrity, opts = {}) { - const { memoize } = opts - const memoized = memo.get.byDigest(cache, integrity, opts) - if (memoized && memoize !== false) { - const stream = new Minipass() - stream.end(memoized) - return stream - } else { - const stream = read.readStream(cache, integrity, opts) - if (!memoize) { - return stream - } - - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put.byDigest( - cache, - integrity, - data, - opts - )) - return new Pipeline(stream, memoStream) - } -} - -module.exports.stream.byDigest = getStreamDigest - -function info (cache, key, opts = {}) { - const { memoize } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return Promise.resolve(memoized.entry) - } else { - return index.find(cache, key) - } -} -module.exports.info = info - -async function copy (cache, key, dest, opts = {}) { - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - await read.copy(cache, entry.integrity, dest, opts) - return { - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} - -module.exports.copy = copy - -async function copyByDigest (cache, key, dest, opts = {}) { - await read.copy(cache, key, dest, opts) - return key -} - -module.exports.copy.byDigest = copyByDigest - -module.exports.hasContent = read.hasContent diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js deleted file mode 100644 index c9b0da5f3a271b..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' - -const get = require('./get.js') -const put = require('./put.js') -const rm = require('./rm.js') -const verify = require('./verify.js') -const { clearMemoized } = require('./memoization.js') -const tmp = require('./util/tmp.js') -const index = require('./entry-index.js') - -module.exports.index = {} -module.exports.index.compact = index.compact -module.exports.index.insert = index.insert - -module.exports.ls = index.ls -module.exports.ls.stream = index.lsStream - -module.exports.get = get -module.exports.get.byDigest = get.byDigest -module.exports.get.stream = get.stream -module.exports.get.stream.byDigest = 
get.stream.byDigest -module.exports.get.copy = get.copy -module.exports.get.copy.byDigest = get.copy.byDigest -module.exports.get.info = get.info -module.exports.get.hasContent = get.hasContent - -module.exports.put = put -module.exports.put.stream = put.stream - -module.exports.rm = rm.entry -module.exports.rm.all = rm.all -module.exports.rm.entry = module.exports.rm -module.exports.rm.content = rm.content - -module.exports.clearMemoized = clearMemoized - -module.exports.tmp = {} -module.exports.tmp.mkdir = tmp.mkdir -module.exports.tmp.withTmp = tmp.withTmp - -module.exports.verify = verify -module.exports.verify.lastRun = verify.lastRun diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js deleted file mode 100644 index 2ecc60912e4563..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js +++ /dev/null @@ -1,72 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') - -const MEMOIZED = new LRUCache({ - max: 500, - maxSize: 50 * 1024 * 1024, // 50MB - ttl: 3 * 60 * 1000, // 3 minutes - sizeCalculation: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length, -}) - -module.exports.clearMemoized = clearMemoized - -function clearMemoized () { - const old = {} - MEMOIZED.forEach((v, k) => { - old[k] = v - }) - MEMOIZED.clear() - return old -} - -module.exports.put = put - -function put (cache, entry, data, opts) { - pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data }) - putDigest(cache, entry.integrity, data, opts) -} - -module.exports.put.byDigest = putDigest - -function putDigest (cache, integrity, data, opts) { - pickMem(opts).set(`digest:${cache}:${integrity}`, data) -} - -module.exports.get = get - -function get (cache, key, opts) { - return pickMem(opts).get(`key:${cache}:${key}`) -} - -module.exports.get.byDigest = getDigest - -function getDigest (cache, integrity, opts) { - return pickMem(opts).get(`digest:${cache}:${integrity}`) -} - -class ObjProxy { - constructor (obj) { - this.obj = obj - } - - get (key) { - return this.obj[key] - } - - set (key, val) { - this.obj[key] = val - } -} - -function pickMem (opts) { - if (!opts || !opts.memoize) { - return MEMOIZED - } else if (opts.memoize.get && opts.memoize.set) { - return opts.memoize - } else if (typeof opts.memoize === 'object') { - return new ObjProxy(opts.memoize) - } else { - return MEMOIZED - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js deleted file mode 100644 index 9fc932d5f6dec5..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js +++ /dev/null @@ -1,80 +0,0 @@ -'use strict' - -const index = require('./entry-index') -const memo = require('./memoization') -const write = require('./content/write') -const Flush = require('minipass-flush') -const { PassThrough } = require('minipass-collect') -const Pipeline = require('minipass-pipeline') - -const putOpts = (opts) => ({ - algorithms: ['sha512'], - ...opts, -}) - -module.exports = putData - -async function putData (cache, key, data, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - const res = await write(cache, data, opts) - const entry = await index.insert(cache, key, res.integrity, { ...opts, size: res.size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return res.integrity -} - -module.exports.stream = putStream - -function putStream (cache, key, 
opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - let integrity - let size - let error - - let memoData - const pipeline = new Pipeline() - // first item in the pipeline is the memoizer, because we need - // that to end first and get the collected data. - if (memoize) { - const memoizer = new PassThrough().on('collect', data => { - memoData = data - }) - pipeline.push(memoizer) - } - - // contentStream is a write-only, not a passthrough - // no data comes out of it. - const contentStream = write.stream(cache, opts) - .on('integrity', (int) => { - integrity = int - }) - .on('size', (s) => { - size = s - }) - .on('error', (err) => { - error = err - }) - - pipeline.push(contentStream) - - // last but not least, we write the index and emit hash and size, - // and memoize if we're doing that - pipeline.push(new Flush({ - async flush () { - if (!error) { - const entry = await index.insert(cache, key, integrity, { ...opts, size }) - if (memoize && memoData) { - memo.put(cache, entry, memoData, opts) - } - pipeline.emit('integrity', integrity) - pipeline.emit('size', size) - } - }, - })) - - return pipeline -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js deleted file mode 100644 index a94760c7cf2430..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -const { rm } = require('fs/promises') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const memo = require('./memoization') -const path = require('path') -const rmContent = require('./content/rm') - -module.exports = entry -module.exports.entry = entry - -function entry (cache, key, opts) { - memo.clearMemoized() - return index.delete(cache, key, opts) -} - -module.exports.content = content - -function content (cache, integrity) { - memo.clearMemoized() - return rmContent(cache, integrity) -} - -module.exports.all = all - -async function all (cache) { - memo.clearMemoized() - const paths = await glob(path.join(cache, '*(content-*|index-*)'), { silent: true, nosort: true }) - return Promise.all(paths.map((p) => rm(p, { recursive: true, force: true }))) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js deleted file mode 100644 index 8500c1c16a429f..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { glob } = require('glob') -const path = require('path') - -const globify = (pattern) => pattern.split(path.win32.sep).join(path.posix.sep) -module.exports = (path, options) => glob(globify(path), options) diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js deleted file mode 100644 index 445599b5038088..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -module.exports = hashToSegments - -function hashToSegments (hash) { - return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)] -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js deleted file mode 100644 index 0bf5302136ebeb..00000000000000 --- 
a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js +++ /dev/null @@ -1,26 +0,0 @@ -'use strict' - -const { withTempDir } = require('@npmcli/fs') -const fs = require('fs/promises') -const path = require('path') - -module.exports.mkdir = mktmpdir - -async function mktmpdir (cache, opts = {}) { - const { tmpPrefix } = opts - const tmpDir = path.join(cache, 'tmp') - await fs.mkdir(tmpDir, { recursive: true, owner: 'inherit' }) - // do not use path.join(), it drops the trailing / if tmpPrefix is unset - const target = `${tmpDir}${path.sep}${tmpPrefix || ''}` - return fs.mkdtemp(target, { owner: 'inherit' }) -} - -module.exports.withTmp = withTmp - -function withTmp (cache, opts, cb) { - if (!cb) { - cb = opts - opts = {} - } - return withTempDir(path.join(cache, 'tmp'), cb, opts) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js deleted file mode 100644 index d7423da1295b68..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js +++ /dev/null @@ -1,257 +0,0 @@ -'use strict' - -const { - mkdir, - readFile, - rm, - stat, - truncate, - writeFile, -} = require('fs/promises') -const pMap = require('p-map') -const contentPath = require('./content/path') -const fsm = require('fs-minipass') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const path = require('path') -const ssri = require('ssri') - -const hasOwnProperty = (obj, key) => - Object.prototype.hasOwnProperty.call(obj, key) - -const verifyOpts = (opts) => ({ - concurrency: 20, - log: { silly () {} }, - ...opts, -}) - -module.exports = verify - -async function verify (cache, opts) { - opts = verifyOpts(opts) - opts.log.silly('verify', 'verifying cache at', cache) - - const steps = [ - markStartTime, - fixPerms, - garbageCollect, - rebuildIndex, - cleanTmp, - writeVerifile, - markEndTime, - ] - - const stats = {} - for (const step of steps) { - const label = step.name - const start = new Date() - const s = await step(cache, opts) - if (s) { - Object.keys(s).forEach((k) => { - stats[k] = s[k] - }) - } - const end = new Date() - if (!stats.runTime) { - stats.runTime = {} - } - stats.runTime[label] = end - start - } - stats.runTime.total = stats.endTime - stats.startTime - opts.log.silly( - 'verify', - 'verification finished for', - cache, - 'in', - `${stats.runTime.total}ms` - ) - return stats -} - -async function markStartTime () { - return { startTime: new Date() } -} - -async function markEndTime () { - return { endTime: new Date() } -} - -async function fixPerms (cache, opts) { - opts.log.silly('verify', 'fixing cache permissions') - await mkdir(cache, { recursive: true }) - return null -} - -// Implements a naive mark-and-sweep tracing garbage collector. -// -// The algorithm is basically as follows: -// 1. Read (and filter) all index entries ("pointers") -// 2. Mark each integrity value as "live" -// 3. Read entire filesystem tree in `content-vX/` dir -// 4. If content is live, verify its checksum and delete it if it fails -// 5. If content is not marked as live, rm it. 
-// -async function garbageCollect (cache, opts) { - opts.log.silly('verify', 'garbage collecting content') - const indexStream = index.lsStream(cache) - const liveContent = new Set() - indexStream.on('data', (entry) => { - if (opts.filter && !opts.filter(entry)) { - return - } - - // integrity is stringified, re-parse it so we can get each hash - const integrity = ssri.parse(entry.integrity) - for (const algo in integrity) { - liveContent.add(integrity[algo].toString()) - } - }) - await new Promise((resolve, reject) => { - indexStream.on('end', resolve).on('error', reject) - }) - const contentDir = contentPath.contentDir(cache) - const files = await glob(path.join(contentDir, '**'), { - follow: false, - nodir: true, - nosort: true, - }) - const stats = { - verifiedContent: 0, - reclaimedCount: 0, - reclaimedSize: 0, - badContentCount: 0, - keptSize: 0, - } - await pMap( - files, - async (f) => { - const split = f.split(/[/\\]/) - const digest = split.slice(split.length - 3).join('') - const algo = split[split.length - 4] - const integrity = ssri.fromHex(digest, algo) - if (liveContent.has(integrity.toString())) { - const info = await verifyContent(f, integrity) - if (!info.valid) { - stats.reclaimedCount++ - stats.badContentCount++ - stats.reclaimedSize += info.size - } else { - stats.verifiedContent++ - stats.keptSize += info.size - } - } else { - // No entries refer to this content. We can delete. - stats.reclaimedCount++ - const s = await stat(f) - await rm(f, { recursive: true, force: true }) - stats.reclaimedSize += s.size - } - return stats - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function verifyContent (filepath, sri) { - const contentInfo = {} - try { - const { size } = await stat(filepath) - contentInfo.size = size - contentInfo.valid = true - await ssri.checkStream(new fsm.ReadStream(filepath), sri) - } catch (err) { - if (err.code === 'ENOENT') { - return { size: 0, valid: false } - } - if (err.code !== 'EINTEGRITY') { - throw err - } - - await rm(filepath, { recursive: true, force: true }) - contentInfo.valid = false - } - return contentInfo -} - -async function rebuildIndex (cache, opts) { - opts.log.silly('verify', 'rebuilding index') - const entries = await index.ls(cache) - const stats = { - missingContent: 0, - rejectedEntries: 0, - totalEntries: 0, - } - const buckets = {} - for (const k in entries) { - /* istanbul ignore else */ - if (hasOwnProperty(entries, k)) { - const hashed = index.hashKey(k) - const entry = entries[k] - const excluded = opts.filter && !opts.filter(entry) - excluded && stats.rejectedEntries++ - if (buckets[hashed] && !excluded) { - buckets[hashed].push(entry) - } else if (buckets[hashed] && excluded) { - // skip - } else if (excluded) { - buckets[hashed] = [] - buckets[hashed]._path = index.bucketPath(cache, k) - } else { - buckets[hashed] = [entry] - buckets[hashed]._path = index.bucketPath(cache, k) - } - } - } - await pMap( - Object.keys(buckets), - (key) => { - return rebuildBucket(cache, buckets[key], stats, opts) - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function rebuildBucket (cache, bucket, stats) { - await truncate(bucket._path) - // This needs to be serialized because cacache explicitly - // lets very racy bucket conflicts clobber each other. 
- for (const entry of bucket) { - const content = contentPath(cache, entry.integrity) - try { - await stat(content) - await index.insert(cache, entry.key, entry.integrity, { - metadata: entry.metadata, - size: entry.size, - time: entry.time, - }) - stats.totalEntries++ - } catch (err) { - if (err.code === 'ENOENT') { - stats.rejectedEntries++ - stats.missingContent++ - } else { - throw err - } - } - } -} - -function cleanTmp (cache, opts) { - opts.log.silly('verify', 'cleaning tmp directory') - return rm(path.join(cache, 'tmp'), { recursive: true, force: true }) -} - -async function writeVerifile (cache, opts) { - const verifile = path.join(cache, '_lastverified') - opts.log.silly('verify', 'writing verifile to ' + verifile) - return writeFile(verifile, `${Date.now()}`) -} - -module.exports.lastRun = lastRun - -async function lastRun (cache) { - const data = await readFile(path.join(cache, '_lastverified'), { encoding: 'utf8' }) - return new Date(+data) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/package.json b/deps/npm/node_modules/tuf-js/node_modules/cacache/package.json deleted file mode 100644 index 6e6219158ed759..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/package.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "cacache", - "version": "18.0.4", - "cache-version": { - "content": "2", - "index": "5" - }, - "description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "snap": "tap", - "coverage": "tap", - "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "npmclilint": "npmcli-lint", - "lintfix": "npm run lint -- --fix", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/cacache.git" - }, - "keywords": [ - "cache", - "caching", - "content-addressable", - "sri", - "sri hash", - "subresource integrity", - "cache", - "storage", - "store", - "file store", - "filesystem", - "disk cache", - "disk storage" - ], - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "windowsCI": false, - "version": "4.22.0", - "publish": "true" - }, - "author": "GitHub Inc.", - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE deleted file mode 100644 index 1808eb2844231c..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2017-2022 (c) npm, Inc. 
- -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js deleted file mode 100644 index bfcfacbcc95e18..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js +++ /dev/null @@ -1,471 +0,0 @@ -const { Request, Response } = require('minipass-fetch') -const { Minipass } = require('minipass') -const MinipassFlush = require('minipass-flush') -const cacache = require('cacache') -const url = require('url') - -const CachingMinipassPipeline = require('../pipeline.js') -const CachePolicy = require('./policy.js') -const cacheKey = require('./key.js') -const remote = require('../remote.js') - -const hasOwnProperty = (obj, prop) => Object.prototype.hasOwnProperty.call(obj, prop) - -// allow list for request headers that will be written to the cache index -// note: we will also store any request headers -// that are named in a response's vary header -const KEEP_REQUEST_HEADERS = [ - 'accept-charset', - 'accept-encoding', - 'accept-language', - 'accept', - 'cache-control', -] - -// allow list for response headers that will be written to the cache index -// note: we must not store the real response's age header, or when we load -// a cache policy based on the metadata it will think the cached response -// is always stale -const KEEP_RESPONSE_HEADERS = [ - 'cache-control', - 'content-encoding', - 'content-language', - 'content-type', - 'date', - 'etag', - 'expires', - 'last-modified', - 'link', - 'location', - 'pragma', - 'vary', -] - -// return an object containing all metadata to be written to the index -const getMetadata = (request, response, options) => { - const metadata = { - time: Date.now(), - url: request.url, - reqHeaders: {}, - resHeaders: {}, - - // options on which we must match the request and vary the response - options: { - compress: options.compress != null ? 
options.compress : request.compress, - }, - } - - // only save the status if it's not a 200 or 304 - if (response.status !== 200 && response.status !== 304) { - metadata.status = response.status - } - - for (const name of KEEP_REQUEST_HEADERS) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - - // if the request's host header differs from the host in the url - // we need to keep it, otherwise it's just noise and we ignore it - const host = request.headers.get('host') - const parsedUrl = new url.URL(request.url) - if (host && parsedUrl.host !== host) { - metadata.reqHeaders.host = host - } - - // if the response has a vary header, make sure - // we store the relevant request headers too - if (response.headers.has('vary')) { - const vary = response.headers.get('vary') - // a vary of "*" means every header causes a different response. - // in that scenario, we do not include any additional headers - // as the freshness check will always fail anyway and we don't - // want to bloat the cache indexes - if (vary !== '*') { - // copy any other request headers that will vary the response - const varyHeaders = vary.trim().toLowerCase().split(/\s*,\s*/) - for (const name of varyHeaders) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - } - } - - for (const name of KEEP_RESPONSE_HEADERS) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - for (const name of options.cacheAdditionalHeaders) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - return metadata -} - -// symbols used to hide objects that may be lazily evaluated in a getter -const _request = Symbol('request') -const _response = Symbol('response') -const _policy = Symbol('policy') - -class CacheEntry { - constructor ({ entry, request, response, options }) { - if (entry) { - this.key = entry.key - this.entry = entry - // previous versions of this module didn't write an explicit timestamp in - // the metadata, so fall back to the entry's timestamp. 
we can't use the - // entry timestamp to determine staleness because cacache will update it - // when it verifies its data - this.entry.metadata.time = this.entry.metadata.time || this.entry.time - } else { - this.key = cacheKey(request) - } - - this.options = options - - // these properties are behind getters that lazily evaluate - this[_request] = request - this[_response] = response - this[_policy] = null - } - - // returns a CacheEntry instance that satisfies the given request - // or undefined if no existing entry satisfies - static async find (request, options) { - try { - // compacts the index and returns an array of unique entries - var matches = await cacache.index.compact(options.cachePath, cacheKey(request), (A, B) => { - const entryA = new CacheEntry({ entry: A, options }) - const entryB = new CacheEntry({ entry: B, options }) - return entryA.policy.satisfies(entryB.request) - }, { - validateEntry: (entry) => { - // clean out entries with a buggy content-encoding value - if (entry.metadata && - entry.metadata.resHeaders && - entry.metadata.resHeaders['content-encoding'] === null) { - return false - } - - // if an integrity is null, it needs to have a status specified - if (entry.integrity === null) { - return !!(entry.metadata && entry.metadata.status) - } - - return true - }, - }) - } catch (err) { - // if the compact request fails, ignore the error and return - return - } - - // a cache mode of 'reload' means to behave as though we have no cache - // on the way to the network. return undefined to allow cacheFetch to - // create a brand new request no matter what. - if (options.cache === 'reload') { - return - } - - // find the specific entry that satisfies the request - let match - for (const entry of matches) { - const _entry = new CacheEntry({ - entry, - options, - }) - - if (_entry.policy.satisfies(request)) { - match = _entry - break - } - } - - return match - } - - // if the user made a PUT/POST/PATCH then we invalidate our - // cache for the same url by deleting the index entirely - static async invalidate (request, options) { - const key = cacheKey(request) - try { - await cacache.rm.entry(options.cachePath, key, { removeFully: true }) - } catch (err) { - // ignore errors - } - } - - get request () { - if (!this[_request]) { - this[_request] = new Request(this.entry.metadata.url, { - method: 'GET', - headers: this.entry.metadata.reqHeaders, - ...this.entry.metadata.options, - }) - } - - return this[_request] - } - - get response () { - if (!this[_response]) { - this[_response] = new Response(null, { - url: this.entry.metadata.url, - counter: this.options.counter, - status: this.entry.metadata.status || 200, - headers: { - ...this.entry.metadata.resHeaders, - 'content-length': this.entry.size, - }, - }) - } - - return this[_response] - } - - get policy () { - if (!this[_policy]) { - this[_policy] = new CachePolicy({ - entry: this.entry, - request: this.request, - response: this.response, - options: this.options, - }) - } - - return this[_policy] - } - - // wraps the response in a pipeline that stores the data - // in the cache while the user consumes it - async store (status) { - // if we got a status other than 200, 301, or 308, - // or the CachePolicy forbid storage, append the - // cache status header and return it untouched - if ( - this.request.method !== 'GET' || - ![200, 301, 308].includes(this.response.status) || - !this.policy.storable() - ) { - this.response.headers.set('x-local-cache-status', 'skip') - return this.response - } - - const size = 
this.response.headers.get('content-length') - const cacheOpts = { - algorithms: this.options.algorithms, - metadata: getMetadata(this.request, this.response, this.options), - size, - integrity: this.options.integrity, - integrityEmitter: this.response.body.hasIntegrityEmitter && this.response.body, - } - - let body = null - // we only set a body if the status is a 200, redirects are - // stored as metadata only - if (this.response.status === 200) { - let cacheWriteResolve, cacheWriteReject - const cacheWritePromise = new Promise((resolve, reject) => { - cacheWriteResolve = resolve - cacheWriteReject = reject - }).catch((err) => { - body.emit('error', err) - }) - - body = new CachingMinipassPipeline({ events: ['integrity', 'size'] }, new MinipassFlush({ - flush () { - return cacheWritePromise - }, - })) - // this is always true since if we aren't reusing the one from the remote fetch, we - // are using the one from cacache - body.hasIntegrityEmitter = true - - const onResume = () => { - const tee = new Minipass() - const cacheStream = cacache.put.stream(this.options.cachePath, this.key, cacheOpts) - // re-emit the integrity and size events on our new response body so they can be reused - cacheStream.on('integrity', i => body.emit('integrity', i)) - cacheStream.on('size', s => body.emit('size', s)) - // stick a flag on here so downstream users will know if they can expect integrity events - tee.pipe(cacheStream) - // TODO if the cache write fails, log a warning but return the response anyway - // eslint-disable-next-line promise/catch-or-return - cacheStream.promise().then(cacheWriteResolve, cacheWriteReject) - body.unshift(tee) - body.unshift(this.response.body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - } else { - await cacache.index.insert(this.options.cachePath, this.key, null, cacheOpts) - } - - // note: we do not set the x-local-cache-hash header because we do not know - // the hash value until after the write to the cache completes, which doesn't - // happen until after the response has been sent and it's too late to write - // the header anyway - this.response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - this.response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - this.response.headers.set('x-local-cache-mode', 'stream') - this.response.headers.set('x-local-cache-status', status) - this.response.headers.set('x-local-cache-time', new Date().toISOString()) - const newResponse = new Response(body, { - url: this.response.url, - status: this.response.status, - headers: this.response.headers, - counter: this.options.counter, - }) - return newResponse - } - - // use the cached data to create a response and return it - async respond (method, options, status) { - let response - if (method === 'HEAD' || [301, 308].includes(this.response.status)) { - // if the request is a HEAD, or the response is a redirect, - // then the metadata in the entry already includes everything - // we need to build a response - response = this.response - } else { - // we're responding with a full cached response, so create a body - // that reads from cacache and attach it to a new Response - const body = new Minipass() - const headers = { ...this.policy.responseHeaders() } - - const onResume = () => { - const cacheStream = cacache.get.stream.byDigest( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - cacheStream.on('error', async (err) => { - cacheStream.pause() - if 
(err.code === 'EINTEGRITY') { - await cacache.rm.content( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - } - if (err.code === 'ENOENT' || err.code === 'EINTEGRITY') { - await CacheEntry.invalidate(this.request, this.options) - } - body.emit('error', err) - cacheStream.resume() - }) - // emit the integrity and size events based on our metadata so we're consistent - body.emit('integrity', this.entry.integrity) - body.emit('size', Number(headers['content-length'])) - cacheStream.pipe(body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - response = new Response(body, { - url: this.entry.metadata.url, - counter: options.counter, - status: 200, - headers, - }) - } - - response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - response.headers.set('x-local-cache-hash', encodeURIComponent(this.entry.integrity)) - response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - response.headers.set('x-local-cache-mode', 'stream') - response.headers.set('x-local-cache-status', status) - response.headers.set('x-local-cache-time', new Date(this.entry.metadata.time).toUTCString()) - return response - } - - // use the provided request along with this cache entry to - // revalidate the stored response. returns a response, either - // from the cache or from the update - async revalidate (request, options) { - const revalidateRequest = new Request(request, { - headers: this.policy.revalidationHeaders(request), - }) - - try { - // NOTE: be sure to remove the headers property from the - // user supplied options, since we have already defined - // them on the new request object. if they're still in the - // options then those will overwrite the ones from the policy - var response = await remote(revalidateRequest, { - ...options, - headers: undefined, - }) - } catch (err) { - // if the network fetch fails, return the stale - // cached response unless it has a cache-control - // of 'must-revalidate' - if (!this.policy.mustRevalidate) { - return this.respond(request.method, options, 'stale') - } - - throw err - } - - if (this.policy.revalidated(revalidateRequest, response)) { - // we got a 304, write a new index to the cache and respond from cache - const metadata = getMetadata(request, response, options) - // 304 responses do not include headers that are specific to the response data - // since they do not include a body, so we copy values for headers that were - // in the old cache entry to the new one, if the new metadata does not already - // include that header - for (const name of KEEP_RESPONSE_HEADERS) { - if ( - !hasOwnProperty(metadata.resHeaders, name) && - hasOwnProperty(this.entry.metadata.resHeaders, name) - ) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - } - - for (const name of options.cacheAdditionalHeaders) { - const inMeta = hasOwnProperty(metadata.resHeaders, name) - const inEntry = hasOwnProperty(this.entry.metadata.resHeaders, name) - const inPolicy = hasOwnProperty(this.policy.response.headers, name) - - // if the header is in the existing entry, but it is not in the metadata - // then we need to write it to the metadata as this will refresh the on-disk cache - if (!inMeta && inEntry) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - // if the header is in the metadata, but not in the policy, then we need to set - // it in the policy so that it's included in the immediate response. 
future - // responses will load a new cache entry, so we don't need to change that - if (!inPolicy && inMeta) { - this.policy.response.headers[name] = metadata.resHeaders[name] - } - } - - try { - await cacache.index.insert(options.cachePath, this.key, this.entry.integrity, { - size: this.entry.size, - metadata, - }) - } catch (err) { - // if updating the cache index fails, we ignore it and - // respond anyway - } - return this.respond(request.method, options, 'revalidated') - } - - // if we got a modified response, create a new entry based on it - const newEntry = new CacheEntry({ - request, - response, - options, - }) - - // respond with the new entry while writing it to the cache - return newEntry.store('updated') - } -} - -module.exports = CacheEntry diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js deleted file mode 100644 index 67a66573bebe66..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js +++ /dev/null @@ -1,11 +0,0 @@ -class NotCachedError extends Error { - constructor (url) { - /* eslint-disable-next-line max-len */ - super(`request to ${url} failed: cache mode is 'only-if-cached' but no cached response is available.`) - this.code = 'ENOTCACHED' - } -} - -module.exports = { - NotCachedError, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js deleted file mode 100644 index 0de49d23fb9336..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js +++ /dev/null @@ -1,49 +0,0 @@ -const { NotCachedError } = require('./errors.js') -const CacheEntry = require('./entry.js') -const remote = require('../remote.js') - -// do whatever is necessary to get a Response and return it -const cacheFetch = async (request, options) => { - // try to find a cached entry that satisfies this request - const entry = await CacheEntry.find(request, options) - if (!entry) { - // no cached result, if the cache mode is 'only-if-cached' that's a failure - if (options.cache === 'only-if-cached') { - throw new NotCachedError(request.url) - } - - // otherwise, we make a request, store it and return it - const response = await remote(request, options) - const newEntry = new CacheEntry({ request, response, options }) - return newEntry.store('miss') - } - - // we have a cached response that satisfies this request, however if the cache - // mode is 'no-cache' then we send the revalidation request no matter what - if (options.cache === 'no-cache') { - return entry.revalidate(request, options) - } - - // if the cached entry is not stale, or if the cache mode is 'force-cache' or - // 'only-if-cached' we can respond with the cached entry. set the status - // based on the result of needsRevalidation and respond - const _needsRevalidation = entry.policy.needsRevalidation(request) - if (options.cache === 'force-cache' || - options.cache === 'only-if-cached' || - !_needsRevalidation) { - return entry.respond(request.method, options, _needsRevalidation ? 
'stale' : 'hit') - } - - // if we got here, the cache entry is stale so revalidate it - return entry.revalidate(request, options) -} - -cacheFetch.invalidate = async (request, options) => { - if (!options.cachePath) { - return - } - - return CacheEntry.invalidate(request, options) -} - -module.exports = cacheFetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js deleted file mode 100644 index f7684d562b7fae..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js +++ /dev/null @@ -1,17 +0,0 @@ -const { URL, format } = require('url') - -// options passed to url.format() when generating a key -const formatOptions = { - auth: false, - fragment: false, - search: true, - unicode: false, -} - -// returns a string to be used as the cache key for the Request -const cacheKey = (request) => { - const parsed = new URL(request.url) - return `make-fetch-happen:request-cache:${format(parsed, formatOptions)}` -} - -module.exports = cacheKey diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js deleted file mode 100644 index ada3c8600dae92..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js +++ /dev/null @@ -1,161 +0,0 @@ -const CacheSemantics = require('http-cache-semantics') -const Negotiator = require('negotiator') -const ssri = require('ssri') - -// options passed to http-cache-semantics constructor -const policyOptions = { - shared: false, - ignoreCargoCult: true, -} - -// a fake empty response, used when only testing the -// request for storability -const emptyResponse = { status: 200, headers: {} } - -// returns a plain object representation of the Request -const requestObject = (request) => { - const _obj = { - method: request.method, - url: request.url, - headers: {}, - compress: request.compress, - } - - request.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -// returns a plain object representation of the Response -const responseObject = (response) => { - const _obj = { - status: response.status, - headers: {}, - } - - response.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -class CachePolicy { - constructor ({ entry, request, response, options }) { - this.entry = entry - this.request = requestObject(request) - this.response = responseObject(response) - this.options = options - this.policy = new CacheSemantics(this.request, this.response, policyOptions) - - if (this.entry) { - // if we have an entry, copy the timestamp to the _responseTime - // this is necessary because the CacheSemantics constructor forces - // the value to Date.now() which means a policy created from a - // cache entry is likely to always identify itself as stale - this.policy._responseTime = this.entry.metadata.time - } - } - - // static method to quickly determine if a request alone is storable - static storable (request, options) { - // no cachePath means no caching - if (!options.cachePath) { - return false - } - - // user explicitly asked not to cache - if (options.cache === 'no-store') { - return false - } - - // we only cache GET and HEAD requests - if (!['GET', 'HEAD'].includes(request.method)) { - return false - } - - // otherwise, let http-cache-semantics make the decision - // based on the request's headers 
- const policy = new CacheSemantics(requestObject(request), emptyResponse, policyOptions) - return policy.storable() - } - - // returns true if the policy satisfies the request - satisfies (request) { - const _req = requestObject(request) - if (this.request.headers.host !== _req.headers.host) { - return false - } - - if (this.request.compress !== _req.compress) { - return false - } - - const negotiatorA = new Negotiator(this.request) - const negotiatorB = new Negotiator(_req) - - if (JSON.stringify(negotiatorA.mediaTypes()) !== JSON.stringify(negotiatorB.mediaTypes())) { - return false - } - - if (JSON.stringify(negotiatorA.languages()) !== JSON.stringify(negotiatorB.languages())) { - return false - } - - if (JSON.stringify(negotiatorA.encodings()) !== JSON.stringify(negotiatorB.encodings())) { - return false - } - - if (this.options.integrity) { - return ssri.parse(this.options.integrity).match(this.entry.integrity) - } - - return true - } - - // returns true if the request and response allow caching - storable () { - return this.policy.storable() - } - - // NOTE: this is a hack to avoid parsing the cache-control - // header ourselves, it returns true if the response's - // cache-control contains must-revalidate - get mustRevalidate () { - return !!this.policy._rescc['must-revalidate'] - } - - // returns true if the cached response requires revalidation - // for the given request - needsRevalidation (request) { - const _req = requestObject(request) - // force method to GET because we only cache GETs - // but can serve a HEAD from a cached GET - _req.method = 'GET' - return !this.policy.satisfiesWithoutRevalidation(_req) - } - - responseHeaders () { - return this.policy.responseHeaders() - } - - // returns a new object containing the appropriate headers - // to send a revalidation request - revalidationHeaders (request) { - const _req = requestObject(request) - return this.policy.revalidationHeaders(_req) - } - - // returns true if the request/response was revalidated - // successfully. returns false if a new response was received - revalidated (request, response) { - const _req = requestObject(request) - const _res = responseObject(response) - const policy = this.policy.revalidatedPolicy(_req, _res) - return !policy.modified - } -} - -module.exports = CachePolicy diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js deleted file mode 100644 index 233ba67e165502..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js +++ /dev/null @@ -1,118 +0,0 @@ -'use strict' - -const { FetchError, Request, isRedirect } = require('minipass-fetch') -const url = require('url') - -const CachePolicy = require('./cache/policy.js') -const cache = require('./cache/index.js') -const remote = require('./remote.js') - -// given a Request, a Response and user options -// return true if the response is a redirect that -// can be followed. 
we throw errors that will result -// in the fetch being rejected if the redirect is -// possible but invalid for some reason -const canFollowRedirect = (request, response, options) => { - if (!isRedirect(response.status)) { - return false - } - - if (options.redirect === 'manual') { - return false - } - - if (options.redirect === 'error') { - throw new FetchError(`redirect mode is set to error: ${request.url}`, - 'no-redirect', { code: 'ENOREDIRECT' }) - } - - if (!response.headers.has('location')) { - throw new FetchError(`redirect location header missing for: ${request.url}`, - 'no-location', { code: 'EINVALIDREDIRECT' }) - } - - if (request.counter >= request.follow) { - throw new FetchError(`maximum redirect reached at: ${request.url}`, - 'max-redirect', { code: 'EMAXREDIRECT' }) - } - - return true -} - -// given a Request, a Response, and the user's options return an object -// with a new Request and a new options object that will be used for -// following the redirect -const getRedirect = (request, response, options) => { - const _opts = { ...options } - const location = response.headers.get('location') - const redirectUrl = new url.URL(location, /^https?:/.test(location) ? undefined : request.url) - // Comment below is used under the following license: - /** - * @license - * Copyright (c) 2010-2012 Mikeal Rogers - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS - * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language - * governing permissions and limitations under the License. - */ - - // Remove authorization if changing hostnames (but not if just - // changing ports or protocols). This matches the behavior of request: - // https://github.com/request/request/blob/b12a6245/lib/redirect.js#L134-L138 - if (new url.URL(request.url).hostname !== redirectUrl.hostname) { - request.headers.delete('authorization') - request.headers.delete('cookie') - } - - // for POST request with 301/302 response, or any request with 303 response, - // use GET when following redirect - if ( - response.status === 303 || - (request.method === 'POST' && [301, 302].includes(response.status)) - ) { - _opts.method = 'GET' - _opts.body = null - request.headers.delete('content-length') - } - - _opts.headers = {} - request.headers.forEach((value, key) => { - _opts.headers[key] = value - }) - - _opts.counter = ++request.counter - const redirectReq = new Request(url.format(redirectUrl), _opts) - return { - request: redirectReq, - options: _opts, - } -} - -const fetch = async (request, options) => { - const response = CachePolicy.storable(request, options) - ? 
await cache(request, options) - : await remote(request, options) - - // if the request wasn't a GET or HEAD, and the response - // status is between 200 and 399 inclusive, invalidate the - // request url - if (!['GET', 'HEAD'].includes(request.method) && - response.status >= 200 && - response.status <= 399) { - await cache.invalidate(request, options) - } - - if (!canFollowRedirect(request, response, options)) { - return response - } - - const redirect = getRedirect(request, response, options) - return fetch(redirect.request, redirect.options) -} - -module.exports = fetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js deleted file mode 100644 index 2f12e8e1b61131..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js +++ /dev/null @@ -1,41 +0,0 @@ -const { FetchError, Headers, Request, Response } = require('minipass-fetch') - -const configureOptions = require('./options.js') -const fetch = require('./fetch.js') - -const makeFetchHappen = (url, opts) => { - const options = configureOptions(opts) - - const request = new Request(url, options) - return fetch(request, options) -} - -makeFetchHappen.defaults = (defaultUrl, defaultOptions = {}, wrappedFetch = makeFetchHappen) => { - if (typeof defaultUrl === 'object') { - defaultOptions = defaultUrl - defaultUrl = null - } - - const defaultedFetch = (url, options = {}) => { - const finalUrl = url || defaultUrl - const finalOptions = { - ...defaultOptions, - ...options, - headers: { - ...defaultOptions.headers, - ...options.headers, - }, - } - return wrappedFetch(finalUrl, finalOptions) - } - - defaultedFetch.defaults = (defaultUrl1, defaultOptions1 = {}) => - makeFetchHappen.defaults(defaultUrl1, defaultOptions1, defaultedFetch) - return defaultedFetch -} - -module.exports = makeFetchHappen -module.exports.FetchError = FetchError -module.exports.Headers = Headers -module.exports.Request = Request -module.exports.Response = Response diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js deleted file mode 100644 index f77511279f831d..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js +++ /dev/null @@ -1,54 +0,0 @@ -const dns = require('dns') - -const conditionalHeaders = [ - 'if-modified-since', - 'if-none-match', - 'if-unmodified-since', - 'if-match', - 'if-range', -] - -const configureOptions = (opts) => { - const { strictSSL, ...options } = { ...opts } - options.method = options.method ? 
options.method.toUpperCase() : 'GET' - options.rejectUnauthorized = strictSSL !== false - - if (!options.retry) { - options.retry = { retries: 0 } - } else if (typeof options.retry === 'string') { - const retries = parseInt(options.retry, 10) - if (isFinite(retries)) { - options.retry = { retries } - } else { - options.retry = { retries: 0 } - } - } else if (typeof options.retry === 'number') { - options.retry = { retries: options.retry } - } else { - options.retry = { retries: 0, ...options.retry } - } - - options.dns = { ttl: 5 * 60 * 1000, lookup: dns.lookup, ...options.dns } - - options.cache = options.cache || 'default' - if (options.cache === 'default') { - const hasConditionalHeader = Object.keys(options.headers || {}).some((name) => { - return conditionalHeaders.includes(name.toLowerCase()) - }) - if (hasConditionalHeader) { - options.cache = 'no-store' - } - } - - options.cacheAdditionalHeaders = options.cacheAdditionalHeaders || [] - - // cacheManager is deprecated, but if it's set and - // cachePath is not we should copy it to the new field - if (options.cacheManager && !options.cachePath) { - options.cachePath = options.cacheManager - } - - return options -} - -module.exports = configureOptions diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js deleted file mode 100644 index b1d221b2d0ce31..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' - -const MinipassPipeline = require('minipass-pipeline') - -class CachingMinipassPipeline extends MinipassPipeline { - #events = [] - #data = new Map() - - constructor (opts, ...streams) { - // CRITICAL: do NOT pass the streams to the call to super(), this will start - // the flow of data and potentially cause the events we need to catch to emit - // before we've finished our own setup. 
instead we call super() with no args, - // finish our setup, and then push the streams into ourselves to start the - // data flow - super() - this.#events = opts.events - - /* istanbul ignore next - coverage disabled because this is pointless to test here */ - if (streams.length) { - this.push(...streams) - } - } - - on (event, handler) { - if (this.#events.includes(event) && this.#data.has(event)) { - return handler(...this.#data.get(event)) - } - - return super.on(event, handler) - } - - emit (event, ...data) { - if (this.#events.includes(event)) { - this.#data.set(event, data) - } - - return super.emit(event, ...data) - } -} - -module.exports = CachingMinipassPipeline diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js deleted file mode 100644 index 8554564074de6e..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js +++ /dev/null @@ -1,131 +0,0 @@ -const { Minipass } = require('minipass') -const fetch = require('minipass-fetch') -const promiseRetry = require('promise-retry') -const ssri = require('ssri') -const { log } = require('proc-log') - -const CachingMinipassPipeline = require('./pipeline.js') -const { getAgent } = require('@npmcli/agent') -const pkg = require('../package.json') - -const USER_AGENT = `${pkg.name}/${pkg.version} (+https://npm.im/${pkg.name})` - -const RETRY_ERRORS = [ - 'ECONNRESET', // remote socket closed on us - 'ECONNREFUSED', // remote host refused to open connection - 'EADDRINUSE', // failed to bind to a local port (proxy?) - 'ETIMEDOUT', // someone in the transaction is WAY TOO SLOW - // from @npmcli/agent - 'ECONNECTIONTIMEOUT', - 'EIDLETIMEOUT', - 'ERESPONSETIMEOUT', - 'ETRANSFERTIMEOUT', - // Known codes we do NOT retry on: - // ENOTFOUND (getaddrinfo failure. Either bad hostname, or offline) - // EINVALIDPROXY // invalid protocol from @npmcli/agent - // EINVALIDRESPONSE // invalid status code from @npmcli/agent -] - -const RETRY_TYPES = [ - 'request-timeout', -] - -// make a request directly to the remote source, -// retrying certain classes of errors as well as -// following redirects (through the cache if necessary) -// and verifying response integrity -const remoteFetch = (request, options) => { - const agent = getAgent(request.url, options) - if (!request.headers.has('connection')) { - request.headers.set('connection', agent ? 
'keep-alive' : 'close') - } - - if (!request.headers.has('user-agent')) { - request.headers.set('user-agent', USER_AGENT) - } - - // keep our own options since we're overriding the agent - // and the redirect mode - const _opts = { - ...options, - agent, - redirect: 'manual', - } - - return promiseRetry(async (retryHandler, attemptNum) => { - const req = new fetch.Request(request, _opts) - try { - let res = await fetch(req, _opts) - if (_opts.integrity && res.status === 200) { - // we got a 200 response and the user has specified an expected - // integrity value, so wrap the response in an ssri stream to verify it - const integrityStream = ssri.integrityStream({ - algorithms: _opts.algorithms, - integrity: _opts.integrity, - size: _opts.size, - }) - const pipeline = new CachingMinipassPipeline({ - events: ['integrity', 'size'], - }, res.body, integrityStream) - // we also propagate the integrity and size events out to the pipeline so we can use - // this new response body as an integrityEmitter for cacache - integrityStream.on('integrity', i => pipeline.emit('integrity', i)) - integrityStream.on('size', s => pipeline.emit('size', s)) - res = new fetch.Response(pipeline, res) - // set an explicit flag so we know if our response body will emit integrity and size - res.body.hasIntegrityEmitter = true - } - - res.headers.set('x-fetch-attempts', attemptNum) - - // do not retry POST requests, or requests with a streaming body - // do retry requests with a 408, 420, 429 or 500+ status in the response - const isStream = Minipass.isStream(req.body) - const isRetriable = req.method !== 'POST' && - !isStream && - ([408, 420, 429].includes(res.status) || res.status >= 500) - - if (isRetriable) { - if (typeof options.onRetry === 'function') { - options.onRetry(res) - } - - /* eslint-disable-next-line max-len */ - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${res.status}`) - return retryHandler(res) - } - - return res - } catch (err) { - const code = (err.code === 'EPROMISERETRY') - ? 
err.retried.code - : err.code - - // err.retried will be the thing that was thrown from above - // if it's a response, we just got a bad status code and we - // can re-throw to allow the retry - const isRetryError = err.retried instanceof fetch.Response || - (RETRY_ERRORS.includes(code) && RETRY_TYPES.includes(err.type)) - - if (req.method === 'POST' || isRetryError) { - throw err - } - - if (typeof options.onRetry === 'function') { - options.onRetry(err) - } - - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${err.code}`) - return retryHandler(err) - } - }, options.retry).catch((err) => { - // don't reject for http errors, just return them - if (err.status >= 400 && err.type !== 'system') { - return err - } - - throw err - }) -} - -module.exports = remoteFetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json deleted file mode 100644 index 7adb4d1e7f9719..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "name": "make-fetch-happen", - "version": "13.0.1", - "description": "Opinionated, caching, retrying fetch client", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "posttest": "npm run lint", - "eslint": "eslint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "postlint": "template-oss-check", - "snap": "tap", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/make-fetch-happen.git" - }, - "keywords": [ - "http", - "request", - "fetch", - "mean girls", - "caching", - "cache", - "subresource integrity" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "@npmcli/agent": "^2.0.0", - "cacache": "^18.0.0", - "http-cache-semantics": "^4.1.1", - "is-lambda": "^1.0.1", - "minipass": "^7.0.2", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "proc-log": "^4.2.0", - "promise-retry": "^2.0.1", - "ssri": "^10.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.4", - "nock": "^13.2.4", - "safe-buffer": "^5.2.1", - "standard-version": "^9.3.2", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "tap": { - "color": 1, - "files": "test/*.js", - "check-coverage": true, - "timeout": 60, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.4", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE deleted file mode 100644 index 3c3410cdc12ee3..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Isaac Z. 
Schlueter and Contributors -Copyright (c) 2016 David Frank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---- - -Note: This is a derivative work based on "node-fetch" by David Frank, -modified and distributed under the terms of the MIT license above. -https://github.com/bitinn/node-fetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js deleted file mode 100644 index b18f643269e375..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' -class AbortError extends Error { - constructor (message) { - super(message) - this.code = 'FETCH_ABORTED' - this.type = 'aborted' - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'AbortError' - } - - // don't allow name to be overridden, but don't throw either - set name (s) {} -} -module.exports = AbortError diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js deleted file mode 100644 index 121b1730102e72..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js +++ /dev/null @@ -1,97 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const TYPE = Symbol('type') -const BUFFER = Symbol('buffer') - -class Blob { - constructor (blobParts, options) { - this[TYPE] = '' - - const buffers = [] - let size = 0 - - if (blobParts) { - const a = blobParts - const length = Number(a.length) - for (let i = 0; i < length; i++) { - const element = a[i] - const buffer = element instanceof Buffer ? element - : ArrayBuffer.isView(element) - ? Buffer.from(element.buffer, element.byteOffset, element.byteLength) - : element instanceof ArrayBuffer ? Buffer.from(element) - : element instanceof Blob ? element[BUFFER] - : typeof element === 'string' ? 
Buffer.from(element) - : Buffer.from(String(element)) - size += buffer.length - buffers.push(buffer) - } - } - - this[BUFFER] = Buffer.concat(buffers, size) - - const type = options && options.type !== undefined - && String(options.type).toLowerCase() - if (type && !/[^\u0020-\u007E]/.test(type)) { - this[TYPE] = type - } - } - - get size () { - return this[BUFFER].length - } - - get type () { - return this[TYPE] - } - - text () { - return Promise.resolve(this[BUFFER].toString()) - } - - arrayBuffer () { - const buf = this[BUFFER] - const off = buf.byteOffset - const len = buf.byteLength - const ab = buf.buffer.slice(off, off + len) - return Promise.resolve(ab) - } - - stream () { - return new Minipass().end(this[BUFFER]) - } - - slice (start, end, type) { - const size = this.size - const relativeStart = start === undefined ? 0 - : start < 0 ? Math.max(size + start, 0) - : Math.min(start, size) - const relativeEnd = end === undefined ? size - : end < 0 ? Math.max(size + end, 0) - : Math.min(end, size) - const span = Math.max(relativeEnd - relativeStart, 0) - - const buffer = this[BUFFER] - const slicedBuffer = buffer.slice( - relativeStart, - relativeStart + span - ) - const blob = new Blob([], { type }) - blob[BUFFER] = slicedBuffer - return blob - } - - get [Symbol.toStringTag] () { - return 'Blob' - } - - static get BUFFER () { - return BUFFER - } -} - -Object.defineProperties(Blob.prototype, { - size: { enumerable: true }, - type: { enumerable: true }, -}) - -module.exports = Blob diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js deleted file mode 100644 index 62286bd1de0d91..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js +++ /dev/null @@ -1,350 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const MinipassSized = require('minipass-sized') - -const Blob = require('./blob.js') -const { BUFFER } = Blob -const FetchError = require('./fetch-error.js') - -// optional dependency on 'encoding' -let convert -try { - convert = require('encoding').convert -} catch (e) { - // defer error until textConverted is called -} - -const INTERNALS = Symbol('Body internals') -const CONSUME_BODY = Symbol('consumeBody') - -class Body { - constructor (bodyArg, options = {}) { - const { size = 0, timeout = 0 } = options - const body = bodyArg === undefined || bodyArg === null ? null - : isURLSearchParams(bodyArg) ? Buffer.from(bodyArg.toString()) - : isBlob(bodyArg) ? bodyArg - : Buffer.isBuffer(bodyArg) ? bodyArg - : Object.prototype.toString.call(bodyArg) === '[object ArrayBuffer]' - ? Buffer.from(bodyArg) - : ArrayBuffer.isView(bodyArg) - ? Buffer.from(bodyArg.buffer, bodyArg.byteOffset, bodyArg.byteLength) - : Minipass.isStream(bodyArg) ? bodyArg - : Buffer.from(String(bodyArg)) - - this[INTERNALS] = { - body, - disturbed: false, - error: null, - } - - this.size = size - this.timeout = timeout - - if (Minipass.isStream(body)) { - body.on('error', er => { - const error = er.name === 'AbortError' ? 
er - : new FetchError(`Invalid response while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - this[INTERNALS].error = error - }) - } - } - - get body () { - return this[INTERNALS].body - } - - get bodyUsed () { - return this[INTERNALS].disturbed - } - - arrayBuffer () { - return this[CONSUME_BODY]().then(buf => - buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength)) - } - - blob () { - const ct = this.headers && this.headers.get('content-type') || '' - return this[CONSUME_BODY]().then(buf => Object.assign( - new Blob([], { type: ct.toLowerCase() }), - { [BUFFER]: buf } - )) - } - - async json () { - const buf = await this[CONSUME_BODY]() - try { - return JSON.parse(buf.toString()) - } catch (er) { - throw new FetchError( - `invalid json response body at ${this.url} reason: ${er.message}`, - 'invalid-json' - ) - } - } - - text () { - return this[CONSUME_BODY]().then(buf => buf.toString()) - } - - buffer () { - return this[CONSUME_BODY]() - } - - textConverted () { - return this[CONSUME_BODY]().then(buf => convertBody(buf, this.headers)) - } - - [CONSUME_BODY] () { - if (this[INTERNALS].disturbed) { - return Promise.reject(new TypeError(`body used already for: ${ - this.url}`)) - } - - this[INTERNALS].disturbed = true - - if (this[INTERNALS].error) { - return Promise.reject(this[INTERNALS].error) - } - - // body is null - if (this.body === null) { - return Promise.resolve(Buffer.alloc(0)) - } - - if (Buffer.isBuffer(this.body)) { - return Promise.resolve(this.body) - } - - const upstream = isBlob(this.body) ? this.body.stream() : this.body - - /* istanbul ignore if: should never happen */ - if (!Minipass.isStream(upstream)) { - return Promise.resolve(Buffer.alloc(0)) - } - - const stream = this.size && upstream instanceof MinipassSized ? upstream - : !this.size && upstream instanceof Minipass && - !(upstream instanceof MinipassSized) ? upstream - : this.size ? new MinipassSized({ size: this.size }) - : new Minipass() - - // allow timeout on slow response body, but only if the stream is still writable. this - // makes the timeout center on the socket stream from lib/index.js rather than the - // intermediary minipass stream we create to receive the data - const resTimeout = this.timeout && stream.writable ? setTimeout(() => { - stream.emit('error', new FetchError( - `Response timeout while trying to fetch ${ - this.url} (over ${this.timeout}ms)`, 'body-timeout')) - }, this.timeout) : null - - // do not keep the process open just for this timeout, even - // though we expect it'll get cleared eventually. - if (resTimeout && resTimeout.unref) { - resTimeout.unref() - } - - // do the pipe in the promise, because the pipe() can send too much - // data through right away and upset the MP Sized object - return new Promise((resolve) => { - // if the stream is some other kind of stream, then pipe through a MP - // so we can collect it more easily. 
- if (stream !== upstream) { - upstream.on('error', er => stream.emit('error', er)) - upstream.pipe(stream) - } - resolve() - }).then(() => stream.concat()).then(buf => { - clearTimeout(resTimeout) - return buf - }).catch(er => { - clearTimeout(resTimeout) - // request was aborted, reject with this Error - if (er.name === 'AbortError' || er.name === 'FetchError') { - throw er - } else if (er.name === 'RangeError') { - throw new FetchError(`Could not create Buffer from response body for ${ - this.url}: ${er.message}`, 'system', er) - } else { - // other errors, such as incorrect content-encoding or content-length - throw new FetchError(`Invalid response body while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - } - }) - } - - static clone (instance) { - if (instance.bodyUsed) { - throw new Error('cannot clone body after it is used') - } - - const body = instance.body - - // check that body is a stream and not form-data object - // NB: can't clone the form-data object without having it as a dependency - if (Minipass.isStream(body) && typeof body.getBoundary !== 'function') { - // create a dedicated tee stream so that we don't lose data - // potentially sitting in the body stream's buffer by writing it - // immediately to p1 and not having it for p2. - const tee = new Minipass() - const p1 = new Minipass() - const p2 = new Minipass() - tee.on('error', er => { - p1.emit('error', er) - p2.emit('error', er) - }) - body.on('error', er => tee.emit('error', er)) - tee.pipe(p1) - tee.pipe(p2) - body.pipe(tee) - // set instance body to one fork, return the other - instance[INTERNALS].body = p1 - return p2 - } else { - return instance.body - } - } - - static extractContentType (body) { - return body === null || body === undefined ? null - : typeof body === 'string' ? 'text/plain;charset=UTF-8' - : isURLSearchParams(body) - ? 'application/x-www-form-urlencoded;charset=UTF-8' - : isBlob(body) ? body.type || null - : Buffer.isBuffer(body) ? null - : Object.prototype.toString.call(body) === '[object ArrayBuffer]' ? null - : ArrayBuffer.isView(body) ? null - : typeof body.getBoundary === 'function' - ? `multipart/form-data;boundary=${body.getBoundary()}` - : Minipass.isStream(body) ? null - : 'text/plain;charset=UTF-8' - } - - static getTotalBytes (instance) { - const { body } = instance - return (body === null || body === undefined) ? 0 - : isBlob(body) ? body.size - : Buffer.isBuffer(body) ? body.length - : body && typeof body.getLengthSync === 'function' && ( - // detect form data input from form-data module - body._lengthRetrievers && - /* istanbul ignore next */ body._lengthRetrievers.length === 0 || // 1.x - body.hasKnownLength && body.hasKnownLength()) // 2.x - ? body.getLengthSync() - : null - } - - static writeToStream (dest, instance) { - const { body } = instance - - if (body === null || body === undefined) { - dest.end() - } else if (Buffer.isBuffer(body) || typeof body === 'string') { - dest.end(body) - } else { - // body is stream or blob - const stream = isBlob(body) ? body.stream() : body - stream.on('error', er => dest.emit('error', er)).pipe(dest) - } - - return dest - } -} - -Object.defineProperties(Body.prototype, { - body: { enumerable: true }, - bodyUsed: { enumerable: true }, - arrayBuffer: { enumerable: true }, - blob: { enumerable: true }, - json: { enumerable: true }, - text: { enumerable: true }, -}) - -const isURLSearchParams = obj => - // Duck-typing as a necessary condition. 
- (typeof obj !== 'object' || - typeof obj.append !== 'function' || - typeof obj.delete !== 'function' || - typeof obj.get !== 'function' || - typeof obj.getAll !== 'function' || - typeof obj.has !== 'function' || - typeof obj.set !== 'function') ? false - // Brand-checking and more duck-typing as optional condition. - : obj.constructor.name === 'URLSearchParams' || - Object.prototype.toString.call(obj) === '[object URLSearchParams]' || - typeof obj.sort === 'function' - -const isBlob = obj => - typeof obj === 'object' && - typeof obj.arrayBuffer === 'function' && - typeof obj.type === 'string' && - typeof obj.stream === 'function' && - typeof obj.constructor === 'function' && - typeof obj.constructor.name === 'string' && - /^(Blob|File)$/.test(obj.constructor.name) && - /^(Blob|File)$/.test(obj[Symbol.toStringTag]) - -const convertBody = (buffer, headers) => { - /* istanbul ignore if */ - if (typeof convert !== 'function') { - throw new Error('The package `encoding` must be installed to use the textConverted() function') - } - - const ct = headers && headers.get('content-type') - let charset = 'utf-8' - let res - - // header - if (ct) { - res = /charset=([^;]*)/i.exec(ct) - } - - // no charset in content type, peek at response body for at most 1024 bytes - const str = buffer.slice(0, 1024).toString() - - // html5 - if (!res && str) { - res = / this.expect - ? 'max-size' : type - this.message = message - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'FetchError' - } - - // don't allow name to be overwritten - set name (n) {} - - get [Symbol.toStringTag] () { - return 'FetchError' - } -} -module.exports = FetchError diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js deleted file mode 100644 index dd6e854d5ba399..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js +++ /dev/null @@ -1,267 +0,0 @@ -'use strict' -const invalidTokenRegex = /[^^_`a-zA-Z\-0-9!#$%&'*+.|~]/ -const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/ - -const validateName = name => { - name = `${name}` - if (invalidTokenRegex.test(name) || name === '') { - throw new TypeError(`${name} is not a legal HTTP header name`) - } -} - -const validateValue = value => { - value = `${value}` - if (invalidHeaderCharRegex.test(value)) { - throw new TypeError(`${value} is not a legal HTTP header value`) - } -} - -const find = (map, name) => { - name = name.toLowerCase() - for (const key in map) { - if (key.toLowerCase() === name) { - return key - } - } - return undefined -} - -const MAP = Symbol('map') -class Headers { - constructor (init = undefined) { - this[MAP] = Object.create(null) - if (init instanceof Headers) { - const rawHeaders = init.raw() - const headerNames = Object.keys(rawHeaders) - for (const headerName of headerNames) { - for (const value of rawHeaders[headerName]) { - this.append(headerName, value) - } - } - return - } - - // no-op - if (init === undefined || init === null) { - return - } - - if (typeof init === 'object') { - const method = init[Symbol.iterator] - if (method !== null && method !== undefined) { - if (typeof method !== 'function') { - throw new TypeError('Header pairs must be iterable') - } - - // sequence> - // Note: per spec we have to first exhaust the lists then process them - const pairs = [] - for (const pair of init) { - if (typeof pair !== 'object' || - typeof pair[Symbol.iterator] !== 'function') { - throw new 
TypeError('Each header pair must be iterable') - } - const arrPair = Array.from(pair) - if (arrPair.length !== 2) { - throw new TypeError('Each header pair must be a name/value tuple') - } - pairs.push(arrPair) - } - - for (const pair of pairs) { - this.append(pair[0], pair[1]) - } - } else { - // record - for (const key of Object.keys(init)) { - this.append(key, init[key]) - } - } - } else { - throw new TypeError('Provided initializer must be an object') - } - } - - get (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key === undefined) { - return null - } - - return this[MAP][key].join(', ') - } - - forEach (callback, thisArg = undefined) { - let pairs = getHeaders(this) - for (let i = 0; i < pairs.length; i++) { - const [name, value] = pairs[i] - callback.call(thisArg, value, name, this) - // refresh in case the callback added more headers - pairs = getHeaders(this) - } - } - - set (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - this[MAP][key !== undefined ? key : name] = [value] - } - - append (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - if (key !== undefined) { - this[MAP][key].push(value) - } else { - this[MAP][name] = [value] - } - } - - has (name) { - name = `${name}` - validateName(name) - return find(this[MAP], name) !== undefined - } - - delete (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key !== undefined) { - delete this[MAP][key] - } - } - - raw () { - return this[MAP] - } - - keys () { - return new HeadersIterator(this, 'key') - } - - values () { - return new HeadersIterator(this, 'value') - } - - [Symbol.iterator] () { - return new HeadersIterator(this, 'key+value') - } - - entries () { - return new HeadersIterator(this, 'key+value') - } - - get [Symbol.toStringTag] () { - return 'Headers' - } - - static exportNodeCompatibleHeaders (headers) { - const obj = Object.assign(Object.create(null), headers[MAP]) - - // http.request() only supports string as Host header. This hack makes - // specifying custom Host header possible. - const hostHeaderKey = find(headers[MAP], 'Host') - if (hostHeaderKey !== undefined) { - obj[hostHeaderKey] = obj[hostHeaderKey][0] - } - - return obj - } - - static createHeadersLenient (obj) { - const headers = new Headers() - for (const name of Object.keys(obj)) { - if (invalidTokenRegex.test(name)) { - continue - } - - if (Array.isArray(obj[name])) { - for (const val of obj[name]) { - if (invalidHeaderCharRegex.test(val)) { - continue - } - - if (headers[MAP][name] === undefined) { - headers[MAP][name] = [val] - } else { - headers[MAP][name].push(val) - } - } - } else if (!invalidHeaderCharRegex.test(obj[name])) { - headers[MAP][name] = [obj[name]] - } - } - return headers - } -} - -Object.defineProperties(Headers.prototype, { - get: { enumerable: true }, - forEach: { enumerable: true }, - set: { enumerable: true }, - append: { enumerable: true }, - has: { enumerable: true }, - delete: { enumerable: true }, - keys: { enumerable: true }, - values: { enumerable: true }, - entries: { enumerable: true }, -}) - -const getHeaders = (headers, kind = 'key+value') => - Object.keys(headers[MAP]).sort().map( - kind === 'key' ? k => k.toLowerCase() - : kind === 'value' ? 
k => headers[MAP][k].join(', ') - : k => [k.toLowerCase(), headers[MAP][k].join(', ')] - ) - -const INTERNAL = Symbol('internal') - -class HeadersIterator { - constructor (target, kind) { - this[INTERNAL] = { - target, - kind, - index: 0, - } - } - - get [Symbol.toStringTag] () { - return 'HeadersIterator' - } - - next () { - /* istanbul ignore if: should be impossible */ - if (!this || Object.getPrototypeOf(this) !== HeadersIterator.prototype) { - throw new TypeError('Value of `this` is not a HeadersIterator') - } - - const { target, kind, index } = this[INTERNAL] - const values = getHeaders(target, kind) - const len = values.length - if (index >= len) { - return { - value: undefined, - done: true, - } - } - - this[INTERNAL].index++ - - return { value: values[index], done: false } - } -} - -// manually extend because 'extends' requires a ctor -Object.setPrototypeOf(HeadersIterator.prototype, - Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))) - -module.exports = Headers diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js deleted file mode 100644 index da402161670e65..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js +++ /dev/null @@ -1,377 +0,0 @@ -'use strict' -const { URL } = require('url') -const http = require('http') -const https = require('https') -const zlib = require('minizlib') -const { Minipass } = require('minipass') - -const Body = require('./body.js') -const { writeToStream, getTotalBytes } = Body -const Response = require('./response.js') -const Headers = require('./headers.js') -const { createHeadersLenient } = Headers -const Request = require('./request.js') -const { getNodeRequestOptions } = Request -const FetchError = require('./fetch-error.js') -const AbortError = require('./abort-error.js') - -// XXX this should really be split up and unit-ized for easier testing -// and better DRY implementation of data/http request aborting -const fetch = async (url, opts) => { - if (/^data:/.test(url)) { - const request = new Request(url, opts) - // delay 1 promise tick so that the consumer can abort right away - return Promise.resolve().then(() => new Promise((resolve, reject) => { - let type, data - try { - const { pathname, search } = new URL(url) - const split = pathname.split(',') - if (split.length < 2) { - throw new Error('invalid data: URI') - } - const mime = split.shift() - const base64 = /;base64$/.test(mime) - type = base64 ? mime.slice(0, -1 * ';base64'.length) : mime - const rawData = decodeURIComponent(split.join(',') + search) - data = base64 ? Buffer.from(rawData, 'base64') : Buffer.from(rawData) - } catch (er) { - return reject(new FetchError(`[${request.method}] ${ - request.url} invalid URL, ${er.message}`, 'system', er)) - } - - const { signal } = request - if (signal && signal.aborted) { - return reject(new AbortError('The user aborted a request.')) - } - - const headers = { 'Content-Length': data.length } - if (type) { - headers['Content-Type'] = type - } - return resolve(new Response(data, { headers })) - })) - } - - return new Promise((resolve, reject) => { - // build request object - const request = new Request(url, opts) - let options - try { - options = getNodeRequestOptions(request) - } catch (er) { - return reject(er) - } - - const send = (options.protocol === 'https:' ? 
https : http).request - const { signal } = request - let response = null - const abort = () => { - const error = new AbortError('The user aborted a request.') - reject(error) - if (Minipass.isStream(request.body) && - typeof request.body.destroy === 'function') { - request.body.destroy(error) - } - if (response && response.body) { - response.body.emit('error', error) - } - } - - if (signal && signal.aborted) { - return abort() - } - - const abortAndFinalize = () => { - abort() - finalize() - } - - const finalize = () => { - req.abort() - if (signal) { - signal.removeEventListener('abort', abortAndFinalize) - } - clearTimeout(reqTimeout) - } - - // send request - const req = send(options) - - if (signal) { - signal.addEventListener('abort', abortAndFinalize) - } - - let reqTimeout = null - if (request.timeout) { - req.once('socket', () => { - reqTimeout = setTimeout(() => { - reject(new FetchError(`network timeout at: ${ - request.url}`, 'request-timeout')) - finalize() - }, request.timeout) - }) - } - - req.on('error', er => { - // if a 'response' event is emitted before the 'error' event, then by the - // time this handler is run it's too late to reject the Promise for the - // response. instead, we forward the error event to the response stream - // so that the error will surface to the user when they try to consume - // the body. this is done as a side effect of aborting the request except - // for in windows, where we must forward the event manually, otherwise - // there is no longer a ref'd socket attached to the request and the - // stream never ends so the event loop runs out of work and the process - // exits without warning. - // coverage skipped here due to the difficulty in testing - // istanbul ignore next - if (req.res) { - req.res.emit('error', er) - } - reject(new FetchError(`request to ${request.url} failed, reason: ${ - er.message}`, 'system', er)) - finalize() - }) - - req.on('response', res => { - clearTimeout(reqTimeout) - - const headers = createHeadersLenient(res.headers) - - // HTTP fetch step 5 - if (fetch.isRedirect(res.statusCode)) { - // HTTP fetch step 5.2 - const location = headers.get('Location') - - // HTTP fetch step 5.3 - let locationURL = null - try { - locationURL = location === null ? null : new URL(location, request.url).toString() - } catch { - // error here can only be invalid URL in Location: header - // do not throw when options.redirect == manual - // let the user extract the errorneous redirect URL - if (request.redirect !== 'manual') { - /* eslint-disable-next-line max-len */ - reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')) - finalize() - return - } - } - - // HTTP fetch step 5.5 - if (request.redirect === 'error') { - reject(new FetchError('uri requested responds with a redirect, ' + - `redirect mode is set to error: ${request.url}`, 'no-redirect')) - finalize() - return - } else if (request.redirect === 'manual') { - // node-fetch-specific step: make manual redirect a bit easier to - // use by setting the Location header value to the resolved URL. 
- if (locationURL !== null) { - // handle corrupted header - try { - headers.set('Location', locationURL) - } catch (err) { - /* istanbul ignore next: nodejs server prevent invalid - response headers, we can't test this through normal - request */ - reject(err) - } - } - } else if (request.redirect === 'follow' && locationURL !== null) { - // HTTP-redirect fetch step 5 - if (request.counter >= request.follow) { - reject(new FetchError(`maximum redirect reached at: ${ - request.url}`, 'max-redirect')) - finalize() - return - } - - // HTTP-redirect fetch step 9 - if (res.statusCode !== 303 && - request.body && - getTotalBytes(request) === null) { - reject(new FetchError( - 'Cannot follow redirect with body being a readable stream', - 'unsupported-redirect' - )) - finalize() - return - } - - // Update host due to redirection - request.headers.set('host', (new URL(locationURL)).host) - - // HTTP-redirect fetch step 6 (counter increment) - // Create a new Request object. - const requestOpts = { - headers: new Headers(request.headers), - follow: request.follow, - counter: request.counter + 1, - agent: request.agent, - compress: request.compress, - method: request.method, - body: request.body, - signal: request.signal, - timeout: request.timeout, - } - - // if the redirect is to a new hostname, strip the authorization and cookie headers - const parsedOriginal = new URL(request.url) - const parsedRedirect = new URL(locationURL) - if (parsedOriginal.hostname !== parsedRedirect.hostname) { - requestOpts.headers.delete('authorization') - requestOpts.headers.delete('cookie') - } - - // HTTP-redirect fetch step 11 - if (res.statusCode === 303 || ( - (res.statusCode === 301 || res.statusCode === 302) && - request.method === 'POST' - )) { - requestOpts.method = 'GET' - requestOpts.body = undefined - requestOpts.headers.delete('content-length') - } - - // HTTP-redirect fetch step 15 - resolve(fetch(new Request(locationURL, requestOpts))) - finalize() - return - } - } // end if(isRedirect) - - // prepare response - res.once('end', () => - signal && signal.removeEventListener('abort', abortAndFinalize)) - - const body = new Minipass() - // if an error occurs, either on the response stream itself, on one of the - // decoder streams, or a response length timeout from the Body class, we - // forward the error through to our internal body stream. If we see an - // error event on that, we call finalize to abort the request and ensure - // we don't leave a socket believing a request is in flight. - // this is difficult to test, so lacks specific coverage. - body.on('error', finalize) - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - res.on('error', /* istanbul ignore next */ er => body.emit('error', er)) - res.on('data', (chunk) => body.write(chunk)) - res.on('end', () => body.end()) - - const responseOptions = { - url: request.url, - status: res.statusCode, - statusText: res.statusMessage, - headers: headers, - size: request.size, - timeout: request.timeout, - counter: request.counter, - trailer: new Promise(resolveTrailer => - res.on('end', () => resolveTrailer(createHeadersLenient(res.trailers)))), - } - - // HTTP-network fetch step 12.1.1.3 - const codings = headers.get('Content-Encoding') - - // HTTP-network fetch step 12.1.1.4: handle content codings - - // in following scenarios we ignore compression support - // 1. compression support is disabled - // 2. HEAD request - // 3. no Content-Encoding header - // 4. 
no content response (204) - // 5. content not modified response (304) - if (!request.compress || - request.method === 'HEAD' || - codings === null || - res.statusCode === 204 || - res.statusCode === 304) { - response = new Response(body, responseOptions) - resolve(response) - return - } - - // Be less strict when decoding compressed responses, since sometimes - // servers send slightly invalid responses that are still accepted - // by common browsers. - // Always using Z_SYNC_FLUSH is what cURL does. - const zlibOptions = { - flush: zlib.constants.Z_SYNC_FLUSH, - finishFlush: zlib.constants.Z_SYNC_FLUSH, - } - - // for gzip - if (codings === 'gzip' || codings === 'x-gzip') { - const unzip = new zlib.Gunzip(zlibOptions) - response = new Response( - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => unzip.emit('error', er)).pipe(unzip), - responseOptions - ) - resolve(response) - return - } - - // for deflate - if (codings === 'deflate' || codings === 'x-deflate') { - // handle the infamous raw deflate response from old servers - // a hack for old IIS and Apache servers - const raw = res.pipe(new Minipass()) - raw.once('data', chunk => { - // see http://stackoverflow.com/questions/37519828 - const decoder = (chunk[0] & 0x0F) === 0x08 - ? new zlib.Inflate() - : new zlib.InflateRaw() - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - }) - return - } - - // for br - if (codings === 'br') { - // ignoring coverage so tests don't have to fake support (or lack of) for brotli - // istanbul ignore next - try { - var decoder = new zlib.BrotliDecompress() - } catch (err) { - reject(err) - finalize() - return - } - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. 
- body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - return - } - - // otherwise, use response as-is - response = new Response(body, responseOptions) - resolve(response) - }) - - writeToStream(req, request) - }) -} - -module.exports = fetch - -fetch.isRedirect = code => - code === 301 || - code === 302 || - code === 303 || - code === 307 || - code === 308 - -fetch.Headers = Headers -fetch.Request = Request -fetch.Response = Response -fetch.FetchError = FetchError -fetch.AbortError = AbortError diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js deleted file mode 100644 index 054439e6699107..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js +++ /dev/null @@ -1,282 +0,0 @@ -'use strict' -const { URL } = require('url') -const { Minipass } = require('minipass') -const Headers = require('./headers.js') -const { exportNodeCompatibleHeaders } = Headers -const Body = require('./body.js') -const { clone, extractContentType, getTotalBytes } = Body - -const version = require('../package.json').version -const defaultUserAgent = - `minipass-fetch/${version} (+https://github.com/isaacs/minipass-fetch)` - -const INTERNALS = Symbol('Request internals') - -const isRequest = input => - typeof input === 'object' && typeof input[INTERNALS] === 'object' - -const isAbortSignal = signal => { - const proto = ( - signal - && typeof signal === 'object' - && Object.getPrototypeOf(signal) - ) - return !!(proto && proto.constructor.name === 'AbortSignal') -} - -class Request extends Body { - constructor (input, init = {}) { - const parsedURL = isRequest(input) ? new URL(input.url) - : input && input.href ? new URL(input.href) - : new URL(`${input}`) - - if (isRequest(input)) { - init = { ...input[INTERNALS], ...init } - } else if (!input || typeof input === 'string') { - input = {} - } - - const method = (init.method || input.method || 'GET').toUpperCase() - const isGETHEAD = method === 'GET' || method === 'HEAD' - - if ((init.body !== null && init.body !== undefined || - isRequest(input) && input.body !== null) && isGETHEAD) { - throw new TypeError('Request with GET/HEAD method cannot have body') - } - - const inputBody = init.body !== null && init.body !== undefined ? init.body - : isRequest(input) && input.body !== null ? clone(input) - : null - - super(inputBody, { - timeout: init.timeout || input.timeout || 0, - size: init.size || input.size || 0, - }) - - const headers = new Headers(init.headers || input.headers || {}) - - if (inputBody !== null && inputBody !== undefined && - !headers.has('Content-Type')) { - const contentType = extractContentType(inputBody) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - const signal = 'signal' in init ? 
init.signal - : null - - if (signal !== null && signal !== undefined && !isAbortSignal(signal)) { - throw new TypeError('Expected signal must be an instanceof AbortSignal') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0', - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = init - - this[INTERNALS] = { - method, - redirect: init.redirect || input.redirect || 'follow', - headers, - parsedURL, - signal, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } - - // node-fetch-only options - this.follow = init.follow !== undefined ? init.follow - : input.follow !== undefined ? input.follow - : 20 - this.compress = init.compress !== undefined ? init.compress - : input.compress !== undefined ? input.compress - : true - this.counter = init.counter || input.counter || 0 - this.agent = init.agent || input.agent - } - - get method () { - return this[INTERNALS].method - } - - get url () { - return this[INTERNALS].parsedURL.toString() - } - - get headers () { - return this[INTERNALS].headers - } - - get redirect () { - return this[INTERNALS].redirect - } - - get signal () { - return this[INTERNALS].signal - } - - clone () { - return new Request(this) - } - - get [Symbol.toStringTag] () { - return 'Request' - } - - static getNodeRequestOptions (request) { - const parsedURL = request[INTERNALS].parsedURL - const headers = new Headers(request[INTERNALS].headers) - - // fetch step 1.3 - if (!headers.has('Accept')) { - headers.set('Accept', '*/*') - } - - // Basic fetch - if (!/^https?:$/.test(parsedURL.protocol)) { - throw new TypeError('Only HTTP(S) protocols are supported') - } - - if (request.signal && - Minipass.isStream(request.body) && - typeof request.body.destroy !== 'function') { - throw new Error( - 'Cancellation of streamed requests with AbortSignal is not supported') - } - - // HTTP-network-or-cache fetch steps 2.4-2.7 - const contentLengthValue = - (request.body === null || request.body === undefined) && - /^(POST|PUT)$/i.test(request.method) ? '0' - : request.body !== null && request.body !== undefined - ? getTotalBytes(request) - : null - - if (contentLengthValue) { - headers.set('Content-Length', contentLengthValue + '') - } - - // HTTP-network-or-cache fetch step 2.11 - if (!headers.has('User-Agent')) { - headers.set('User-Agent', defaultUserAgent) - } - - // HTTP-network-or-cache fetch step 2.15 - if (request.compress && !headers.has('Accept-Encoding')) { - headers.set('Accept-Encoding', 'gzip,deflate') - } - - const agent = typeof request.agent === 'function' - ? 
request.agent(parsedURL) - : request.agent - - if (!headers.has('Connection') && !agent) { - headers.set('Connection', 'close') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = request[INTERNALS] - - // HTTP-network fetch step 4.2 - // chunked encoding is handled by Node.js - - // we cannot spread parsedURL directly, so we have to read each property one-by-one - // and map them to the equivalent https?.request() method options - const urlProps = { - auth: parsedURL.username || parsedURL.password - ? `${parsedURL.username}:${parsedURL.password}` - : '', - host: parsedURL.host, - hostname: parsedURL.hostname, - path: `${parsedURL.pathname}${parsedURL.search}`, - port: parsedURL.port, - protocol: parsedURL.protocol, - } - - return { - ...urlProps, - method: request.method, - headers: exportNodeCompatibleHeaders(headers), - agent, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - timeout: request.timeout, - } - } -} - -module.exports = Request - -Object.defineProperties(Request.prototype, { - method: { enumerable: true }, - url: { enumerable: true }, - headers: { enumerable: true }, - redirect: { enumerable: true }, - clone: { enumerable: true }, - signal: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js deleted file mode 100644 index 54cb52db3594a7..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js +++ /dev/null @@ -1,90 +0,0 @@ -'use strict' -const http = require('http') -const { STATUS_CODES } = http - -const Headers = require('./headers.js') -const Body = require('./body.js') -const { clone, extractContentType } = Body - -const INTERNALS = Symbol('Response internals') - -class Response extends Body { - constructor (body = null, opts = {}) { - super(body, opts) - - const status = opts.status || 200 - const headers = new Headers(opts.headers) - - if (body !== null && body !== undefined && !headers.has('Content-Type')) { - const contentType = extractContentType(body) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - this[INTERNALS] = { - url: opts.url, - status, - statusText: opts.statusText || STATUS_CODES[status], - headers, - counter: opts.counter, - trailer: Promise.resolve(opts.trailer || new Headers()), - } - } - - get trailer () { - return this[INTERNALS].trailer - } - - get url () { - return this[INTERNALS].url || '' - } - - get status () { - return this[INTERNALS].status - } - - get ok () { - return this[INTERNALS].status >= 200 && this[INTERNALS].status < 300 - } - - get redirected () { - return this[INTERNALS].counter > 0 - } - - get statusText () { - return this[INTERNALS].statusText - } - - get headers () { - return this[INTERNALS].headers - } - - clone () { - return new Response(clone(this), { - url: this.url, - status: this.status, - statusText: this.statusText, - headers: this.headers, - ok: this.ok, - redirected: this.redirected, - trailer: this.trailer, - }) - } - - get [Symbol.toStringTag] () { - return 'Response' - } -} - -module.exports = Response 
- -Object.defineProperties(Response.prototype, { - url: { enumerable: true }, - status: { enumerable: true }, - ok: { enumerable: true }, - redirected: { enumerable: true }, - statusText: { enumerable: true }, - headers: { enumerable: true }, - clone: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json deleted file mode 100644 index d491a7fba126d0..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "minipass-fetch", - "version": "3.0.5", - "description": "An implementation of window.fetch in Node.js using Minipass streams", - "license": "MIT", - "main": "lib/index.js", - "scripts": { - "test:tls-fixtures": "./test/fixtures/tls/setup.sh", - "test": "tap", - "snap": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "tap": { - "coverage-map": "map.js", - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "@ungap/url-search-params": "^0.2.2", - "abort-controller": "^3.0.0", - "abortcontroller-polyfill": "~1.7.3", - "encoding": "^0.1.13", - "form-data": "^4.0.0", - "nock": "^13.2.4", - "parted": "^0.1.1", - "string-to-arraybuffer": "^1.0.2", - "tap": "^16.0.0" - }, - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/minipass-fetch.git" - }, - "keywords": [ - "fetch", - "minipass", - "node-fetch", - "window.fetch" - ], - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "author": "GitHub Inc.", - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js deleted file mode 100644 index 86d90861078dab..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js +++ /dev/null @@ -1,153 +0,0 @@ -const META = Symbol('proc-log.meta') -module.exports = { - META: META, - output: { - LEVELS: [ - 'standard', - 'error', - 'buffer', - 'flush', - ], - KEYS: { - standard: 'standard', - error: 'error', - buffer: 'buffer', - flush: 'flush', - }, - standard: function (...args) { - return process.emit('output', 'standard', ...args) - }, - error: function (...args) { - return process.emit('output', 'error', ...args) - }, - buffer: function (...args) { - return process.emit('output', 'buffer', ...args) - }, - flush: function (...args) { - return process.emit('output', 'flush', ...args) - }, - }, - log: { - LEVELS: [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'timing', - 'pause', - 'resume', - ], - KEYS: { - notice: 'notice', - error: 'error', - warn: 'warn', - info: 'info', - verbose: 'verbose', - http: 'http', - silly: 'silly', - timing: 'timing', - pause: 'pause', - resume: 'resume', - }, - error: function (...args) { - return process.emit('log', 'error', ...args) - }, - notice: function (...args) { - return process.emit('log', 'notice', ...args) - }, - warn: function (...args) { - return process.emit('log', 'warn', ...args) - }, - info: function (...args) { - return process.emit('log', 'info', ...args) - }, - verbose: function (...args) { - return process.emit('log', 'verbose', ...args) - }, - http: function (...args) { - return process.emit('log', 'http', ...args) - }, - silly: function (...args) { - return process.emit('log', 'silly', ...args) - }, - timing: function (...args) { - return process.emit('log', 'timing', ...args) - }, - pause: function () { - return process.emit('log', 'pause') - }, - resume: function () { - return process.emit('log', 'resume') - }, - }, - time: { - LEVELS: [ - 'start', - 'end', - ], - KEYS: { - start: 'start', - end: 'end', - }, - start: function (name, fn) { - process.emit('time', 'start', name) - function end () { - return process.emit('time', 'end', name) - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function (name) { - return process.emit('time', 'end', name) - }, - }, - input: { - LEVELS: [ - 'start', - 'end', - 'read', - ], - KEYS: { - start: 'start', - end: 'end', - read: 'read', - }, - start: function (fn) { - process.emit('input', 'start') - function end () { - return process.emit('input', 'end') - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function () { - return process.emit('input', 'end') - }, - read: function (...args) { - let resolve, reject - const promise = new Promise((_resolve, _reject) => { - resolve = _resolve - reject = _reject - }) - process.emit('input', 'read', resolve, reject, ...args) - return promise - }, - }, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json b/deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json deleted file mode 100644 index 4ab89102ecc9b5..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json +++ 
/dev/null @@ -1,45 +0,0 @@ -{ - "name": "proc-log", - "version": "4.2.0", - "files": [ - "bin/", - "lib/" - ], - "main": "lib/index.js", - "description": "just emit 'log' events on the process object", - "repository": { - "type": "git", - "url": "https://github.com/npm/proc-log.git" - }, - "author": "GitHub Inc.", - "license": "ISC", - "scripts": { - "test": "tap", - "snap": "tap", - "posttest": "npm run lint", - "postsnap": "eslint index.js test/*.js --fix", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "template-oss-apply": "template-oss-apply --force" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": true - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md b/deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md deleted file mode 100644 index e335388869f50f..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2021 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js deleted file mode 100644 index 7d749ed480fb98..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js +++ /dev/null @@ -1,580 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { Minipass } = require('minipass') - -const SPEC_ALGORITHMS = ['sha512', 'sha384', 'sha256'] -const DEFAULT_ALGORITHMS = ['sha512'] - -// TODO: this should really be a hardcoded list of algorithms we support, -// rather than [a-z0-9]. -const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i -const SRI_REGEX = /^([a-z0-9]+)-([^?]+)([?\S*]*)$/ -const STRICT_SRI_REGEX = /^([a-z0-9]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)?$/ -const VCHAR_REGEX = /^[\x21-\x7E]+$/ - -const getOptString = options => options?.length ? `?${options.join('?')}` : '' - -class IntegrityStream extends Minipass { - #emittedIntegrity - #emittedSize - #emittedVerified - - constructor (opts) { - super() - this.size = 0 - this.opts = opts - - // may be overridden later, but set now for class consistency - this.#getOptions() - - // options used for calculating stream. can't be changed. 
- if (opts?.algorithms) { - this.algorithms = [...opts.algorithms] - } else { - this.algorithms = [...DEFAULT_ALGORITHMS] - } - if (this.algorithm !== null && !this.algorithms.includes(this.algorithm)) { - this.algorithms.push(this.algorithm) - } - - this.hashes = this.algorithms.map(crypto.createHash) - } - - #getOptions () { - // For verification - this.sri = this.opts?.integrity ? parse(this.opts?.integrity, this.opts) : null - this.expectedSize = this.opts?.size - - if (!this.sri) { - this.algorithm = null - } else if (this.sri.isHash) { - this.goodSri = true - this.algorithm = this.sri.algorithm - } else { - this.goodSri = !this.sri.isEmpty() - this.algorithm = this.sri.pickAlgorithm(this.opts) - } - - this.digests = this.goodSri ? this.sri[this.algorithm] : null - this.optString = getOptString(this.opts?.options) - } - - on (ev, handler) { - if (ev === 'size' && this.#emittedSize) { - return handler(this.#emittedSize) - } - - if (ev === 'integrity' && this.#emittedIntegrity) { - return handler(this.#emittedIntegrity) - } - - if (ev === 'verified' && this.#emittedVerified) { - return handler(this.#emittedVerified) - } - - return super.on(ev, handler) - } - - emit (ev, data) { - if (ev === 'end') { - this.#onEnd() - } - return super.emit(ev, data) - } - - write (data) { - this.size += data.length - this.hashes.forEach(h => h.update(data)) - return super.write(data) - } - - #onEnd () { - if (!this.goodSri) { - this.#getOptions() - } - const newSri = parse(this.hashes.map((h, i) => { - return `${this.algorithms[i]}-${h.digest('base64')}${this.optString}` - }).join(' '), this.opts) - // Integrity verification mode - const match = this.goodSri && newSri.match(this.sri, this.opts) - if (typeof this.expectedSize === 'number' && this.size !== this.expectedSize) { - /* eslint-disable-next-line max-len */ - const err = new Error(`stream size mismatch when checking ${this.sri}.\n Wanted: ${this.expectedSize}\n Found: ${this.size}`) - err.code = 'EBADSIZE' - err.found = this.size - err.expected = this.expectedSize - err.sri = this.sri - this.emit('error', err) - } else if (this.sri && !match) { - /* eslint-disable-next-line max-len */ - const err = new Error(`${this.sri} integrity checksum failed when using ${this.algorithm}: wanted ${this.digests} but got ${newSri}. (${this.size} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = this.digests - err.algorithm = this.algorithm - err.sri = this.sri - this.emit('error', err) - } else { - this.#emittedSize = this.size - this.emit('size', this.size) - this.#emittedIntegrity = newSri - this.emit('integrity', newSri) - if (match) { - this.#emittedVerified = match - this.emit('verified', match) - } - } - } -} - -class Hash { - get isHash () { - return true - } - - constructor (hash, opts) { - const strict = opts?.strict - this.source = hash.trim() - - // set default values so that we make V8 happy to - // always see a familiar object template. - this.digest = '' - this.algorithm = '' - this.options = [] - - // 3.1. Integrity metadata (called "Hash" by ssri) - // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description - const match = this.source.match( - strict - ? 
STRICT_SRI_REGEX - : SRI_REGEX - ) - if (!match) { - return - } - if (strict && !SPEC_ALGORITHMS.includes(match[1])) { - return - } - this.algorithm = match[1] - this.digest = match[2] - - const rawOpts = match[3] - if (rawOpts) { - this.options = rawOpts.slice(1).split('?') - } - } - - hexDigest () { - return this.digest && Buffer.from(this.digest, 'base64').toString('hex') - } - - toJSON () { - return this.toString() - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - if (other.isIntegrity) { - const algo = other.pickAlgorithm(opts, [this.algorithm]) - - if (!algo) { - return false - } - - const foundHash = other[algo].find(hash => hash.digest === this.digest) - - if (foundHash) { - return foundHash - } - - return false - } - return other.digest === this.digest ? other : false - } - - toString (opts) { - if (opts?.strict) { - // Strict mode enforces the standard as close to the foot of the - // letter as it can. - if (!( - // The spec has very restricted productions for algorithms. - // https://www.w3.org/TR/CSP2/#source-list-syntax - SPEC_ALGORITHMS.includes(this.algorithm) && - // Usually, if someone insists on using a "different" base64, we - // leave it as-is, since there's multiple standards, and the - // specified is not a URL-safe variant. - // https://www.w3.org/TR/CSP2/#base64_value - this.digest.match(BASE64_REGEX) && - // Option syntax is strictly visual chars. - // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression - // https://tools.ietf.org/html/rfc5234#appendix-B.1 - this.options.every(opt => opt.match(VCHAR_REGEX)) - )) { - return '' - } - } - return `${this.algorithm}-${this.digest}${getOptString(this.options)}` - } -} - -function integrityHashToString (toString, sep, opts, hashes) { - const toStringIsNotEmpty = toString !== '' - - let shouldAddFirstSep = false - let complement = '' - - const lastIndex = hashes.length - 1 - - for (let i = 0; i < lastIndex; i++) { - const hashString = Hash.prototype.toString.call(hashes[i], opts) - - if (hashString) { - shouldAddFirstSep = true - - complement += hashString - complement += sep - } - } - - const finalHashString = Hash.prototype.toString.call(hashes[lastIndex], opts) - - if (finalHashString) { - shouldAddFirstSep = true - complement += finalHashString - } - - if (toStringIsNotEmpty && shouldAddFirstSep) { - return toString + sep + complement - } - - return toString + complement -} - -class Integrity { - get isIntegrity () { - return true - } - - toJSON () { - return this.toString() - } - - isEmpty () { - return Object.keys(this).length === 0 - } - - toString (opts) { - let sep = opts?.sep || ' ' - let toString = '' - - if (opts?.strict) { - // Entries must be separated by whitespace, according to spec. - sep = sep.replace(/\S+/g, ' ') - - for (const hash of SPEC_ALGORITHMS) { - if (this[hash]) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - } else { - for (const hash of Object.keys(this)) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - - return toString - } - - concat (integrity, opts) { - const other = typeof integrity === 'string' - ? integrity - : stringify(integrity, opts) - return parse(`${this.toString(opts)} ${other}`, opts) - } - - hexDigest () { - return parse(this, { single: true }).hexDigest() - } - - // add additional hashes to an integrity value, but prevent - // *changing* an existing integrity hash. 
- merge (integrity, opts) { - const other = parse(integrity, opts) - for (const algo in other) { - if (this[algo]) { - if (!this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest))) { - throw new Error('hashes do not match, cannot update integrity') - } - } else { - this[algo] = other[algo] - } - } - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - const algo = other.pickAlgorithm(opts, Object.keys(this)) - return ( - !!algo && - this[algo] && - other[algo] && - this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest - ) - ) - ) || false - } - - // Pick the highest priority algorithm present, optionally also limited to a - // set of hashes found in another integrity. When limiting it may return - // nothing. - pickAlgorithm (opts, hashes) { - const pickAlgorithm = opts?.pickAlgorithm || getPrioritizedHash - const keys = Object.keys(this).filter(k => { - if (hashes?.length) { - return hashes.includes(k) - } - return true - }) - if (keys.length) { - return keys.reduce((acc, algo) => pickAlgorithm(acc, algo) || acc) - } - // no intersection between this and hashes, - return null - } -} - -module.exports.parse = parse -function parse (sri, opts) { - if (!sri) { - return null - } - if (typeof sri === 'string') { - return _parse(sri, opts) - } else if (sri.algorithm && sri.digest) { - const fullSri = new Integrity() - fullSri[sri.algorithm] = [sri] - return _parse(stringify(fullSri, opts), opts) - } else { - return _parse(stringify(sri, opts), opts) - } -} - -function _parse (integrity, opts) { - // 3.4.3. Parse metadata - // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata - if (opts?.single) { - return new Hash(integrity, opts) - } - const hashes = integrity.trim().split(/\s+/).reduce((acc, string) => { - const hash = new Hash(string, opts) - if (hash.algorithm && hash.digest) { - const algo = hash.algorithm - if (!acc[algo]) { - acc[algo] = [] - } - acc[algo].push(hash) - } - return acc - }, new Integrity()) - return hashes.isEmpty() ? null : hashes -} - -module.exports.stringify = stringify -function stringify (obj, opts) { - if (obj.algorithm && obj.digest) { - return Hash.prototype.toString.call(obj, opts) - } else if (typeof obj === 'string') { - return stringify(parse(obj, opts), opts) - } else { - return Integrity.prototype.toString.call(obj, opts) - } -} - -module.exports.fromHex = fromHex -function fromHex (hexDigest, algorithm, opts) { - const optString = getOptString(opts?.options) - return parse( - `${algorithm}-${ - Buffer.from(hexDigest, 'hex').toString('base64') - }${optString}`, opts - ) -} - -module.exports.fromData = fromData -function fromData (data, opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - return algorithms.reduce((acc, algo) => { - const digest = crypto.createHash(algo).update(data).digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the string we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) -} - -module.exports.fromStream = fromStream -function fromStream (stream, opts) { - const istream = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(istream) - stream.on('error', reject) - istream.on('error', reject) - let sri - istream.on('integrity', s => { - sri = s - }) - istream.on('end', () => resolve(sri)) - istream.resume() - }) -} - -module.exports.checkData = checkData -function checkData (data, sri, opts) { - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - if (opts?.error) { - throw Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - ) - } else { - return false - } - } - const algorithm = sri.pickAlgorithm(opts) - const digest = crypto.createHash(algorithm).update(data).digest('base64') - const newSri = parse({ algorithm, digest }) - const match = newSri.match(sri, opts) - opts = opts || {} - if (match || !(opts.error)) { - return match - } else if (typeof opts.size === 'number' && (data.length !== opts.size)) { - /* eslint-disable-next-line max-len */ - const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`) - err.code = 'EBADSIZE' - err.found = data.length - err.expected = opts.size - err.sri = sri - throw err - } else { - /* eslint-disable-next-line max-len */ - const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = sri - err.algorithm = algorithm - err.sri = sri - throw err - } -} - -module.exports.checkStream = checkStream -function checkStream (stream, sri, opts) { - opts = opts || Object.create(null) - opts.integrity = sri - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - return Promise.reject(Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - )) - } - const checker = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(checker) - stream.on('error', reject) - checker.on('error', reject) - let verified - checker.on('verified', s => { - verified = s - }) - checker.on('end', () => resolve(verified)) - checker.resume() - }) -} - -module.exports.integrityStream = integrityStream -function integrityStream (opts = Object.create(null)) { - return new IntegrityStream(opts) -} - -module.exports.create = createIntegrity -function createIntegrity (opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - - const hashes = algorithms.map(crypto.createHash) - - return { - update: function (chunk, enc) { - hashes.forEach(h => h.update(chunk, enc)) - return this - }, - digest: function () { - const integrity = algorithms.reduce((acc, algo) => { - const digest = hashes.shift().digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the hash we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) - - return integrity - }, - } -} - -const NODE_HASHES = crypto.getHashes() - -// This is a Best Effort™ at a reasonable priority for hash algos -const DEFAULT_PRIORITY = [ - 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', - // TODO - it's unclear _which_ of these Node will actually use as its name - // for the algorithm, so we guesswork it based on the OpenSSL names. - 'sha3', - 'sha3-256', 'sha3-384', 'sha3-512', - 'sha3_256', 'sha3_384', 'sha3_512', -].filter(algo => NODE_HASHES.includes(algo)) - -function getPrioritizedHash (algo1, algo2) { - /* eslint-disable-next-line max-len */ - return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase()) - ? algo1 - : algo2 -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/ssri/package.json b/deps/npm/node_modules/tuf-js/node_modules/ssri/package.json deleted file mode 100644 index 28395414e4643c..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/ssri/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "ssri", - "version": "10.0.6", - "description": "Standard Subresource Integrity library -- parses, serializes, generates, and verifies integrity metadata according to the SRI spec.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "prerelease": "npm t", - "postrelease": "npm publish", - "posttest": "npm run lint", - "test": "tap", - "coverage": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap" - }, - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/ssri.git" - }, - "keywords": [ - "w3c", - "web", - "security", - "integrity", - "checksum", - "hashing", - "subresource integrity", - "sri", - "sri hash", - "sri string", - "sri generator", - "html" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE deleted file mode 100644 index 69619c125ea7ef..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js deleted file mode 100644 index d067d2e709809a..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js +++ /dev/null @@ -1,7 +0,0 @@ -var path = require('path') - -var uniqueSlug = require('unique-slug') - -module.exports = function (filepath, prefix, uniq) { - return path.join(filepath, (prefix ? prefix + '-' : '') + uniqueSlug(uniq)) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json b/deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json deleted file mode 100644 index b2fbf0666489a6..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "unique-filename", - "version": "3.0.0", - "description": "Generate a unique filename for use in temporary directories or caches.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-filename.git" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/iarna/unique-filename/issues" - }, - "homepage": "https://github.com/iarna/unique-filename", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "dependencies": { - "unique-slug": "^4.0.0" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE deleted file mode 100644 index 7953647e7760b8..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js deleted file mode 100644 index 1bac84d95d7307..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js +++ /dev/null @@ -1,11 +0,0 @@ -'use strict' -var MurmurHash3 = require('imurmurhash') - -module.exports = function (uniq) { - if (uniq) { - var hash = new MurmurHash3(uniq) - return ('00000000' + hash.result().toString(16)).slice(-8) - } else { - return (Math.random().toString(16) + '0000000').slice(2, 10) - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json b/deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json deleted file mode 100644 index 33732cdbb42859..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "unique-slug", - "version": "4.0.0", - "description": "Generate a unique character string suitible for use in files and URLs.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^3.1.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-slug.git" - }, - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/package.json b/deps/npm/node_modules/tuf-js/package.json index 9280719230d9ab..e79a3d45f3f06a 100644 --- a/deps/npm/node_modules/tuf-js/package.json +++ b/deps/npm/node_modules/tuf-js/package.json @@ -1,12 +1,12 @@ { "name": "tuf-js", - "version": "2.2.1", + "version": "3.0.1", "description": "JavaScript implementation of The Update Framework (TUF)", "main": "dist/index.js", "types": "dist/index.d.ts", "scripts": { "build": "tsc --build", - "clean": "rm -rf dist", + "clean": "rm -rf dist && rm tsconfig.tsbuildinfo", "test": "jest" }, "repository": { @@ -28,16 +28,16 @@ }, "homepage": "https://github.com/theupdateframework/tuf-js/tree/main/packages/client#readme", "devDependencies": { - "@tufjs/repo-mock": "2.0.1", + "@tufjs/repo-mock": "3.0.1", "@types/debug": "^4.1.12", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { - "@tufjs/models": "2.0.1", - "debug": "^4.3.4", - "make-fetch-happen": "^13.0.1" + "@tufjs/models": "3.0.1", + "debug": "^4.3.6", + "make-fetch-happen": "^14.0.1" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js index 130a0929b8ce8c..ddfdba39a783a4 100644 --- a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js +++ b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js @@ -1,7 +1,9 @@ export default function ansiRegex({onlyFirst = false} = {}) { + // Valid string terminator sequences are BEL, ESC\, and 0x9c + const ST = '(?:\\u0007|\\u001B\\u005C|\\u009C)'; const pattern = [ - '[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?\\u0007)', - '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-ntqry=><~]))' + `[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?${ST})`, + '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-nq-uy=><~]))', ].join('|'); return new RegExp(pattern, onlyFirst ? 
undefined : 'g'); diff --git a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json index 7bbb563bf2a70a..49f3f61021512b 100644 --- a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json +++ b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json @@ -1,6 +1,6 @@ { "name": "ansi-regex", - "version": "6.0.1", + "version": "6.1.0", "description": "Regular expression for matching ANSI escape codes", "license": "MIT", "repository": "chalk/ansi-regex", @@ -12,6 +12,8 @@ }, "type": "module", "exports": "./index.js", + "types": "./index.d.ts", + "sideEffects": false, "engines": { "node": ">=12" }, @@ -51,8 +53,9 @@ "pattern" ], "devDependencies": { + "ansi-escapes": "^5.0.0", "ava": "^3.15.0", - "tsd": "^0.14.0", - "xo": "^0.38.2" + "tsd": "^0.21.0", + "xo": "^0.54.2" } } diff --git a/deps/npm/package.json b/deps/npm/package.json index c92578506b30ac..2f5676d81bde93 100644 --- a/deps/npm/package.json +++ b/deps/npm/package.json @@ -1,5 +1,5 @@ { - "version": "10.9.0", + "version": "10.9.1", "name": "npm", "description": "a package manager for JavaScript", "workspaces": [ @@ -57,21 +57,21 @@ "@npmcli/fs": "^4.0.0", "@npmcli/map-workspaces": "^4.0.1", "@npmcli/package-json": "^6.0.1", - "@npmcli/promise-spawn": "^8.0.1", + "@npmcli/promise-spawn": "^8.0.2", "@npmcli/redact": "^3.0.0", "@npmcli/run-script": "^9.0.1", - "@sigstore/tuf": "^2.3.4", + "@sigstore/tuf": "^3.0.0", "abbrev": "^3.0.0", "archy": "~1.0.0", "cacache": "^19.0.1", "chalk": "^5.3.0", - "ci-info": "^4.0.0", + "ci-info": "^4.1.0", "cli-columns": "^4.0.0", "fastest-levenshtein": "^1.0.16", "fs-minipass": "^3.0.3", "glob": "^10.4.5", "graceful-fs": "^4.2.11", - "hosted-git-info": "^8.0.0", + "hosted-git-info": "^8.0.2", "ini": "^5.0.0", "init-package-json": "^7.0.1", "is-cidr": "^5.1.0", @@ -83,11 +83,11 @@ "libnpmhook": "^11.0.0", "libnpmorg": "^7.0.0", "libnpmpack": "^8.0.0", - "libnpmpublish": "^10.0.0", + "libnpmpublish": "^10.0.1", "libnpmsearch": "^8.0.0", "libnpmteam": "^7.0.0", "libnpmversion": "^7.0.0", - "make-fetch-happen": "^14.0.1", + "make-fetch-happen": "^14.0.3", "minimatch": "^9.0.5", "minipass": "^7.1.1", "minipass-pipeline": "^1.2.4", @@ -96,14 +96,14 @@ "nopt": "^8.0.0", "normalize-package-data": "^7.0.0", "npm-audit-report": "^6.0.0", - "npm-install-checks": "^7.1.0", + "npm-install-checks": "^7.1.1", "npm-package-arg": "^12.0.0", "npm-pick-manifest": "^10.0.0", "npm-profile": "^11.0.1", - "npm-registry-fetch": "^18.0.1", + "npm-registry-fetch": "^18.0.2", "npm-user-validate": "^3.0.0", "p-map": "^4.0.0", - "pacote": "^19.0.0", + "pacote": "^19.0.1", "parse-conflict-json": "^4.0.0", "proc-log": "^5.0.0", "qrcode-terminal": "^0.12.0", From f7567d46d8de9a15b8af10ea597ccd59f969ebb1 Mon Sep 17 00:00:00 2001 From: Arne Keller <2012gdwu+github@posteo.de> Date: Sun, 24 Nov 2024 23:30:38 +0100 Subject: [PATCH 36/53] test: make HTTP/1.0 connection test more robust Fixes: https://github.com/nodejs/node/issues/47200 Co-authored-by: Luigi Pinca PR-URL: https://github.com/nodejs/node/pull/55959 Reviewed-By: Luigi Pinca Reviewed-By: Yagiz Nizipli Reviewed-By: Jason Zhang --- ...emove-connection-header-persists-connection.js | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test/parallel/test-http-remove-connection-header-persists-connection.js b/test/parallel/test-http-remove-connection-header-persists-connection.js index df7e39ae94375f..d5f53a703c821e 100644 --- 
a/test/parallel/test-http-remove-connection-header-persists-connection.js +++ b/test/parallel/test-http-remove-connection-header-persists-connection.js @@ -10,6 +10,13 @@ const server = http.createServer(function(request, response) { // For HTTP/1.0, the connection should be closed after the response automatically. response.removeHeader('connection'); + if (request.httpVersion === '1.0') { + const socket = request.socket; + response.on('finish', common.mustCall(function() { + assert.ok(socket.writableEnded); + })); + } + response.end('beep boop\n'); }); @@ -50,9 +57,7 @@ function makeHttp10Request(cb) { '\r\n'); socket.resume(); // Ignore the response itself - setTimeout(function() { - cb(socket); - }, common.platformTimeout(50)); + socket.on('close', cb); }); } @@ -62,9 +67,7 @@ server.listen(0, function() { // Both HTTP/1.1 requests should have used the same socket: assert.strictEqual(firstSocket, secondSocket); - makeHttp10Request(function(socket) { - // The server should have immediately closed the HTTP/1.0 socket: - assert.strictEqual(socket.closed, true); + makeHttp10Request(function() { server.close(); }); }); From c048865199af8f0bf4d6bc74474657dcb72b7549 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Sun, 24 Nov 2024 21:30:43 -0500 Subject: [PATCH 37/53] test_runner: simplify hook running logic This commit removes some asynchronous logic from the runHook() method and replaces ArrayPrototypeReduce() with a for loop. PR-URL: https://github.com/nodejs/node/pull/55963 Reviewed-By: Pietro Marchini Reviewed-By: James M Snell Reviewed-By: Chemi Atlow Reviewed-By: Antoine du Hamel --- lib/internal/test_runner/test.js | 8 +++---- .../test-runner/output/abort_hooks.snapshot | 4 ++-- ...global_after_should_fail_the_test.snapshot | 1 - .../test-runner/output/hooks.snapshot | 11 +++++----- .../output/hooks_spec_reporter.snapshot | 22 +++++++++---------- 5 files changed, 21 insertions(+), 25 deletions(-) diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index 34e1900979960f..1ba113dbdc1a49 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -2,7 +2,6 @@ const { ArrayPrototypePush, ArrayPrototypePushApply, - ArrayPrototypeReduce, ArrayPrototypeShift, ArrayPrototypeSlice, ArrayPrototypeSome, @@ -846,13 +845,14 @@ class Test extends AsyncResource { async runHook(hook, args) { validateOneOf(hook, 'hook name', kHookNames); try { - await ArrayPrototypeReduce(this.hooks[hook], async (prev, hook) => { - await prev; + const hooks = this.hooks[hook]; + for (let i = 0; i < hooks.length; ++i) { + const hook = hooks[i]; await hook.run(args); if (hook.error) { throw hook.error; } - }, PromiseResolve()); + } } catch (err) { const error = new ERR_TEST_FAILURE(`failed running ${hook} hook`, kHookFailure); error.cause = isTestFailureError(err) ? err.cause : err; diff --git a/test/fixtures/test-runner/output/abort_hooks.snapshot b/test/fixtures/test-runner/output/abort_hooks.snapshot index 278b5e5fd36ca5..e318e36d9d56a4 100644 --- a/test/fixtures/test-runner/output/abort_hooks.snapshot +++ b/test/fixtures/test-runner/output/abort_hooks.snapshot @@ -101,7 +101,7 @@ not ok 2 - 2 after describe * * * - async Promise.all (index 0) + * ... # Subtest: test 2 not ok 2 - test 2 @@ -122,7 +122,7 @@ not ok 2 - 2 after describe * * * - async Promise.all (index 0) + * ... 
1..2 not ok 3 - 3 beforeEach describe diff --git a/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot b/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot index 3196f377b3d4bf..ee4d5f71072ba5 100644 --- a/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot +++ b/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot @@ -21,7 +21,6 @@ not ok 2 - /test/fixtures/test-runner/output/global_after_should_fail_the_test.j * * * - * ... 1..1 # tests 1 diff --git a/test/fixtures/test-runner/output/hooks.snapshot b/test/fixtures/test-runner/output/hooks.snapshot index 6b9d6d26a90e39..be8d1b210c60e4 100644 --- a/test/fixtures/test-runner/output/hooks.snapshot +++ b/test/fixtures/test-runner/output/hooks.snapshot @@ -77,7 +77,6 @@ not ok 3 - before throws * * * - * ... # Subtest: before throws - no subtests not ok 4 - before throws - no subtests @@ -97,7 +96,6 @@ not ok 4 - before throws - no subtests * * * - * ... # Subtest: after throws # Subtest: 1 @@ -129,6 +127,7 @@ not ok 5 - after throws * * * + * ... # Subtest: after throws - no subtests not ok 6 - after throws - no subtests @@ -149,6 +148,7 @@ not ok 6 - after throws - no subtests * * * + * ... # Subtest: beforeEach throws # Subtest: 1 @@ -167,9 +167,9 @@ not ok 6 - after throws - no subtests * * * - async Promise.all (index 0) * * + new Promise () ... # Subtest: 2 not ok 2 - 2 @@ -188,6 +188,8 @@ not ok 6 - after throws - no subtests * * * + * + async Promise.all (index 0) ... 1..2 not ok 7 - beforeEach throws @@ -482,7 +484,6 @@ not ok 15 - t.after throws * * * - * ... # Subtest: t.after throws - no subtests not ok 16 - t.after throws - no subtests @@ -502,7 +503,6 @@ not ok 16 - t.after throws - no subtests * * * - * ... # Subtest: t.beforeEach throws # Subtest: 1 @@ -765,7 +765,6 @@ not ok 24 - run after when before throws * * * - * ... 
# Subtest: test hooks - async # Subtest: 1 diff --git a/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot b/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot index b5c5ab7d1965e5..ea916c2ee754c4 100644 --- a/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot +++ b/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot @@ -26,7 +26,6 @@ * * * - * before throws - no subtests (*ms) Error: before @@ -38,7 +37,6 @@ * * * - * after throws 1 (*ms) @@ -55,6 +53,7 @@ * * * + * after throws - no subtests (*ms) Error: after @@ -67,6 +66,7 @@ * * * + * beforeEach throws 1 (*ms) @@ -78,9 +78,9 @@ * * * - at async Promise.all (index 0) * * + at new Promise () 2 (*ms) Error: beforeEach @@ -92,6 +92,8 @@ * * * + * + at async Promise.all (index 0) beforeEach throws (*ms) afterEach throws @@ -242,7 +244,6 @@ * * * - * t.after throws - no subtests (*ms) Error: after @@ -255,7 +256,6 @@ * * * - * t.beforeEach throws 1 (*ms) @@ -390,7 +390,6 @@ * * * - * test hooks - async 1 (*ms) @@ -430,7 +429,6 @@ * * * - * * before throws - no subtests (*ms) @@ -443,7 +441,6 @@ * * * - * * after throws (*ms) @@ -457,6 +454,7 @@ * * * + * * after throws - no subtests (*ms) @@ -470,6 +468,7 @@ * * * + * * 1 (*ms) @@ -481,9 +480,9 @@ * * * - at async Promise.all (index 0) * * + at new Promise () * 2 (*ms) @@ -496,6 +495,8 @@ * * * + * + at async Promise.all (index 0) * 1 (*ms) @@ -633,7 +634,6 @@ * * * - * * t.after throws - no subtests (*ms) @@ -647,7 +647,6 @@ * * * - * * 1 (*ms) @@ -776,4 +775,3 @@ * * * - * From 30f26ba254d091ca611a05dd7212814a8cdede1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Jos=C3=A9?= Date: Mon, 25 Nov 2024 00:07:13 -0500 Subject: [PATCH 38/53] lib: avoid excluding symlinks in recursive fs.readdir with filetypes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: https://github.com/nodejs/node/issues/52663 Signed-off-by: Juan José Arboleda PR-URL: https://github.com/nodejs/node/pull/55714 Reviewed-By: Ethan Arrowood Reviewed-By: James M Snell --- lib/fs.js | 5 ++- .../test-fs-readdir-types-symlinks.js | 36 +++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 test/parallel/test-fs-readdir-types-symlinks.js diff --git a/lib/fs.js b/lib/fs.js index 6a7d2cde463597..0fd917ba96acd6 100644 --- a/lib/fs.js +++ b/lib/fs.js @@ -1401,9 +1401,12 @@ function readdirSyncRecursive(basePath, options) { // of the first array within the result. const length = readdirResult[0].length; for (let i = 0; i < length; i++) { + // Avoid excluding symlinks, as they are not directories. 
+ // Refs: https://github.com/nodejs/node/issues/52663 + const stat = binding.internalModuleStat(binding, pathModule.join(path, readdirResult[0][i])); const dirent = getDirent(path, readdirResult[0][i], readdirResult[1][i]); ArrayPrototypePush(readdirResults, dirent); - if (dirent.isDirectory()) { + if (dirent.isDirectory() || stat === 1) { ArrayPrototypePush(pathsQueue, pathModule.join(dirent.parentPath, dirent.name)); } } diff --git a/test/parallel/test-fs-readdir-types-symlinks.js b/test/parallel/test-fs-readdir-types-symlinks.js new file mode 100644 index 00000000000000..afdbdb16364c03 --- /dev/null +++ b/test/parallel/test-fs-readdir-types-symlinks.js @@ -0,0 +1,36 @@ +'use strict'; + +// Refs: https://github.com/nodejs/node/issues/52663 +const common = require('../common'); +const assert = require('node:assert'); +const fs = require('node:fs'); +const path = require('node:path'); + +if (!common.canCreateSymLink()) + common.skip('insufficient privileges'); + +const tmpdir = require('../common/tmpdir'); +const readdirDir = tmpdir.path; +// clean up the tmpdir +tmpdir.refresh(); + +// a/1, a/2 +const a = path.join(readdirDir, 'a'); +fs.mkdirSync(a); +fs.writeFileSync(path.join(a, '1'), 'irrelevant'); +fs.writeFileSync(path.join(a, '2'), 'irrelevant'); + +// b/1 +const b = path.join(readdirDir, 'b'); +fs.mkdirSync(b); +fs.writeFileSync(path.join(b, '1'), 'irrelevant'); + +// b/c -> a +const c = path.join(readdirDir, 'b', 'c'); +fs.symlinkSync(a, c, 'dir'); + +// Just check that the number of entries are the same +assert.strictEqual( + fs.readdirSync(b, { recursive: true, withFileTypes: true }).length, + fs.readdirSync(b, { recursive: true, withFileTypes: false }).length +); From 95e8c4ef6c31bb155d6014947c15b86e964482dc Mon Sep 17 00:00:00 2001 From: cjihrig Date: Fri, 22 Nov 2024 15:41:35 -0500 Subject: [PATCH 39/53] test_runner: refactor build Promise in Suite() This commit refactors the buildSuite Promise logic in the Suite constructor to use an async function instead of creating an awkward primordial-based Promise chain. 
PR-URL: https://github.com/nodejs/node/pull/55958 Reviewed-By: Yagiz Nizipli Reviewed-By: Benjamin Gruenbaum Reviewed-By: Matteo Collina Reviewed-By: Pietro Marchini Reviewed-By: Jacob Smith Reviewed-By: James M Snell Reviewed-By: Chemi Atlow --- lib/internal/test_runner/test.js | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index 1ba113dbdc1a49..6a92ec335ddf34 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -23,7 +23,6 @@ const { SafeMap, SafePromiseAll, SafePromiseAllReturnVoid, - SafePromisePrototypeFinally, SafePromiseRace, SafeSet, StringPrototypeStartsWith, @@ -1259,27 +1258,20 @@ class Suite extends Test { this.skipped = false; } + this.buildSuite = this.createBuild(); + this.fn = noop; + } + + async createBuild() { try { const { ctx, args } = this.getRunArgs(); const runArgs = [this.fn, ctx]; ArrayPrototypePushApply(runArgs, args); - this.buildSuite = SafePromisePrototypeFinally( - PromisePrototypeThen( - PromiseResolve(ReflectApply(this.runInAsyncScope, this, runArgs)), - undefined, - (err) => { - this.fail(new ERR_TEST_FAILURE(err, kTestCodeFailure)); - }), - () => this.postBuild(), - ); + await ReflectApply(this.runInAsyncScope, this, runArgs); } catch (err) { this.fail(new ERR_TEST_FAILURE(err, kTestCodeFailure)); - this.postBuild(); } - this.fn = noop; - } - postBuild() { this.buildPhaseFinished = true; } From 7c3a4d4bcd4bd748021b3aa3f4fc4b6ebefe2915 Mon Sep 17 00:00:00 2001 From: cjihrig Date: Fri, 22 Nov 2024 15:54:03 -0500 Subject: [PATCH 40/53] test_runner: refactor Promise chain in run() This commit refactors the chain of functions in run() to use an async function instead of creating an awkward primordial-based Promise chain. 
PR-URL: https://github.com/nodejs/node/pull/55958 Reviewed-By: Yagiz Nizipli Reviewed-By: Benjamin Gruenbaum Reviewed-By: Matteo Collina Reviewed-By: Pietro Marchini Reviewed-By: Jacob Smith Reviewed-By: James M Snell Reviewed-By: Chemi Atlow --- lib/internal/test_runner/runner.js | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/internal/test_runner/runner.js b/lib/internal/test_runner/runner.js index 0ba40d7ba7f71f..43a62b5b4307e4 100644 --- a/lib/internal/test_runner/runner.js +++ b/lib/internal/test_runner/runner.js @@ -17,7 +17,6 @@ const { ArrayPrototypeSort, ObjectAssign, PromisePrototypeThen, - PromiseResolve, PromiseWithResolvers, SafeMap, SafePromiseAll, @@ -801,9 +800,17 @@ function run(options = kEmptyObject) { } } - const setupPromise = PromiseResolve(setup?.(root.reporter)); - PromisePrototypeThen(PromisePrototypeThen(PromisePrototypeThen(setupPromise, runFiles), postRun), teardown); + const runChain = async () => { + if (typeof setup === 'function') { + await setup(root.reporter); + } + + await runFiles(); + postRun?.(); + teardown?.(); + }; + runChain(); return root.reporter; } From 32b1681b7f103e12fdf35246dad8e5fdf4f8bafd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Mon, 25 Nov 2024 09:27:26 +0100 Subject: [PATCH 41/53] tools: use tokenless Codecov uploads MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refs: https://docs.codecov.com/docs/codecov-tokens#uploading-without-a-token PR-URL: https://github.com/nodejs/node/pull/55943 Reviewed-By: Yagiz Nizipli Reviewed-By: Richard Lau Reviewed-By: Luigi Pinca Reviewed-By: Juan José Arboleda Reviewed-By: James M Snell --- .github/workflows/coverage-linux-without-intl.yml | 3 +-- .github/workflows/coverage-linux.yml | 3 +-- .github/workflows/coverage-windows.yml | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/coverage-linux-without-intl.yml b/.github/workflows/coverage-linux-without-intl.yml index ddd85fb8a4ff0e..1977eda3f97e03 100644 --- a/.github/workflows/coverage-linux-without-intl.yml +++ b/.github/workflows/coverage-linux-without-intl.yml @@ -79,7 +79,6 @@ jobs: - name: Clean tmp run: rm -rf coverage/tmp && rm -rf out - name: Upload - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 with: directory: ./coverage - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/coverage-linux.yml b/.github/workflows/coverage-linux.yml index 153504ba4280d6..164c0b540a9f45 100644 --- a/.github/workflows/coverage-linux.yml +++ b/.github/workflows/coverage-linux.yml @@ -79,7 +79,6 @@ jobs: - name: Clean tmp run: rm -rf coverage/tmp && rm -rf out - name: Upload - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 with: directory: ./coverage - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/coverage-windows.yml b/.github/workflows/coverage-windows.yml index 84feb7b09018de..fada006e321520 100644 --- a/.github/workflows/coverage-windows.yml +++ b/.github/workflows/coverage-windows.yml @@ -71,7 +71,6 @@ jobs: - name: Clean tmp run: npx rimraf ./coverage/tmp - name: Upload - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 with: directory: ./coverage - 
token: ${{ secrets.CODECOV_TOKEN }} From 770572423be417649d7c8531d3adec56a8d6faba Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 25 Nov 2024 09:41:12 +0100 Subject: [PATCH 42/53] doc: add vetted courses to the ambassador benefits Signed-off-by: Matteo Collina PR-URL: https://github.com/nodejs/node/pull/55934 Reviewed-By: Ruben Bridgewater Reviewed-By: Michael Dawson Reviewed-By: James M Snell --- doc/contributing/advocacy-ambasador-program.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/contributing/advocacy-ambasador-program.md b/doc/contributing/advocacy-ambasador-program.md index f806109f866caf..cfb8c5cb1cd484 100644 --- a/doc/contributing/advocacy-ambasador-program.md +++ b/doc/contributing/advocacy-ambasador-program.md @@ -18,6 +18,10 @@ The ambassador program does that by: messages and topics defined. * Advocating for ambassadors to be part of the OpenJS speakers bureau, even if the ambassador is not otherwise an active member of the project itself. +* Each ambassador could add a maximum of three links to resources to learn Node.js + on a dedicated page on the main Node.js website. At least one of those must be a + free resource. The Node.js TSC members could ask for coupon codes to verify the + material if they so decide. ## Ambassadors nominations From a3f7db6b6d9b85458b24f4adf0b53e3fb2aefc2c Mon Sep 17 00:00:00 2001 From: skyclouds2001 <95597335+skyclouds2001@users.noreply.github.com> Date: Mon, 25 Nov 2024 18:02:32 +0800 Subject: [PATCH 43/53] doc: add doc for PerformanceObserver.takeRecords() PR-URL: https://github.com/nodejs/node/pull/55786 Fixes: https://github.com/nodejs/node/issues/55779 Reviewed-By: James M Snell Reviewed-By: Jason Zhang --- doc/api/perf_hooks.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/api/perf_hooks.md b/doc/api/perf_hooks.md index fe345287198297..308f2cfc698dac 100644 --- a/doc/api/perf_hooks.md +++ b/doc/api/perf_hooks.md @@ -1339,6 +1339,14 @@ for (let n = 0; n < 3; n++) performance.mark(`test${n}`); ``` +### `performanceObserver.takeRecords()` + + + +* Returns: {PerformanceEntry\[]} Current list of entries stored in the performance observer, emptying it out. + ## Class: `PerformanceObserverEntryList` +### DEP0187: Passing invalid argument types to `fs.existsSync` + + + +Type: Documentation-only + +Passing non-supported argument types is deprecated and, instead of returning `false`, +will throw an error in a future version. 
+ [NIST SP 800-38D]: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf [RFC 6066]: https://tools.ietf.org/html/rfc6066#section-3 [RFC 8247 Section 2.4]: https://www.rfc-editor.org/rfc/rfc8247#section-2.4 From 1fb30d6e8682ad0a74bea9265a3b43428a94093c Mon Sep 17 00:00:00 2001 From: James M Snell Date: Sat, 23 Nov 2024 09:37:33 -0800 Subject: [PATCH 46/53] quic: multiple updates to quic impl * separate stats and symbols into separate files * quic: rename `EndpointStats` and `SessionStats` to be consistent * s/EndpointStats/QuicEndpointStats/ * s/SessionStats/QuicSessionStats/ * separate state into separate files and other cleanups * extend tls options validations * rename classes for consistency and other cleanups PR-URL: https://github.com/nodejs/node/pull/55971 Reviewed-By: Yagiz Nizipli Reviewed-By: Matteo Collina --- lib/internal/quic/quic.js | 1323 ++--------------- lib/internal/quic/state.js | 566 +++++++ lib/internal/quic/stats.js | 646 ++++++++ lib/internal/quic/symbols.js | 56 + src/node_builtins.cc | 3 +- ...-quic-internal-endpoint-listen-defaults.js | 4 +- .../test-quic-internal-endpoint-options.js | 34 +- ...test-quic-internal-endpoint-stats-state.js | 39 +- 8 files changed, 1449 insertions(+), 1222 deletions(-) create mode 100644 lib/internal/quic/state.js create mode 100644 lib/internal/quic/stats.js create mode 100644 lib/internal/quic/symbols.js diff --git a/lib/internal/quic/quic.js b/lib/internal/quic/quic.js index 4d0077195d0629..a4b53e2407072c 100644 --- a/lib/internal/quic/quic.js +++ b/lib/internal/quic/quic.js @@ -5,28 +5,21 @@ /* c8 ignore start */ const { - ArrayBuffer, ArrayIsArray, ArrayPrototypePush, - BigUint64Array, - DataView, - DataViewPrototypeGetBigInt64, - DataViewPrototypeGetBigUint64, - DataViewPrototypeGetUint8, - DataViewPrototypeSetUint8, ObjectDefineProperties, - Symbol, SymbolAsyncDispose, Uint8Array, } = primordials; +// QUIC requires that Node.js be compiled with crypto support. const { assertCrypto, - customInspectSymbol: kInspect, } = require('internal/util'); -const { inspect } = require('internal/util/inspect'); assertCrypto(); +const { inspect } = require('internal/util/inspect'); + const { Endpoint: Endpoint_, setCallbacks, @@ -54,99 +47,6 @@ const { CLOSECONTEXT_RECEIVE_FAILURE: kCloseContextReceiveFailure, CLOSECONTEXT_SEND_FAILURE: kCloseContextSendFailure, CLOSECONTEXT_START_FAILURE: kCloseContextStartFailure, - - // All of the IDX_STATS_* constants are the index positions of the stats - // fields in the relevant BigUint64Array's that underlie the *Stats objects. - // These are not exposed to end users. 
- IDX_STATS_ENDPOINT_CREATED_AT, - IDX_STATS_ENDPOINT_DESTROYED_AT, - IDX_STATS_ENDPOINT_BYTES_RECEIVED, - IDX_STATS_ENDPOINT_BYTES_SENT, - IDX_STATS_ENDPOINT_PACKETS_RECEIVED, - IDX_STATS_ENDPOINT_PACKETS_SENT, - IDX_STATS_ENDPOINT_SERVER_SESSIONS, - IDX_STATS_ENDPOINT_CLIENT_SESSIONS, - IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT, - IDX_STATS_ENDPOINT_RETRY_COUNT, - IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT, - IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT, - IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT, - - IDX_STATS_SESSION_CREATED_AT, - IDX_STATS_SESSION_CLOSING_AT, - IDX_STATS_SESSION_DESTROYED_AT, - IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT, - IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT, - IDX_STATS_SESSION_GRACEFUL_CLOSING_AT, - IDX_STATS_SESSION_BYTES_RECEIVED, - IDX_STATS_SESSION_BYTES_SENT, - IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT, - IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT, - IDX_STATS_SESSION_UNI_IN_STREAM_COUNT, - IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT, - IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT, - IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT, - IDX_STATS_SESSION_BYTES_IN_FLIGHT, - IDX_STATS_SESSION_BLOCK_COUNT, - IDX_STATS_SESSION_CWND, - IDX_STATS_SESSION_LATEST_RTT, - IDX_STATS_SESSION_MIN_RTT, - IDX_STATS_SESSION_RTTVAR, - IDX_STATS_SESSION_SMOOTHED_RTT, - IDX_STATS_SESSION_SSTHRESH, - IDX_STATS_SESSION_DATAGRAMS_RECEIVED, - IDX_STATS_SESSION_DATAGRAMS_SENT, - IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED, - IDX_STATS_SESSION_DATAGRAMS_LOST, - - IDX_STATS_STREAM_CREATED_AT, - IDX_STATS_STREAM_RECEIVED_AT, - IDX_STATS_STREAM_ACKED_AT, - IDX_STATS_STREAM_CLOSING_AT, - IDX_STATS_STREAM_DESTROYED_AT, - IDX_STATS_STREAM_BYTES_RECEIVED, - IDX_STATS_STREAM_BYTES_SENT, - IDX_STATS_STREAM_MAX_OFFSET, - IDX_STATS_STREAM_MAX_OFFSET_ACK, - IDX_STATS_STREAM_MAX_OFFSET_RECV, - IDX_STATS_STREAM_FINAL_SIZE, - - IDX_STATE_SESSION_PATH_VALIDATION, - IDX_STATE_SESSION_VERSION_NEGOTIATION, - IDX_STATE_SESSION_DATAGRAM, - IDX_STATE_SESSION_SESSION_TICKET, - IDX_STATE_SESSION_CLOSING, - IDX_STATE_SESSION_GRACEFUL_CLOSE, - IDX_STATE_SESSION_SILENT_CLOSE, - IDX_STATE_SESSION_STATELESS_RESET, - IDX_STATE_SESSION_DESTROYED, - IDX_STATE_SESSION_HANDSHAKE_COMPLETED, - IDX_STATE_SESSION_HANDSHAKE_CONFIRMED, - IDX_STATE_SESSION_STREAM_OPEN_ALLOWED, - IDX_STATE_SESSION_PRIORITY_SUPPORTED, - IDX_STATE_SESSION_WRAPPED, - IDX_STATE_SESSION_LAST_DATAGRAM_ID, - - IDX_STATE_ENDPOINT_BOUND, - IDX_STATE_ENDPOINT_RECEIVING, - IDX_STATE_ENDPOINT_LISTENING, - IDX_STATE_ENDPOINT_CLOSING, - IDX_STATE_ENDPOINT_BUSY, - IDX_STATE_ENDPOINT_PENDING_CALLBACKS, - - IDX_STATE_STREAM_ID, - IDX_STATE_STREAM_FIN_SENT, - IDX_STATE_STREAM_FIN_RECEIVED, - IDX_STATE_STREAM_READ_ENDED, - IDX_STATE_STREAM_WRITE_ENDED, - IDX_STATE_STREAM_DESTROYED, - IDX_STATE_STREAM_PAUSED, - IDX_STATE_STREAM_RESET, - IDX_STATE_STREAM_HAS_READER, - IDX_STATE_STREAM_WANTS_BLOCK, - IDX_STATE_STREAM_WANTS_HEADERS, - IDX_STATE_STREAM_WANTS_RESET, - IDX_STATE_STREAM_WANTS_TRAILERS, } = internalBinding('quic'); const { @@ -180,34 +80,48 @@ const { isCryptoKey, } = require('internal/crypto/keys'); -const { - kHandle: kKeyObjectHandle, - kKeyObject: kKeyObjectInner, -} = require('internal/crypto/util'); - const { validateFunction, validateObject, + validateString, + validateBoolean, } = require('internal/validators'); const kEmptyObject = { __proto__: null }; -const kEnumerable = { __proto__: null, enumerable: true }; - -const kBlocked = Symbol('kBlocked'); -const kDatagram = Symbol('kDatagram'); -const kDatagramStatus = Symbol('kDatagramStatus'); -const kError = Symbol('kError'); -const 
kFinishClose = Symbol('kFinishClose'); -const kHandshake = Symbol('kHandshake'); -const kHeaders = Symbol('kHeaders'); -const kOwner = Symbol('kOwner'); -const kNewSession = Symbol('kNewSession'); -const kNewStream = Symbol('kNewStream'); -const kPathValidation = Symbol('kPathValidation'); -const kReset = Symbol('kReset'); -const kSessionTicket = Symbol('kSessionTicket'); -const kTrailers = Symbol('kTrailers'); -const kVersionNegotiation = Symbol('kVersionNegotiation'); + +const { + kBlocked, + kDatagram, + kDatagramStatus, + kError, + kFinishClose, + kHandshake, + kHeaders, + kOwner, + kNewSession, + kNewStream, + kPathValidation, + kReset, + kSessionTicket, + kTrailers, + kVersionNegotiation, + kInspect, + kKeyObjectHandle, + kKeyObjectInner, + kPrivateConstructor, +} = require('internal/quic/symbols'); + +const { + QuicEndpointStats, + QuicStreamStats, + QuicSessionStats, +} = require('internal/quic/stats'); + +const { + QuicEndpointState, + QuicSessionState, + QuicStreamState, +} = require('internal/quic/state'); /** * @typedef {import('../socketaddress.js').SocketAddress} SocketAddress @@ -317,22 +231,22 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); /** * Called when the Endpoint receives a new server-side Session. * @callback OnSessionCallback - * @param {Session} session - * @param {Endpoint} endpoint + * @param {QuicSession} session + * @param {QuicEndpoint} endpoint * @returns {void} */ /** * @callback OnStreamCallback * @param {QuicStream} stream - * @param {Session} session + * @param {QuicSession} session * @returns {void} */ /** * @callback OnDatagramCallback * @param {Uint8Array} datagram - * @param {Session} session + * @param {QuicSession} session * @param {boolean} early * @returns {void} */ @@ -341,7 +255,7 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @callback OnDatagramStatusCallback * @param {bigint} id * @param {'lost'|'acknowledged'} status - * @param {Session} session + * @param {QuicSession} session * @returns {void} */ @@ -353,14 +267,14 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @param {SocketAddress} oldLocalAddress * @param {SocketAddress} oldRemoteAddress * @param {boolean} preferredAddress - * @param {Session} session + * @param {QuicSession} session * @returns {void} */ /** * @callback OnSessionTicketCallback * @param {object} ticket - * @param {Session} session + * @param {QuicSession} session * @returns {void} */ @@ -369,7 +283,7 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @param {number} version * @param {number[]} requestedVersions * @param {number[]} supportedVersions - * @param {Session} session + * @param {QuicSession} session * @returns {void} */ @@ -382,7 +296,7 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @param {string} validationErrorReason * @param {number} validationErrorCode * @param {boolean} earlyDataAccepted - * @param {Session} session + * @param {QuicSession} session * @returns {void} */ @@ -526,18 +440,65 @@ function processTlsOptions(tls) { const { sni, alpn, - ciphers, - groups, - keylog, - verifyClient, - tlsTrace, - verifyPrivateKey, + ciphers = DEFAULT_CIPHERS, + groups = DEFAULT_GROUPS, + keylog = false, + verifyClient = false, + tlsTrace = false, + verifyPrivateKey = false, keys, certs, ca, crl, } = tls; + if (sni !== undefined) { + validateString(sni, 'options.tls.sni'); + } + if (alpn !== undefined) { + validateString(alpn, 'options.tls.alpn'); + } + if (ciphers !== undefined) { + validateString(ciphers, 'options.tls.ciphers'); + } + 
if (groups !== undefined) { + validateString(groups, 'options.tls.groups'); + } + validateBoolean(keylog, 'options.tls.keylog'); + validateBoolean(verifyClient, 'options.tls.verifyClient'); + validateBoolean(tlsTrace, 'options.tls.tlsTrace'); + validateBoolean(verifyPrivateKey, 'options.tls.verifyPrivateKey'); + + if (certs !== undefined) { + const certInputs = ArrayIsArray(certs) ? certs : [certs]; + for (const cert of certInputs) { + if (!isArrayBufferView(cert) && !isArrayBuffer(cert)) { + throw new ERR_INVALID_ARG_TYPE('options.tls.certs', + ['ArrayBufferView', 'ArrayBuffer'], cert); + } + } + } + + if (ca !== undefined) { + const caInputs = ArrayIsArray(ca) ? ca : [ca]; + for (const caCert of caInputs) { + if (!isArrayBufferView(caCert) && !isArrayBuffer(caCert)) { + throw new ERR_INVALID_ARG_TYPE('options.tls.ca', + ['ArrayBufferView', 'ArrayBuffer'], caCert); + } + } + } + + if (crl !== undefined) { + const crlInputs = ArrayIsArray(crl) ? crl : [crl]; + for (const crlCert of crlInputs) { + if (!isArrayBufferView(crlCert) && !isArrayBuffer(crlCert)) { + throw new ERR_INVALID_ARG_TYPE('options.tls.crl', + ['ArrayBufferView', 'ArrayBuffer'], crlCert); + } + } + } + const keyHandles = []; if (keys !== undefined) { const keyInputs = ArrayIsArray(keys) ? keys : [keys]; @@ -574,277 +535,10 @@ function processTlsOptions(tls) { }; } -class QuicStreamStats { - /** @type {BigUint64Array} */ - #handle; - - /** - * @param {ArrayBuffer} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new BigUint64Array(buffer); - } - - /** @type {bigint} */ - get createdAt() { - return this.#handle[IDX_STATS_STREAM_CREATED_AT]; - } - - /** @type {bigint} */ - get receivedAt() { - return this.#handle[IDX_STATS_STREAM_RECEIVED_AT]; - } - - /** @type {bigint} */ - get ackedAt() { - return this.#handle[IDX_STATS_STREAM_ACKED_AT]; - } - - /** @type {bigint} */ - get closingAt() { - return this.#handle[IDX_STATS_STREAM_CLOSING_AT]; - } - - /** @type {bigint} */ - get destroyedAt() { - return this.#handle[IDX_STATS_STREAM_DESTROYED_AT]; - } - - /** @type {bigint} */ - get bytesReceived() { - return this.#handle[IDX_STATS_STREAM_BYTES_RECEIVED]; - } - - /** @type {bigint} */ - get bytesSent() { - return this.#handle[IDX_STATS_STREAM_BYTES_SENT]; - } - - /** @type {bigint} */ - get maxOffset() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET]; - } - - /** @type {bigint} */ - get maxOffsetAcknowledged() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_ACK]; - } - - /** @type {bigint} */ - get maxOffsetReceived() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_RECV]; - } - - /** @type {bigint} */ - get finalSize() { - return this.#handle[IDX_STATS_STREAM_FINAL_SIZE]; - } - - toJSON() { - return { - __proto__: null, - createdAt: `${this.createdAt}`, - receivedAt: `${this.receivedAt}`, - ackedAt: `${this.ackedAt}`, - closingAt: `${this.closingAt}`, - destroyedAt: `${this.destroyedAt}`, - bytesReceived: `${this.bytesReceived}`, - bytesSent: `${this.bytesSent}`, - maxOffset: `${this.maxOffset}`, - maxOffsetAcknowledged: `${this.maxOffsetAcknowledged}`, - maxOffsetReceived: `${this.maxOffsetReceived}`, - finalSize: `${this.finalSize}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? 
null : options.depth - 1, - }; - - return `StreamStats ${inspect({ - createdAt: this.createdAt, - receivedAt: this.receivedAt, - ackedAt: this.ackedAt, - closingAt: this.closingAt, - destroyedAt: this.destroyedAt, - bytesReceived: this.bytesReceived, - bytesSent: this.bytesSent, - maxOffset: this.maxOffset, - maxOffsetAcknowledged: this.maxOffsetAcknowledged, - maxOffsetReceived: this.maxOffsetReceived, - finalSize: this.finalSize, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. - this.#handle = new BigUint64Array(this.#handle); - } -} - -class QuicStreamState { - /** @type {DataView} */ - #handle; - - /** - * @param {ArrayBuffer} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new DataView(buffer); - } - - /** @type {bigint} */ - get id() { - return DataViewPrototypeGetBigInt64(this.#handle, IDX_STATE_STREAM_ID); - } - - /** @type {boolean} */ - get finSent() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_SENT); - } - - /** @type {boolean} */ - get finReceived() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_RECEIVED); - } - - /** @type {boolean} */ - get readEnded() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_READ_ENDED); - } - - /** @type {boolean} */ - get writeEnded() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WRITE_ENDED); - } - - /** @type {boolean} */ - get destroyed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_DESTROYED); - } - - /** @type {boolean} */ - get paused() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_PAUSED); - } - - /** @type {boolean} */ - get reset() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_RESET); - } - - /** @type {boolean} */ - get hasReader() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_HAS_READER); - } - - /** @type {boolean} */ - get wantsBlock() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK); - } - - /** @type {boolean} */ - set wantsBlock(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK, val ? 1 : 0); - } - - /** @type {boolean} */ - get wantsHeaders() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS); - } - - /** @type {boolean} */ - set wantsHeaders(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS, val ? 1 : 0); - } - - /** @type {boolean} */ - get wantsReset() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET); - } - - /** @type {boolean} */ - set wantsReset(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET, val ? 1 : 0); - } - - /** @type {boolean} */ - get wantsTrailers() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS); - } - - /** @type {boolean} */ - set wantsTrailers(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS, val ? 
1 : 0); - } - - toJSON() { - return { - __proto__: null, - id: `${this.id}`, - finSent: this.finSent, - finReceived: this.finReceived, - readEnded: this.readEnded, - writeEnded: this.writeEnded, - destroyed: this.destroyed, - paused: this.paused, - reset: this.reset, - hasReader: this.hasReader, - wantsBlock: this.wantsBlock, - wantsHeaders: this.wantsHeaders, - wantsReset: this.wantsReset, - wantsTrailers: this.wantsTrailers, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `StreamState ${inspect({ - id: this.id, - finSent: this.finSent, - finReceived: this.finReceived, - readEnded: this.readEnded, - writeEnded: this.writeEnded, - destroyed: this.destroyed, - paused: this.paused, - reset: this.reset, - hasReader: this.hasReader, - wantsBlock: this.wantsBlock, - wantsHeaders: this.wantsHeaders, - wantsReset: this.wantsReset, - wantsTrailers: this.wantsTrailers, - }, opts)}`; - } - - [kFinishClose]() { - // When the stream is destroyed, the state is no longer available. - this.#handle = new DataView(new ArrayBuffer(0)); - } -} - class QuicStream { /** @type {object} */ #handle; - /** @type {Session} */ + /** @type {QuicSession} */ #session; /** @type {QuicStreamStats} */ #stats; @@ -866,12 +560,12 @@ class QuicStream { /** * @param {StreamCallbackConfiguration} config * @param {object} handle - * @param {Session} session + * @param {QuicSession} session */ constructor(config, handle, session, direction) { validateObject(config, 'config'); - this.#stats = new QuicStreamStats(handle.stats); - this.#state = new QuicStreamState(handle.stats); + this.#stats = new QuicStreamStats(kPrivateConstructor, handle.stats); + this.#state = new QuicStreamState(kPrivateConstructor, handle.stats); const { onblocked, onerror, @@ -913,7 +607,7 @@ class QuicStream { /** @type {QuicStreamState} */ get state() { return this.#state; } - /** @type {Session} */ + /** @type {QuicSession} */ get session() { return this.#session; } /** @type {bigint} */ @@ -963,395 +657,8 @@ class QuicStream { } } -class SessionStats { - /** @type {BigUint64Array} */ - #handle; - - /** - * @param {BigUint64Array} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new BigUint64Array(buffer); - } - - /** @type {bigint} */ - get createdAt() { - return this.#handle[IDX_STATS_SESSION_CREATED_AT]; - } - - /** @type {bigint} */ - get closingAt() { - return this.#handle[IDX_STATS_SESSION_CLOSING_AT]; - } - - /** @type {bigint} */ - get destroyedAt() { - return this.#handle[IDX_STATS_SESSION_DESTROYED_AT]; - } - - /** @type {bigint} */ - get handshakeCompletedAt() { - return this.#handle[IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT]; - } - - /** @type {bigint} */ - get handshakeConfirmedAt() { - return this.#handle[IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT]; - } - - /** @type {bigint} */ - get gracefulClosingAt() { - return this.#handle[IDX_STATS_SESSION_GRACEFUL_CLOSING_AT]; - } - - /** @type {bigint} */ - get bytesReceived() { - return this.#handle[IDX_STATS_SESSION_BYTES_RECEIVED]; - } - - /** @type {bigint} */ - get bytesSent() { - return this.#handle[IDX_STATS_SESSION_BYTES_SENT]; - } - - /** @type {bigint} */ - get bidiInStreamCount() { - return this.#handle[IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT]; - } - - /** @type {bigint} */ - get bidiOutStreamCount() { - return 
this.#handle[IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT]; - } - - /** @type {bigint} */ - get uniInStreamCount() { - return this.#handle[IDX_STATS_SESSION_UNI_IN_STREAM_COUNT]; - } - - /** @type {bigint} */ - get uniOutStreamCount() { - return this.#handle[IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT]; - } - - /** @type {bigint} */ - get lossRetransmitCount() { - return this.#handle[IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT]; - } - - /** @type {bigint} */ - get maxBytesInFlights() { - return this.#handle[IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT]; - } - - /** @type {bigint} */ - get bytesInFlight() { - return this.#handle[IDX_STATS_SESSION_BYTES_IN_FLIGHT]; - } - - /** @type {bigint} */ - get blockCount() { - return this.#handle[IDX_STATS_SESSION_BLOCK_COUNT]; - } - - /** @type {bigint} */ - get cwnd() { - return this.#handle[IDX_STATS_SESSION_CWND]; - } - - /** @type {bigint} */ - get latestRtt() { - return this.#handle[IDX_STATS_SESSION_LATEST_RTT]; - } - - /** @type {bigint} */ - get minRtt() { - return this.#handle[IDX_STATS_SESSION_MIN_RTT]; - } - - /** @type {bigint} */ - get rttVar() { - return this.#handle[IDX_STATS_SESSION_RTTVAR]; - } - - /** @type {bigint} */ - get smoothedRtt() { - return this.#handle[IDX_STATS_SESSION_SMOOTHED_RTT]; - } - - /** @type {bigint} */ - get ssthresh() { - return this.#handle[IDX_STATS_SESSION_SSTHRESH]; - } - - /** @type {bigint} */ - get datagramsReceived() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_RECEIVED]; - } - - /** @type {bigint} */ - get datagramsSent() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_SENT]; - } - - /** @type {bigint} */ - get datagramsAcknowledged() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED]; - } - - /** @type {bigint} */ - get datagramsLost() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_LOST]; - } - - toJSON() { - return { - __proto__: null, - createdAt: `${this.createdAt}`, - closingAt: `${this.closingAt}`, - destroyedAt: `${this.destroyedAt}`, - handshakeCompletedAt: `${this.handshakeCompletedAt}`, - handshakeConfirmedAt: `${this.handshakeConfirmedAt}`, - gracefulClosingAt: `${this.gracefulClosingAt}`, - bytesReceived: `${this.bytesReceived}`, - bytesSent: `${this.bytesSent}`, - bidiInStreamCount: `${this.bidiInStreamCount}`, - bidiOutStreamCount: `${this.bidiOutStreamCount}`, - uniInStreamCount: `${this.uniInStreamCount}`, - uniOutStreamCount: `${this.uniOutStreamCount}`, - lossRetransmitCount: `${this.lossRetransmitCount}`, - maxBytesInFlights: `${this.maxBytesInFlights}`, - bytesInFlight: `${this.bytesInFlight}`, - blockCount: `${this.blockCount}`, - cwnd: `${this.cwnd}`, - latestRtt: `${this.latestRtt}`, - minRtt: `${this.minRtt}`, - rttVar: `${this.rttVar}`, - smoothedRtt: `${this.smoothedRtt}`, - ssthresh: `${this.ssthresh}`, - datagramsReceived: `${this.datagramsReceived}`, - datagramsSent: `${this.datagramsSent}`, - datagramsAcknowledged: `${this.datagramsAcknowledged}`, - datagramsLost: `${this.datagramsLost}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? 
null : options.depth - 1, - }; - - return `SessionStats ${inspect({ - createdAt: this.createdAt, - closingAt: this.closingAt, - destroyedAt: this.destroyedAt, - handshakeCompletedAt: this.handshakeCompletedAt, - handshakeConfirmedAt: this.handshakeConfirmedAt, - gracefulClosingAt: this.gracefulClosingAt, - bytesReceived: this.bytesReceived, - bytesSent: this.bytesSent, - bidiInStreamCount: this.bidiInStreamCount, - bidiOutStreamCount: this.bidiOutStreamCount, - uniInStreamCount: this.uniInStreamCount, - uniOutStreamCount: this.uniOutStreamCount, - lossRetransmitCount: this.lossRetransmitCount, - maxBytesInFlights: this.maxBytesInFlights, - bytesInFlight: this.bytesInFlight, - blockCount: this.blockCount, - cwnd: this.cwnd, - latestRtt: this.latestRtt, - minRtt: this.minRtt, - rttVar: this.rttVar, - smoothedRtt: this.smoothedRtt, - ssthresh: this.ssthresh, - datagramsReceived: this.datagramsReceived, - datagramsSent: this.datagramsSent, - datagramsAcknowledged: this.datagramsAcknowledged, - datagramsLost: this.datagramsLost, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. - this.#handle = new BigUint64Array(this.#handle); - } -} - -class SessionState { - /** @type {DataView} */ - #handle; - - /** - * @param {ArrayBuffer} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new DataView(buffer); - } - - /** @type {boolean} */ - get hasPathValidationListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION); - } - - /** @type {boolean} */ - set hasPathValidationListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION, val ? 1 : 0); - } - - /** @type {boolean} */ - get hasVersionNegotiationListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION); - } - - /** @type {boolean} */ - set hasVersionNegotiationListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION, val ? 1 : 0); - } - - /** @type {boolean} */ - get hasDatagramListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM); - } - - /** @type {boolean} */ - set hasDatagramListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM, val ? 1 : 0); - } - - /** @type {boolean} */ - get hasSessionTicketListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET); - } - - /** @type {boolean} */ - set hasSessionTicketListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET, val ? 
1 : 0); - } - - /** @type {boolean} */ - get isClosing() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_CLOSING); - } - - /** @type {boolean} */ - get isGracefulClose() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_GRACEFUL_CLOSE); - } - - /** @type {boolean} */ - get isSilentClose() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SILENT_CLOSE); - } - - /** @type {boolean} */ - get isStatelessReset() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STATELESS_RESET); - } - - /** @type {boolean} */ - get isDestroyed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DESTROYED); - } - - /** @type {boolean} */ - get isHandshakeCompleted() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_COMPLETED); - } - - /** @type {boolean} */ - get isHandshakeConfirmed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_CONFIRMED); - } - - /** @type {boolean} */ - get isStreamOpenAllowed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STREAM_OPEN_ALLOWED); - } - - /** @type {boolean} */ - get isPrioritySupported() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PRIORITY_SUPPORTED); - } - - /** @type {boolean} */ - get isWrapped() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_WRAPPED); - } - - /** @type {bigint} */ - get lastDatagramId() { - return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_SESSION_LAST_DATAGRAM_ID); - } - - toJSON() { - return { - __proto__: null, - hasPathValidationListener: this.hasPathValidationListener, - hasVersionNegotiationListener: this.hasVersionNegotiationListener, - hasDatagramListener: this.hasDatagramListener, - hasSessionTicketListener: this.hasSessionTicketListener, - isClosing: this.isClosing, - isGracefulClose: this.isGracefulClose, - isSilentClose: this.isSilentClose, - isStatelessReset: this.isStatelessReset, - isDestroyed: this.isDestroyed, - isHandshakeCompleted: this.isHandshakeCompleted, - isHandshakeConfirmed: this.isHandshakeConfirmed, - isStreamOpenAllowed: this.isStreamOpenAllowed, - isPrioritySupported: this.isPrioritySupported, - isWrapped: this.isWrapped, - lastDatagramId: `${this.lastDatagramId}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `SessionState ${inspect({ - hasPathValidationListener: this.hasPathValidationListener, - hasVersionNegotiationListener: this.hasVersionNegotiationListener, - hasDatagramListener: this.hasDatagramListener, - hasSessionTicketListener: this.hasSessionTicketListener, - isClosing: this.isClosing, - isGracefulClose: this.isGracefulClose, - isSilentClose: this.isSilentClose, - isStatelessReset: this.isStatelessReset, - isDestroyed: this.isDestroyed, - isHandshakeCompleted: this.isHandshakeCompleted, - isHandshakeConfirmed: this.isHandshakeConfirmed, - isStreamOpenAllowed: this.isStreamOpenAllowed, - isPrioritySupported: this.isPrioritySupported, - isWrapped: this.isWrapped, - lastDatagramId: this.lastDatagramId, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the state into a new DataView since the underlying - // buffer will be destroyed. 
- this.#handle = new DataView(new ArrayBuffer(0)); - } -} - -class Session { - /** @type {Endpoint} */ +class QuicSession { + /** @type {QuicEndpoint} */ #endpoint = undefined; /** @type {boolean} */ #isPendingClose = false; @@ -1361,9 +668,9 @@ class Session { #pendingClose; /** @type {SocketAddress|undefined} */ #remoteAddress = undefined; - /** @type {SessionState} */ + /** @type {QuicSessionState} */ #state; - /** @type {SessionStats} */ + /** @type {QuicSessionStats} */ #stats; /** @type {QuicStream[]} */ #streams = []; @@ -1388,12 +695,12 @@ class Session { * @param {SessionCallbackConfiguration} config * @param {StreamCallbackConfiguration} streamConfig * @param {object} [handle] - * @param {Endpoint} [endpoint] + * @param {QuicEndpoint} [endpoint] */ constructor(config, streamConfig, handle, endpoint) { validateObject(config, 'config'); - this.#stats = new SessionStats(handle.stats); - this.#state = new SessionState(handle.state); + this.#stats = new QuicSessionStats(kPrivateConstructor, handle.stats); + this.#state = new QuicSessionState(kPrivateConstructor, handle.state); const { ondatagram, ondatagramstatus, @@ -1440,13 +747,13 @@ class Session { return this.#handle === undefined || this.#isPendingClose; } - /** @type {SessionStats} */ + /** @type {QuicSessionStats} */ get stats() { return this.#stats; } - /** @type {SessionState} */ + /** @type {QuicSessionState} */ get state() { return this.#state; } - /** @type {Endpoint} */ + /** @type {QuicEndpoint} */ get endpoint() { return this.#endpoint; } /** @type {Path} */ @@ -1669,7 +976,7 @@ class Session { depth: options.depth == null ? null : options.depth - 1, }; - return `Session ${inspect({ + return `QuicSession ${inspect({ closed: this.closed, closing: this.#isPendingClose, destroyed: this.destroyed, @@ -1688,242 +995,6 @@ class Session { async [SymbolAsyncDispose]() { await this.close(); } } -class EndpointStats { - /** @type {BigUint64Array} */ - #handle; - - /** - * @param {ArrayBuffer} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new BigUint64Array(buffer); - } - - /** @type {bigint} */ - get createdAt() { - return this.#handle[IDX_STATS_ENDPOINT_CREATED_AT]; - } - - /** @type {bigint} */ - get destroyedAt() { - return this.#handle[IDX_STATS_ENDPOINT_DESTROYED_AT]; - } - - /** @type {bigint} */ - get bytesReceived() { - return this.#handle[IDX_STATS_ENDPOINT_BYTES_RECEIVED]; - } - - /** @type {bigint} */ - get bytesSent() { - return this.#handle[IDX_STATS_ENDPOINT_BYTES_SENT]; - } - - /** @type {bigint} */ - get packetsReceived() { - return this.#handle[IDX_STATS_ENDPOINT_PACKETS_RECEIVED]; - } - - /** @type {bigint} */ - get packetsSent() { - return this.#handle[IDX_STATS_ENDPOINT_PACKETS_SENT]; - } - - /** @type {bigint} */ - get serverSessions() { - return this.#handle[IDX_STATS_ENDPOINT_SERVER_SESSIONS]; - } - - /** @type {bigint} */ - get clientSessions() { - return this.#handle[IDX_STATS_ENDPOINT_CLIENT_SESSIONS]; - } - - /** @type {bigint} */ - get serverBusyCount() { - return this.#handle[IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT]; - } - - /** @type {bigint} */ - get retryCount() { - return this.#handle[IDX_STATS_ENDPOINT_RETRY_COUNT]; - } - - /** @type {bigint} */ - get versionNegotiationCount() { - return this.#handle[IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT]; - } - - /** @type {bigint} */ - get statelessResetCount() { - return this.#handle[IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT]; - } - - /** @type 
{bigint} */ - get immediateCloseCount() { - return this.#handle[IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT]; - } - - toJSON() { - return { - __proto__: null, - createdAt: `${this.createdAt}`, - destroyedAt: `${this.destroyedAt}`, - bytesReceived: `${this.bytesReceived}`, - bytesSent: `${this.bytesSent}`, - packetsReceived: `${this.packetsReceived}`, - packetsSent: `${this.packetsSent}`, - serverSessions: `${this.serverSessions}`, - clientSessions: `${this.clientSessions}`, - serverBusyCount: `${this.serverBusyCount}`, - retryCount: `${this.retryCount}`, - versionNegotiationCount: `${this.versionNegotiationCount}`, - statelessResetCount: `${this.statelessResetCount}`, - immediateCloseCount: `${this.immediateCloseCount}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `EndpointStats ${inspect({ - createdAt: this.createdAt, - destroyedAt: this.destroyedAt, - bytesReceived: this.bytesReceived, - bytesSent: this.bytesSent, - packetsReceived: this.packetsReceived, - packetsSent: this.packetsSent, - serverSessions: this.serverSessions, - clientSessions: this.clientSessions, - serverBusyCount: this.serverBusyCount, - retryCount: this.retryCount, - versionNegotiationCount: this.versionNegotiationCount, - statelessResetCount: this.statelessResetCount, - immediateCloseCount: this.immediateCloseCount, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. - this.#handle = new BigUint64Array(this.#handle); - } -} - -class EndpointState { - /** @type {DataView} */ - #handle; - - /** - * @param {ArrayBuffer} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new DataView(buffer); - } - - #assertNotClosed() { - if (this.#handle.byteLength === 0) { - throw new ERR_INVALID_STATE('Endpoint is closed'); - } - } - - /** @type {boolean} */ - get isBound() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BOUND); - } - - /** @type {boolean} */ - get isReceiving() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_RECEIVING); - } - - /** @type {boolean} */ - get isListening() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_LISTENING); - } - - /** @type {boolean} */ - get isClosing() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_CLOSING); - } - - /** @type {boolean} */ - get isBusy() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BUSY); - } - - /** - * The number of underlying callbacks that are pending. If the session - * is closing, these are the number of callbacks that the session is - * waiting on before it can be closed. 
- * @type {bigint} - */ - get pendingCallbacks() { - this.#assertNotClosed(); - return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_ENDPOINT_PENDING_CALLBACKS); - } - - toJSON() { - if (this.#handle.byteLength === 0) return {}; - return { - __proto__: null, - isBound: this.isBound, - isReceiving: this.isReceiving, - isListening: this.isListening, - isClosing: this.isClosing, - isBusy: this.isBusy, - pendingCallbacks: `${this.pendingCallbacks}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - if (this.#handle.byteLength === 0) { - return 'EndpointState { }'; - } - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `EndpointState ${inspect({ - isBound: this.isBound, - isReceiving: this.isReceiving, - isListening: this.isListening, - isClosing: this.isClosing, - isBusy: this.isBusy, - pendingCallbacks: this.pendingCallbacks, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the state into a new DataView since the underlying - // buffer will be destroyed. - if (this.#handle.byteLength === 0) return; - this.#handle = new DataView(new ArrayBuffer(0)); - } -} - function validateStreamConfig(config, name = 'config') { validateObject(config, name); if (config.onerror !== undefined) @@ -1968,7 +1039,7 @@ function validateEndpointConfig(config) { return config; } -class Endpoint { +class QuicEndpoint { /** @type {SocketAddress|undefined} */ #address = undefined; /** @type {boolean} */ @@ -1983,11 +1054,11 @@ class Endpoint { #pendingClose; /** @type {any} */ #pendingError = undefined; - /** @type {Session[]} */ + /** @type {QuicSession[]} */ #sessions = []; - /** @type {EndpointState} */ + /** @type {QuicEndpointState} */ #state; - /** @type {EndpointStats} */ + /** @type {QuicEndpointStats} */ #stats; /** @type {OnSessionCallback} */ #onsession; @@ -2074,14 +1145,14 @@ class Endpoint { tokenSecret, }); this.#handle[kOwner] = this; - this.#stats = new EndpointStats(this.#handle.stats); - this.#state = new EndpointState(this.#handle.state); + this.#stats = new QuicEndpointStats(kPrivateConstructor, this.#handle.stats); + this.#state = new QuicEndpointState(kPrivateConstructor, this.#handle.state); } - /** @type {EndpointStats} */ + /** @type {QuicEndpointStats} */ get stats() { return this.#stats; } - /** @type {EndpointState} */ + /** @type {QuicEndpointState} */ get state() { return this.#state; } get #isClosedOrClosing() { @@ -2164,7 +1235,7 @@ class Endpoint { * Initiates a session with a remote endpoint. * @param {SocketAddress} address * @param {SessionOptions} [options] - * @returns {Session} + * @returns {QuicSession} */ connect(address, options = kEmptyObject) { if (this.#isClosedOrClosing) { @@ -2207,7 +1278,7 @@ class Endpoint { if (handle === undefined) { throw new ERR_QUIC_CONNECTION_FAILED(); } - const session = new Session(this.#sessionConfig, this.#streamConfig, handle, this); + const session = new QuicSession(this.#sessionConfig, this.#streamConfig, handle, this); ArrayPrototypePush(this.#sessions, session); return session; } @@ -2322,7 +1393,7 @@ class Endpoint { } [kNewSession](handle) { - const session = new Session(this.#sessionConfig, this.#streamConfig, handle, this); + const session = new QuicSession(this.#sessionConfig, this.#streamConfig, handle, this); ArrayPrototypePush(this.#sessions, session); this.#onsession(session, this); } @@ -2338,7 +1409,7 @@ class Endpoint { depth: options.depth == null ? 
null : options.depth - 1, }; - return `Endpoint ${inspect({ + return `QuicEndpoint ${inspect({ address: this.address, busy: this.busy, closed: this.closed, @@ -2352,125 +1423,7 @@ class Endpoint { } }; -ObjectDefineProperties(QuicStreamStats.prototype, { - createdAt: kEnumerable, - receivedAt: kEnumerable, - ackedAt: kEnumerable, - closingAt: kEnumerable, - destroyedAt: kEnumerable, - bytesReceived: kEnumerable, - bytesSent: kEnumerable, - maxOffset: kEnumerable, - maxOffsetAcknowledged: kEnumerable, - maxOffsetReceived: kEnumerable, - finalSize: kEnumerable, -}); -ObjectDefineProperties(QuicStreamState.prototype, { - id: kEnumerable, - finSent: kEnumerable, - finReceived: kEnumerable, - readEnded: kEnumerable, - writeEnded: kEnumerable, - destroyed: kEnumerable, - paused: kEnumerable, - reset: kEnumerable, - hasReader: kEnumerable, - wantsBlock: kEnumerable, - wantsHeaders: kEnumerable, - wantsReset: kEnumerable, - wantsTrailers: kEnumerable, -}); -ObjectDefineProperties(QuicStream.prototype, { - stats: kEnumerable, - state: kEnumerable, - session: kEnumerable, - id: kEnumerable, -}); -ObjectDefineProperties(SessionStats.prototype, { - createdAt: kEnumerable, - closingAt: kEnumerable, - destroyedAt: kEnumerable, - handshakeCompletedAt: kEnumerable, - handshakeConfirmedAt: kEnumerable, - gracefulClosingAt: kEnumerable, - bytesReceived: kEnumerable, - bytesSent: kEnumerable, - bidiInStreamCount: kEnumerable, - bidiOutStreamCount: kEnumerable, - uniInStreamCount: kEnumerable, - uniOutStreamCount: kEnumerable, - lossRetransmitCount: kEnumerable, - maxBytesInFlights: kEnumerable, - bytesInFlight: kEnumerable, - blockCount: kEnumerable, - cwnd: kEnumerable, - latestRtt: kEnumerable, - minRtt: kEnumerable, - rttVar: kEnumerable, - smoothedRtt: kEnumerable, - ssthresh: kEnumerable, - datagramsReceived: kEnumerable, - datagramsSent: kEnumerable, - datagramsAcknowledged: kEnumerable, - datagramsLost: kEnumerable, -}); -ObjectDefineProperties(SessionState.prototype, { - hasPathValidationListener: kEnumerable, - hasVersionNegotiationListener: kEnumerable, - hasDatagramListener: kEnumerable, - hasSessionTicketListener: kEnumerable, - isClosing: kEnumerable, - isGracefulClose: kEnumerable, - isSilentClose: kEnumerable, - isStatelessReset: kEnumerable, - isDestroyed: kEnumerable, - isHandshakeCompleted: kEnumerable, - isHandshakeConfirmed: kEnumerable, - isStreamOpenAllowed: kEnumerable, - isPrioritySupported: kEnumerable, - isWrapped: kEnumerable, - lastDatagramId: kEnumerable, -}); -ObjectDefineProperties(Session.prototype, { - closed: kEnumerable, - destroyed: kEnumerable, - endpoint: kEnumerable, - path: kEnumerable, - state: kEnumerable, - stats: kEnumerable, -}); -ObjectDefineProperties(EndpointStats.prototype, { - createdAt: kEnumerable, - destroyedAt: kEnumerable, - bytesReceived: kEnumerable, - bytesSent: kEnumerable, - packetsReceived: kEnumerable, - packetsSent: kEnumerable, - serverSessions: kEnumerable, - clientSessions: kEnumerable, - serverBusyCount: kEnumerable, - retryCount: kEnumerable, - versionNegotiationCount: kEnumerable, - statelessResetCount: kEnumerable, - immediateCloseCount: kEnumerable, -}); -ObjectDefineProperties(EndpointState.prototype, { - isBound: kEnumerable, - isReceiving: kEnumerable, - isListening: kEnumerable, - isClosing: kEnumerable, - isBusy: kEnumerable, - pendingCallbacks: kEnumerable, -}); -ObjectDefineProperties(Endpoint.prototype, { - address: kEnumerable, - busy: kEnumerable, - closed: kEnumerable, - destroyed: kEnumerable, - state: kEnumerable, - stats: 
kEnumerable, -}); -ObjectDefineProperties(Endpoint, { +ObjectDefineProperties(QuicEndpoint, { CC_ALGO_RENO: { __proto__: null, value: CC_ALGO_RENO, @@ -2514,7 +1467,7 @@ ObjectDefineProperties(Endpoint, { enumerable: true, }, }); -ObjectDefineProperties(Session, { +ObjectDefineProperties(QuicSession, { DEFAULT_CIPHERS: { __proto__: null, value: DEFAULT_CIPHERS, @@ -2532,17 +1485,15 @@ ObjectDefineProperties(Session, { }); module.exports = { - Endpoint, - Session, + QuicEndpoint, + QuicSession, QuicStream, - // Exported for testing - kFinishClose, - SessionState, - SessionStats, + QuicSessionState, + QuicSessionStats, QuicStreamState, QuicStreamStats, - EndpointState, - EndpointStats, + QuicEndpointState, + QuicEndpointStats, }; /* c8 ignore stop */ diff --git a/lib/internal/quic/state.js b/lib/internal/quic/state.js new file mode 100644 index 00000000000000..8bfb2ac83302fb --- /dev/null +++ b/lib/internal/quic/state.js @@ -0,0 +1,566 @@ +'use strict'; + +const { + ArrayBuffer, + DataView, + DataViewPrototypeGetBigInt64, + DataViewPrototypeGetBigUint64, + DataViewPrototypeGetUint8, + DataViewPrototypeSetUint8, + JSONStringify, +} = primordials; + +const { + codes: { + ERR_ILLEGAL_CONSTRUCTOR, + ERR_INVALID_ARG_TYPE, + ERR_INVALID_STATE, + }, +} = require('internal/errors'); + +const { + isArrayBuffer, +} = require('util/types'); + +const { inspect } = require('internal/util/inspect'); + +const { + kFinishClose, + kInspect, + kPrivateConstructor, +} = require('internal/quic/symbols'); + +// This file defines the helper objects for accessing state for +// various QUIC objects. Each of these wraps a DataView. +// Some of the state properties are read only, others are mutable. +// An ArrayBuffer is shared with the C++ level to allow for more +// efficient communication of state across the C++/JS boundary. +// When the state object is no longer needed, it is closed to +// prevent further updates to the buffer. 
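The shared-buffer pattern described in the header comment above works roughly like this: the native layer allocates an ArrayBuffer, the JavaScript side wraps it in a DataView and reads each flag as a single byte at a fixed index, and closing the object swaps in a zero-length view so later reads can no longer touch memory owned by the native side. Below is a minimal, self-contained sketch of the idea (illustrative only; the class name and index constant are hypothetical, and it uses plain globals rather than the primordials the real file uses):

// Illustrative sketch of the DataView-over-shared-ArrayBuffer pattern.
const IDX_EXAMPLE_FLAG = 0;  // hypothetical byte offset into the shared buffer

class ExampleState {
  #handle;

  constructor(buffer) {
    // `buffer` stands in for the ArrayBuffer shared with the C++ layer.
    this.#handle = new DataView(buffer);
  }

  // Read-only flags are read as a single byte at a fixed index.
  get exampleFlag() {
    return !!this.#handle.getUint8(IDX_EXAMPLE_FLAG);
  }

  // Mutable flags are written back into the same shared byte.
  set exampleFlag(val) {
    this.#handle.setUint8(IDX_EXAMPLE_FLAG, val ? 1 : 0);
  }

  // Closing detaches from the shared buffer so no further reads or
  // writes can reach memory that the native side may free.
  finishClose() {
    this.#handle = new DataView(new ArrayBuffer(0));
  }
}

After finishClose(), any accessor that guards on the view's byteLength (as the real classes do with #assertNotClosed) reports the object as closed instead of returning stale data.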
+ +const { + IDX_STATE_SESSION_PATH_VALIDATION, + IDX_STATE_SESSION_VERSION_NEGOTIATION, + IDX_STATE_SESSION_DATAGRAM, + IDX_STATE_SESSION_SESSION_TICKET, + IDX_STATE_SESSION_CLOSING, + IDX_STATE_SESSION_GRACEFUL_CLOSE, + IDX_STATE_SESSION_SILENT_CLOSE, + IDX_STATE_SESSION_STATELESS_RESET, + IDX_STATE_SESSION_DESTROYED, + IDX_STATE_SESSION_HANDSHAKE_COMPLETED, + IDX_STATE_SESSION_HANDSHAKE_CONFIRMED, + IDX_STATE_SESSION_STREAM_OPEN_ALLOWED, + IDX_STATE_SESSION_PRIORITY_SUPPORTED, + IDX_STATE_SESSION_WRAPPED, + IDX_STATE_SESSION_LAST_DATAGRAM_ID, + + IDX_STATE_ENDPOINT_BOUND, + IDX_STATE_ENDPOINT_RECEIVING, + IDX_STATE_ENDPOINT_LISTENING, + IDX_STATE_ENDPOINT_CLOSING, + IDX_STATE_ENDPOINT_BUSY, + IDX_STATE_ENDPOINT_PENDING_CALLBACKS, + + IDX_STATE_STREAM_ID, + IDX_STATE_STREAM_FIN_SENT, + IDX_STATE_STREAM_FIN_RECEIVED, + IDX_STATE_STREAM_READ_ENDED, + IDX_STATE_STREAM_WRITE_ENDED, + IDX_STATE_STREAM_DESTROYED, + IDX_STATE_STREAM_PAUSED, + IDX_STATE_STREAM_RESET, + IDX_STATE_STREAM_HAS_READER, + IDX_STATE_STREAM_WANTS_BLOCK, + IDX_STATE_STREAM_WANTS_HEADERS, + IDX_STATE_STREAM_WANTS_RESET, + IDX_STATE_STREAM_WANTS_TRAILERS, +} = internalBinding('quic'); + +class QuicEndpointState { + /** @type {DataView} */ + #handle; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new DataView(buffer); + } + + #assertNotClosed() { + if (this.#handle.byteLength === 0) { + throw new ERR_INVALID_STATE('Endpoint is closed'); + } + } + + /** @type {boolean} */ + get isBound() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BOUND); + } + + /** @type {boolean} */ + get isReceiving() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_RECEIVING); + } + + /** @type {boolean} */ + get isListening() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_LISTENING); + } + + /** @type {boolean} */ + get isClosing() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_CLOSING); + } + + /** @type {boolean} */ + get isBusy() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BUSY); + } + + /** + * The number of underlying callbacks that are pending. If the session + * is closing, these are the number of callbacks that the session is + * waiting on before it can be closed. + * @type {bigint} + */ + get pendingCallbacks() { + this.#assertNotClosed(); + return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_ENDPOINT_PENDING_CALLBACKS); + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + if (this.#handle.byteLength === 0) return {}; + return { + __proto__: null, + isBound: this.isBound, + isReceiving: this.isReceiving, + isListening: this.isListening, + isClosing: this.isClosing, + isBusy: this.isBusy, + pendingCallbacks: `${this.pendingCallbacks}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + if (this.#handle.byteLength === 0) { + return 'QuicEndpointState { }'; + } + + const opts = { + ...options, + depth: options.depth == null ? 
null : options.depth - 1, + }; + + return `QuicEndpointState ${inspect({ + isBound: this.isBound, + isReceiving: this.isReceiving, + isListening: this.isListening, + isClosing: this.isClosing, + isBusy: this.isBusy, + pendingCallbacks: this.pendingCallbacks, + }, opts)}`; + } + + [kFinishClose]() { + // Snapshot the state into a new DataView since the underlying + // buffer will be destroyed. + if (this.#handle.byteLength === 0) return; + this.#handle = new DataView(new ArrayBuffer(0)); + } +} + +class QuicSessionState { + /** @type {DataView} */ + #handle; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new DataView(buffer); + } + + #assertNotClosed() { + if (this.#handle.byteLength === 0) { + throw new ERR_INVALID_STATE('Session is closed'); + } + } + + /** @type {boolean} */ + get hasPathValidationListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION); + } + + /** @type {boolean} */ + set hasPathValidationListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION, val ? 1 : 0); + } + + /** @type {boolean} */ + get hasVersionNegotiationListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION); + } + + /** @type {boolean} */ + set hasVersionNegotiationListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION, val ? 1 : 0); + } + + /** @type {boolean} */ + get hasDatagramListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM); + } + + /** @type {boolean} */ + set hasDatagramListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM, val ? 1 : 0); + } + + /** @type {boolean} */ + get hasSessionTicketListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET); + } + + /** @type {boolean} */ + set hasSessionTicketListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET, val ? 
1 : 0); + } + + /** @type {boolean} */ + get isClosing() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_CLOSING); + } + + /** @type {boolean} */ + get isGracefulClose() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_GRACEFUL_CLOSE); + } + + /** @type {boolean} */ + get isSilentClose() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SILENT_CLOSE); + } + + /** @type {boolean} */ + get isStatelessReset() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STATELESS_RESET); + } + + /** @type {boolean} */ + get isDestroyed() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DESTROYED); + } + + /** @type {boolean} */ + get isHandshakeCompleted() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_COMPLETED); + } + + /** @type {boolean} */ + get isHandshakeConfirmed() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_CONFIRMED); + } + + /** @type {boolean} */ + get isStreamOpenAllowed() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STREAM_OPEN_ALLOWED); + } + + /** @type {boolean} */ + get isPrioritySupported() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PRIORITY_SUPPORTED); + } + + /** @type {boolean} */ + get isWrapped() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_WRAPPED); + } + + /** @type {bigint} */ + get lastDatagramId() { + this.#assertNotClosed(); + return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_SESSION_LAST_DATAGRAM_ID); + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + if (this.#handle.byteLength === 0) return {}; + return { + __proto__: null, + hasPathValidationListener: this.hasPathValidationListener, + hasVersionNegotiationListener: this.hasVersionNegotiationListener, + hasDatagramListener: this.hasDatagramListener, + hasSessionTicketListener: this.hasSessionTicketListener, + isClosing: this.isClosing, + isGracefulClose: this.isGracefulClose, + isSilentClose: this.isSilentClose, + isStatelessReset: this.isStatelessReset, + isDestroyed: this.isDestroyed, + isHandshakeCompleted: this.isHandshakeCompleted, + isHandshakeConfirmed: this.isHandshakeConfirmed, + isStreamOpenAllowed: this.isStreamOpenAllowed, + isPrioritySupported: this.isPrioritySupported, + isWrapped: this.isWrapped, + lastDatagramId: `${this.lastDatagramId}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + if (this.#handle.byteLength === 0) { + return 'QuicSessionState { }'; + } + + const opts = { + ...options, + depth: options.depth == null ? 
null : options.depth - 1, + }; + + return `QuicSessionState ${inspect({ + hasPathValidationListener: this.hasPathValidationListener, + hasVersionNegotiationListener: this.hasVersionNegotiationListener, + hasDatagramListener: this.hasDatagramListener, + hasSessionTicketListener: this.hasSessionTicketListener, + isClosing: this.isClosing, + isGracefulClose: this.isGracefulClose, + isSilentClose: this.isSilentClose, + isStatelessReset: this.isStatelessReset, + isDestroyed: this.isDestroyed, + isHandshakeCompleted: this.isHandshakeCompleted, + isHandshakeConfirmed: this.isHandshakeConfirmed, + isStreamOpenAllowed: this.isStreamOpenAllowed, + isPrioritySupported: this.isPrioritySupported, + isWrapped: this.isWrapped, + lastDatagramId: this.lastDatagramId, + }, opts)}`; + } + + [kFinishClose]() { + // Snapshot the state into a new DataView since the underlying + // buffer will be destroyed. + if (this.#handle.byteLength === 0) return; + this.#handle = new DataView(new ArrayBuffer(0)); + } +} + +class QuicStreamState { + /** @type {DataView} */ + #handle; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new DataView(buffer); + } + + /** @type {bigint} */ + get id() { + return DataViewPrototypeGetBigInt64(this.#handle, IDX_STATE_STREAM_ID); + } + + /** @type {boolean} */ + get finSent() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_SENT); + } + + /** @type {boolean} */ + get finReceived() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_RECEIVED); + } + + /** @type {boolean} */ + get readEnded() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_READ_ENDED); + } + + /** @type {boolean} */ + get writeEnded() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WRITE_ENDED); + } + + /** @type {boolean} */ + get destroyed() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_DESTROYED); + } + + /** @type {boolean} */ + get paused() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_PAUSED); + } + + /** @type {boolean} */ + get reset() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_RESET); + } + + /** @type {boolean} */ + get hasReader() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_HAS_READER); + } + + /** @type {boolean} */ + get wantsBlock() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK); + } + + /** @type {boolean} */ + set wantsBlock(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK, val ? 1 : 0); + } + + /** @type {boolean} */ + get wantsHeaders() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS); + } + + /** @type {boolean} */ + set wantsHeaders(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS, val ? 1 : 0); + } + + /** @type {boolean} */ + get wantsReset() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET); + } + + /** @type {boolean} */ + set wantsReset(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET, val ? 
1 : 0); + } + + /** @type {boolean} */ + get wantsTrailers() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS); + } + + /** @type {boolean} */ + set wantsTrailers(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS, val ? 1 : 0); + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + if (this.#handle.byteLength === 0) return {}; + return { + __proto__: null, + id: `${this.id}`, + finSent: this.finSent, + finReceived: this.finReceived, + readEnded: this.readEnded, + writeEnded: this.writeEnded, + destroyed: this.destroyed, + paused: this.paused, + reset: this.reset, + hasReader: this.hasReader, + wantsBlock: this.wantsBlock, + wantsHeaders: this.wantsHeaders, + wantsReset: this.wantsReset, + wantsTrailers: this.wantsTrailers, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + if (this.#handle.byteLength === 0) { + return 'QuicStreamState { }'; + } + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicStreamState ${inspect({ + id: this.id, + finSent: this.finSent, + finReceived: this.finReceived, + readEnded: this.readEnded, + writeEnded: this.writeEnded, + destroyed: this.destroyed, + paused: this.paused, + reset: this.reset, + hasReader: this.hasReader, + wantsBlock: this.wantsBlock, + wantsHeaders: this.wantsHeaders, + wantsReset: this.wantsReset, + wantsTrailers: this.wantsTrailers, + }, opts)}`; + } + + [kFinishClose]() { + // Snapshot the state into a new DataView since the underlying + // buffer will be destroyed. + if (this.#handle.byteLength === 0) return; + this.#handle = new DataView(new ArrayBuffer(0)); + } +} + +module.exports = { + QuicEndpointState, + QuicSessionState, + QuicStreamState, +}; diff --git a/lib/internal/quic/stats.js b/lib/internal/quic/stats.js new file mode 100644 index 00000000000000..3ac9523d9aeca4 --- /dev/null +++ b/lib/internal/quic/stats.js @@ -0,0 +1,646 @@ +'use strict'; + +const { + BigUint64Array, + JSONStringify, +} = primordials; + +const { + isArrayBuffer, +} = require('util/types'); + +const { + codes: { + ERR_ILLEGAL_CONSTRUCTOR, + ERR_INVALID_ARG_TYPE, + }, +} = require('internal/errors'); + +const { inspect } = require('internal/util/inspect'); + +const { + kFinishClose, + kInspect, + kPrivateConstructor, +} = require('internal/quic/symbols'); + +// This file defines the helper objects for accessing statistics collected +// by various QUIC objects. Each of these wraps a BigUint64Array. Every +// stats object is read-only via the API, and the underlying buffer is +// only updated by the QUIC internals. When the stats object is no longer +// needed, it is closed to prevent further updates to the buffer. + +const { + // All of the IDX_STATS_* constants are the index positions of the stats + // fields in the relevant BigUint64Array's that underlie the *Stats objects. + // These are not exposed to end users. 
+ IDX_STATS_ENDPOINT_CREATED_AT, + IDX_STATS_ENDPOINT_DESTROYED_AT, + IDX_STATS_ENDPOINT_BYTES_RECEIVED, + IDX_STATS_ENDPOINT_BYTES_SENT, + IDX_STATS_ENDPOINT_PACKETS_RECEIVED, + IDX_STATS_ENDPOINT_PACKETS_SENT, + IDX_STATS_ENDPOINT_SERVER_SESSIONS, + IDX_STATS_ENDPOINT_CLIENT_SESSIONS, + IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT, + IDX_STATS_ENDPOINT_RETRY_COUNT, + IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT, + IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT, + IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT, + + IDX_STATS_SESSION_CREATED_AT, + IDX_STATS_SESSION_CLOSING_AT, + IDX_STATS_SESSION_DESTROYED_AT, + IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT, + IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT, + IDX_STATS_SESSION_GRACEFUL_CLOSING_AT, + IDX_STATS_SESSION_BYTES_RECEIVED, + IDX_STATS_SESSION_BYTES_SENT, + IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT, + IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT, + IDX_STATS_SESSION_UNI_IN_STREAM_COUNT, + IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT, + IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT, + IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT, + IDX_STATS_SESSION_BYTES_IN_FLIGHT, + IDX_STATS_SESSION_BLOCK_COUNT, + IDX_STATS_SESSION_CWND, + IDX_STATS_SESSION_LATEST_RTT, + IDX_STATS_SESSION_MIN_RTT, + IDX_STATS_SESSION_RTTVAR, + IDX_STATS_SESSION_SMOOTHED_RTT, + IDX_STATS_SESSION_SSTHRESH, + IDX_STATS_SESSION_DATAGRAMS_RECEIVED, + IDX_STATS_SESSION_DATAGRAMS_SENT, + IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED, + IDX_STATS_SESSION_DATAGRAMS_LOST, + + IDX_STATS_STREAM_CREATED_AT, + IDX_STATS_STREAM_RECEIVED_AT, + IDX_STATS_STREAM_ACKED_AT, + IDX_STATS_STREAM_CLOSING_AT, + IDX_STATS_STREAM_DESTROYED_AT, + IDX_STATS_STREAM_BYTES_RECEIVED, + IDX_STATS_STREAM_BYTES_SENT, + IDX_STATS_STREAM_MAX_OFFSET, + IDX_STATS_STREAM_MAX_OFFSET_ACK, + IDX_STATS_STREAM_MAX_OFFSET_RECV, + IDX_STATS_STREAM_FINAL_SIZE, +} = internalBinding('quic'); + +class QuicEndpointStats { + /** @type {BigUint64Array} */ + #handle; + /** @type {boolean} */ + #disconnected = false; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + // We use the kPrivateConstructor symbol to restrict the ability to + // create new instances of QuicEndpointStats to internal code. 
+ if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new BigUint64Array(buffer); + } + + /** @type {bigint} */ + get createdAt() { + return this.#handle[IDX_STATS_ENDPOINT_CREATED_AT]; + } + + /** @type {bigint} */ + get destroyedAt() { + return this.#handle[IDX_STATS_ENDPOINT_DESTROYED_AT]; + } + + /** @type {bigint} */ + get bytesReceived() { + return this.#handle[IDX_STATS_ENDPOINT_BYTES_RECEIVED]; + } + + /** @type {bigint} */ + get bytesSent() { + return this.#handle[IDX_STATS_ENDPOINT_BYTES_SENT]; + } + + /** @type {bigint} */ + get packetsReceived() { + return this.#handle[IDX_STATS_ENDPOINT_PACKETS_RECEIVED]; + } + + /** @type {bigint} */ + get packetsSent() { + return this.#handle[IDX_STATS_ENDPOINT_PACKETS_SENT]; + } + + /** @type {bigint} */ + get serverSessions() { + return this.#handle[IDX_STATS_ENDPOINT_SERVER_SESSIONS]; + } + + /** @type {bigint} */ + get clientSessions() { + return this.#handle[IDX_STATS_ENDPOINT_CLIENT_SESSIONS]; + } + + /** @type {bigint} */ + get serverBusyCount() { + return this.#handle[IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT]; + } + + /** @type {bigint} */ + get retryCount() { + return this.#handle[IDX_STATS_ENDPOINT_RETRY_COUNT]; + } + + /** @type {bigint} */ + get versionNegotiationCount() { + return this.#handle[IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT]; + } + + /** @type {bigint} */ + get statelessResetCount() { + return this.#handle[IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT]; + } + + /** @type {bigint} */ + get immediateCloseCount() { + return this.#handle[IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT]; + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + return { + __proto__: null, + connected: this.isConnected, + // We need to convert the values to strings because JSON does not + // support BigInts. + createdAt: `${this.createdAt}`, + destroyedAt: `${this.destroyedAt}`, + bytesReceived: `${this.bytesReceived}`, + bytesSent: `${this.bytesSent}`, + packetsReceived: `${this.packetsReceived}`, + packetsSent: `${this.packetsSent}`, + serverSessions: `${this.serverSessions}`, + clientSessions: `${this.clientSessions}`, + serverBusyCount: `${this.serverBusyCount}`, + retryCount: `${this.retryCount}`, + versionNegotiationCount: `${this.versionNegotiationCount}`, + statelessResetCount: `${this.statelessResetCount}`, + immediateCloseCount: `${this.immediateCloseCount}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicEndpointStats ${inspect({ + connected: this.isConnected, + createdAt: this.createdAt, + destroyedAt: this.destroyedAt, + bytesReceived: this.bytesReceived, + bytesSent: this.bytesSent, + packetsReceived: this.packetsReceived, + packetsSent: this.packetsSent, + serverSessions: this.serverSessions, + clientSessions: this.clientSessions, + serverBusyCount: this.serverBusyCount, + retryCount: this.retryCount, + versionNegotiationCount: this.versionNegotiationCount, + statelessResetCount: this.statelessResetCount, + immediateCloseCount: this.immediateCloseCount, + }, opts)}`; + } + + /** + * True if this QuicEndpointStats object is still connected to the underlying + * Endpoint stats source. If this returns false, then the stats object is + * no longer being updated and should be considered stale. 
+ * @returns {boolean}
+   */
+  get isConnected() {
+    return !this.#disconnected;
+  }
+
+  [kFinishClose]() {
+    // Snapshot the stats into a new BigUint64Array since the underlying
+    // buffer will be destroyed.
+    this.#handle = new BigUint64Array(this.#handle);
+    this.#disconnected = true;
+  }
+}
+
+class QuicSessionStats {
+  /** @type {BigUint64Array} */
+  #handle;
+  /** @type {boolean} */
+  #disconnected = false;
+
+  /**
+   * @param {symbol} privateSymbol
+   * @param {ArrayBuffer} buffer
+   */
+  constructor(privateSymbol, buffer) {
+    // We use the kPrivateConstructor symbol to restrict the ability to
+    // create new instances of QuicSessionStats to internal code.
+    if (privateSymbol !== kPrivateConstructor) {
+      throw new ERR_ILLEGAL_CONSTRUCTOR();
+    }
+    if (!isArrayBuffer(buffer)) {
+      throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer);
+    }
+    this.#handle = new BigUint64Array(buffer);
+  }
+
+  /** @type {bigint} */
+  get createdAt() {
+    return this.#handle[IDX_STATS_SESSION_CREATED_AT];
+  }
+
+  /** @type {bigint} */
+  get closingAt() {
+    return this.#handle[IDX_STATS_SESSION_CLOSING_AT];
+  }
+
+  /** @type {bigint} */
+  get destroyedAt() {
+    return this.#handle[IDX_STATS_SESSION_DESTROYED_AT];
+  }
+
+  /** @type {bigint} */
+  get handshakeCompletedAt() {
+    return this.#handle[IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT];
+  }
+
+  /** @type {bigint} */
+  get handshakeConfirmedAt() {
+    return this.#handle[IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT];
+  }
+
+  /** @type {bigint} */
+  get gracefulClosingAt() {
+    return this.#handle[IDX_STATS_SESSION_GRACEFUL_CLOSING_AT];
+  }
+
+  /** @type {bigint} */
+  get bytesReceived() {
+    return this.#handle[IDX_STATS_SESSION_BYTES_RECEIVED];
+  }
+
+  /** @type {bigint} */
+  get bytesSent() {
+    return this.#handle[IDX_STATS_SESSION_BYTES_SENT];
+  }
+
+  /** @type {bigint} */
+  get bidiInStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get bidiOutStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get uniInStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_UNI_IN_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get uniOutStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get lossRetransmitCount() {
+    return this.#handle[IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT];
+  }
+
+  /** @type {bigint} */
+  get maxBytesInFlights() {
+    return this.#handle[IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT];
+  }
+
+  /** @type {bigint} */
+  get bytesInFlight() {
+    return this.#handle[IDX_STATS_SESSION_BYTES_IN_FLIGHT];
+  }
+
+  /** @type {bigint} */
+  get blockCount() {
+    return this.#handle[IDX_STATS_SESSION_BLOCK_COUNT];
+  }
+
+  /** @type {bigint} */
+  get cwnd() {
+    return this.#handle[IDX_STATS_SESSION_CWND];
+  }
+
+  /** @type {bigint} */
+  get latestRtt() {
+    return this.#handle[IDX_STATS_SESSION_LATEST_RTT];
+  }
+
+  /** @type {bigint} */
+  get minRtt() {
+    return this.#handle[IDX_STATS_SESSION_MIN_RTT];
+  }
+
+  /** @type {bigint} */
+  get rttVar() {
+    return this.#handle[IDX_STATS_SESSION_RTTVAR];
+  }
+
+  /** @type {bigint} */
+  get smoothedRtt() {
+    return this.#handle[IDX_STATS_SESSION_SMOOTHED_RTT];
+  }
+
+  /** @type {bigint} */
+  get ssthresh() {
+    return this.#handle[IDX_STATS_SESSION_SSTHRESH];
+  }
+
+  /** @type {bigint} */
+  get datagramsReceived() {
+    return this.#handle[IDX_STATS_SESSION_DATAGRAMS_RECEIVED];
+  }
+
+  /** @type {bigint} */
+  get datagramsSent() {
+    return 
this.#handle[IDX_STATS_SESSION_DATAGRAMS_SENT]; + } + + /** @type {bigint} */ + get datagramsAcknowledged() { + return this.#handle[IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED]; + } + + /** @type {bigint} */ + get datagramsLost() { + return this.#handle[IDX_STATS_SESSION_DATAGRAMS_LOST]; + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + return { + __proto__: null, + connected: this.isConnected, + // We need to convert the values to strings because JSON does not + // support BigInts. + createdAt: `${this.createdAt}`, + closingAt: `${this.closingAt}`, + destroyedAt: `${this.destroyedAt}`, + handshakeCompletedAt: `${this.handshakeCompletedAt}`, + handshakeConfirmedAt: `${this.handshakeConfirmedAt}`, + gracefulClosingAt: `${this.gracefulClosingAt}`, + bytesReceived: `${this.bytesReceived}`, + bytesSent: `${this.bytesSent}`, + bidiInStreamCount: `${this.bidiInStreamCount}`, + bidiOutStreamCount: `${this.bidiOutStreamCount}`, + uniInStreamCount: `${this.uniInStreamCount}`, + uniOutStreamCount: `${this.uniOutStreamCount}`, + lossRetransmitCount: `${this.lossRetransmitCount}`, + maxBytesInFlights: `${this.maxBytesInFlights}`, + bytesInFlight: `${this.bytesInFlight}`, + blockCount: `${this.blockCount}`, + cwnd: `${this.cwnd}`, + latestRtt: `${this.latestRtt}`, + minRtt: `${this.minRtt}`, + rttVar: `${this.rttVar}`, + smoothedRtt: `${this.smoothedRtt}`, + ssthresh: `${this.ssthresh}`, + datagramsReceived: `${this.datagramsReceived}`, + datagramsSent: `${this.datagramsSent}`, + datagramsAcknowledged: `${this.datagramsAcknowledged}`, + datagramsLost: `${this.datagramsLost}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicSessionStats ${inspect({ + connected: this.isConnected, + createdAt: this.createdAt, + closingAt: this.closingAt, + destroyedAt: this.destroyedAt, + handshakeCompletedAt: this.handshakeCompletedAt, + handshakeConfirmedAt: this.handshakeConfirmedAt, + gracefulClosingAt: this.gracefulClosingAt, + bytesReceived: this.bytesReceived, + bytesSent: this.bytesSent, + bidiInStreamCount: this.bidiInStreamCount, + bidiOutStreamCount: this.bidiOutStreamCount, + uniInStreamCount: this.uniInStreamCount, + uniOutStreamCount: this.uniOutStreamCount, + lossRetransmitCount: this.lossRetransmitCount, + maxBytesInFlights: this.maxBytesInFlights, + bytesInFlight: this.bytesInFlight, + blockCount: this.blockCount, + cwnd: this.cwnd, + latestRtt: this.latestRtt, + minRtt: this.minRtt, + rttVar: this.rttVar, + smoothedRtt: this.smoothedRtt, + ssthresh: this.ssthresh, + datagramsReceived: this.datagramsReceived, + datagramsSent: this.datagramsSent, + datagramsAcknowledged: this.datagramsAcknowledged, + datagramsLost: this.datagramsLost, + }, opts)}`; + } + + /** + * True if this QuicSessionStats object is still connected to the underlying + * Session stats source. If this returns false, then the stats object is + * no longer being updated and should be considered stale. + * @returns {boolean} + */ + get isConnected() { + return !this.#disconnected; + } + + [kFinishClose]() { + // Snapshot the stats into a new BigUint64Array since the underlying + // buffer will be destroyed. 
+    this.#handle = new BigUint64Array(this.#handle);
+    this.#disconnected = true;
+  }
+}
+
+class QuicStreamStats {
+  /** @type {BigUint64Array} */
+  #handle;
+  /** @type {boolean} */
+  #disconnected = false;
+
+  /**
+   * @param {symbol} privateSymbol
+   * @param {ArrayBuffer} buffer
+   */
+  constructor(privateSymbol, buffer) {
+    // We use the kPrivateConstructor symbol to restrict the ability to
+    // create new instances of QuicStreamStats to internal code.
+    if (privateSymbol !== kPrivateConstructor) {
+      throw new ERR_ILLEGAL_CONSTRUCTOR();
+    }
+    if (!isArrayBuffer(buffer)) {
+      throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer);
+    }
+    this.#handle = new BigUint64Array(buffer);
+  }
+
+  /** @type {bigint} */
+  get createdAt() {
+    return this.#handle[IDX_STATS_STREAM_CREATED_AT];
+  }
+
+  /** @type {bigint} */
+  get receivedAt() {
+    return this.#handle[IDX_STATS_STREAM_RECEIVED_AT];
+  }
+
+  /** @type {bigint} */
+  get ackedAt() {
+    return this.#handle[IDX_STATS_STREAM_ACKED_AT];
+  }
+
+  /** @type {bigint} */
+  get closingAt() {
+    return this.#handle[IDX_STATS_STREAM_CLOSING_AT];
+  }
+
+  /** @type {bigint} */
+  get destroyedAt() {
+    return this.#handle[IDX_STATS_STREAM_DESTROYED_AT];
+  }
+
+  /** @type {bigint} */
+  get bytesReceived() {
+    return this.#handle[IDX_STATS_STREAM_BYTES_RECEIVED];
+  }
+
+  /** @type {bigint} */
+  get bytesSent() {
+    return this.#handle[IDX_STATS_STREAM_BYTES_SENT];
+  }
+
+  /** @type {bigint} */
+  get maxOffset() {
+    return this.#handle[IDX_STATS_STREAM_MAX_OFFSET];
+  }
+
+  /** @type {bigint} */
+  get maxOffsetAcknowledged() {
+    return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_ACK];
+  }
+
+  /** @type {bigint} */
+  get maxOffsetReceived() {
+    return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_RECV];
+  }
+
+  /** @type {bigint} */
+  get finalSize() {
+    return this.#handle[IDX_STATS_STREAM_FINAL_SIZE];
+  }
+
+  toString() {
+    return JSONStringify(this.toJSON());
+  }
+
+  toJSON() {
+    return {
+      __proto__: null,
+      connected: this.isConnected,
+      // We need to convert the values to strings because JSON does not
+      // support BigInts.
+      createdAt: `${this.createdAt}`,
+      receivedAt: `${this.receivedAt}`,
+      ackedAt: `${this.ackedAt}`,
+      closingAt: `${this.closingAt}`,
+      destroyedAt: `${this.destroyedAt}`,
+      bytesReceived: `${this.bytesReceived}`,
+      bytesSent: `${this.bytesSent}`,
+      maxOffset: `${this.maxOffset}`,
+      maxOffsetAcknowledged: `${this.maxOffsetAcknowledged}`,
+      maxOffsetReceived: `${this.maxOffsetReceived}`,
+      finalSize: `${this.finalSize}`,
+    };
+  }
+
+  [kInspect](depth, options) {
+    if (depth < 0)
+      return this;
+
+    const opts = {
+      ...options,
+      depth: options.depth == null ? null : options.depth - 1,
+    };
+
+    return `StreamStats ${inspect({
+      connected: this.isConnected,
+      createdAt: this.createdAt,
+      receivedAt: this.receivedAt,
+      ackedAt: this.ackedAt,
+      closingAt: this.closingAt,
+      destroyedAt: this.destroyedAt,
+      bytesReceived: this.bytesReceived,
+      bytesSent: this.bytesSent,
+      maxOffset: this.maxOffset,
+      maxOffsetAcknowledged: this.maxOffsetAcknowledged,
+      maxOffsetReceived: this.maxOffsetReceived,
+      finalSize: this.finalSize,
+    }, opts)}`;
+  }
+
+  /**
+   * True if this QuicStreamStats object is still connected to the underlying
+   * Stream stats source. If this returns false, then the stats object is
+   * no longer being updated and should be considered stale. 
+ * @returns {boolean} + */ + get isConnected() { + return !this.#disconnected; + } + + [kFinishClose]() { + // Snapshot the stats into a new BigUint64Array since the underlying + // buffer will be destroyed. + this.#handle = new BigUint64Array(this.#handle); + this.#disconnected = true; + } +} + +module.exports = { + QuicEndpointStats, + QuicSessionStats, + QuicStreamStats, +}; diff --git a/lib/internal/quic/symbols.js b/lib/internal/quic/symbols.js new file mode 100644 index 00000000000000..fa2c98320b860e --- /dev/null +++ b/lib/internal/quic/symbols.js @@ -0,0 +1,56 @@ +'use strict'; + +const { + Symbol, +} = primordials; + +const { + customInspectSymbol: kInspect, +} = require('internal/util'); + +const { + kHandle: kKeyObjectHandle, + kKeyObject: kKeyObjectInner, +} = require('internal/crypto/util'); + +// Symbols used to hide various private properties and methods from the +// public API. + +const kBlocked = Symbol('kBlocked'); +const kDatagram = Symbol('kDatagram'); +const kDatagramStatus = Symbol('kDatagramStatus'); +const kError = Symbol('kError'); +const kFinishClose = Symbol('kFinishClose'); +const kHandshake = Symbol('kHandshake'); +const kHeaders = Symbol('kHeaders'); +const kOwner = Symbol('kOwner'); +const kNewSession = Symbol('kNewSession'); +const kNewStream = Symbol('kNewStream'); +const kPathValidation = Symbol('kPathValidation'); +const kReset = Symbol('kReset'); +const kSessionTicket = Symbol('kSessionTicket'); +const kTrailers = Symbol('kTrailers'); +const kVersionNegotiation = Symbol('kVersionNegotiation'); +const kPrivateConstructor = Symbol('kPrivateConstructor'); + +module.exports = { + kBlocked, + kDatagram, + kDatagramStatus, + kError, + kFinishClose, + kHandshake, + kHeaders, + kOwner, + kNewSession, + kNewStream, + kPathValidation, + kReset, + kSessionTicket, + kTrailers, + kVersionNegotiation, + kInspect, + kKeyObjectHandle, + kKeyObjectInner, + kPrivateConstructor, +}; diff --git a/src/node_builtins.cc b/src/node_builtins.cc index 1bec44f6f29b0b..9aaf5626fcfe4a 100644 --- a/src/node_builtins.cc +++ b/src/node_builtins.cc @@ -133,7 +133,8 @@ BuiltinLoader::BuiltinCategories BuiltinLoader::GetBuiltinCategories() const { "internal/streams/lazy_transform", #endif // !HAVE_OPENSSL #if !NODE_OPENSSL_HAS_QUIC - "internal/quic/quic", + "internal/quic/quic", "internal/quic/symbols", "internal/quic/stats", + "internal/quic/state", #endif // !NODE_OPENSSL_HAS_QUIC "sqlite", // Experimental. "sys", // Deprecated. 
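The stats classes introduced above all follow the same internal pattern: construction is gated on the kPrivateConstructor symbol, the getters read BigInt counters out of a BigUint64Array view over a buffer shared with the native handle, and [kFinishClose]() snapshots that view once the native side is torn down. The sketch below is an illustration only, not part of this patch: the 512-byte ArrayBuffer stands in for the buffer the C++ binding normally supplies, and these internal modules are only reachable from Node.js core itself or with --expose-internals.

// Illustrative sketch, assuming an internals context (or --expose-internals).
const { QuicSessionStats } = require('internal/quic/stats');
const { kPrivateConstructor, kFinishClose } = require('internal/quic/symbols');

// Hypothetical stand-in for the ArrayBuffer shared by the native session handle.
const buffer = new ArrayBuffer(512);

const stats = new QuicSessionStats(kPrivateConstructor, buffer);
console.log(stats.isConnected);        // true: still backed by the live buffer
console.log(typeof stats.bytesSent);   // 'bigint': counters come from the BigUint64Array view
console.log(stats.toJSON().bytesSent); // '0': toJSON() stringifies BigInts for JSON output

// When the session goes away, the owner detaches the stats object.
stats[kFinishClose]();
console.log(stats.isConnected);        // false: values are now a frozen snapshot

Passing anything other than the private symbol throws ERR_ILLEGAL_CONSTRUCTOR, which is why the tests updated below construct these objects through kPrivateConstructor as well.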
diff --git a/test/parallel/test-quic-internal-endpoint-listen-defaults.js b/test/parallel/test-quic-internal-endpoint-listen-defaults.js index 7b073104a148ce..987e191b759cdd 100644 --- a/test/parallel/test-quic-internal-endpoint-listen-defaults.js +++ b/test/parallel/test-quic-internal-endpoint-listen-defaults.js @@ -20,11 +20,11 @@ describe('quic internal endpoint listen defaults', { skip: !hasQuic }, async () } = require('net'); const { - Endpoint, + QuicEndpoint, } = require('internal/quic/quic'); it('are reasonable and work as expected', async () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, diff --git a/test/parallel/test-quic-internal-endpoint-options.js b/test/parallel/test-quic-internal-endpoint-options.js index 88dc42c4dd7ade..91c1e6d5b2d312 100644 --- a/test/parallel/test-quic-internal-endpoint-options.js +++ b/test/parallel/test-quic-internal-endpoint-options.js @@ -15,7 +15,7 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { } = require('node:assert'); const { - Endpoint, + QuicEndpoint, } = require('internal/quic/quic'); const { @@ -30,7 +30,7 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { it('invalid options', async () => { ['a', null, false, NaN].forEach((i) => { - throws(() => new Endpoint(callbackConfig, i), { + throws(() => new QuicEndpoint(callbackConfig, i), { code: 'ERR_INVALID_ARG_TYPE', }); }); @@ -38,9 +38,9 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { it('valid options', async () => { // Just Works... using all defaults - new Endpoint(callbackConfig, {}); - new Endpoint(callbackConfig); - new Endpoint(callbackConfig, undefined); + new QuicEndpoint(callbackConfig, {}); + new QuicEndpoint(callbackConfig); + new QuicEndpoint(callbackConfig, undefined); }); it('various cases', async () => { @@ -126,12 +126,12 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { { key: 'cc', valid: [ - Endpoint.CC_ALGO_RENO, - Endpoint.CC_ALGO_CUBIC, - Endpoint.CC_ALGO_BBR, - Endpoint.CC_ALGO_RENO_STR, - Endpoint.CC_ALGO_CUBIC_STR, - Endpoint.CC_ALGO_BBR_STR, + QuicEndpoint.CC_ALGO_RENO, + QuicEndpoint.CC_ALGO_CUBIC, + QuicEndpoint.CC_ALGO_BBR, + QuicEndpoint.CC_ALGO_RENO_STR, + QuicEndpoint.CC_ALGO_CUBIC_STR, + QuicEndpoint.CC_ALGO_BBR_STR, ], invalid: [-1, 4, 1n, 'a', null, false, true, {}, [], () => {}], }, @@ -190,13 +190,13 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { for (const value of valid) { const options = {}; options[key] = value; - new Endpoint(callbackConfig, options); + new QuicEndpoint(callbackConfig, options); } for (const value of invalid) { const options = {}; options[key] = value; - throws(() => new Endpoint(callbackConfig, options), { + throws(() => new QuicEndpoint(callbackConfig, options), { code: 'ERR_INVALID_ARG_VALUE', }); } @@ -204,7 +204,7 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { }); it('endpoint can be ref/unrefed without error', async () => { - const endpoint = new Endpoint(callbackConfig, {}); + const endpoint = new QuicEndpoint(callbackConfig, {}); endpoint.unref(); endpoint.ref(); endpoint.close(); @@ -212,17 +212,17 @@ describe('quic internal endpoint options', { skip: !hasQuic }, async () => { }); it('endpoint can be inspected', async () => { - const endpoint = new Endpoint(callbackConfig, {}); + const endpoint = new QuicEndpoint(callbackConfig, {}); strictEqual(typeof 
inspect(endpoint), 'string'); endpoint.close(); await endpoint.closed; }); it('endpoint with object address', () => { - new Endpoint(callbackConfig, { + new QuicEndpoint(callbackConfig, { address: { host: '127.0.0.1:0' }, }); - throws(() => new Endpoint(callbackConfig, { address: '127.0.0.1:0' }), { + throws(() => new QuicEndpoint(callbackConfig, { address: '127.0.0.1:0' }), { code: 'ERR_INVALID_ARG_TYPE', }); }); diff --git a/test/parallel/test-quic-internal-endpoint-stats-state.js b/test/parallel/test-quic-internal-endpoint-stats-state.js index 18a6ed6b138048..6992e4ac09df08 100644 --- a/test/parallel/test-quic-internal-endpoint-stats-state.js +++ b/test/parallel/test-quic-internal-endpoint-stats-state.js @@ -10,14 +10,18 @@ const { describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { const { - Endpoint, + QuicEndpoint, QuicStreamState, QuicStreamStats, - SessionState, - SessionStats, - kFinishClose, + QuicSessionState, + QuicSessionStats, } = require('internal/quic/quic'); + const { + kFinishClose, + kPrivateConstructor, + } = require('internal/quic/symbols'); + const { inspect, } = require('util'); @@ -29,7 +33,7 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { } = require('node:assert'); it('endpoint state', () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, @@ -62,7 +66,7 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { }); it('state is not readable after close', () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, @@ -74,24 +78,25 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { }); it('state constructor argument is ArrayBuffer', () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, }, {}); const Cons = endpoint.state.constructor; - throws(() => new Cons(1), { + throws(() => new Cons(kPrivateConstructor, 1), { code: 'ERR_INVALID_ARG_TYPE' }); }); it('endpoint stats', () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, }); + strictEqual(typeof endpoint.stats.isConnected, 'boolean'); strictEqual(typeof endpoint.stats.createdAt, 'bigint'); strictEqual(typeof endpoint.stats.destroyedAt, 'bigint'); strictEqual(typeof endpoint.stats.bytesReceived, 'bigint'); @@ -107,6 +112,7 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { strictEqual(typeof endpoint.stats.immediateCloseCount, 'bigint'); deepStrictEqual(Object.keys(endpoint.stats.toJSON()), [ + 'connected', 'createdAt', 'destroyedAt', 'bytesReceived', @@ -128,25 +134,26 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { }); it('stats are still readble after close', () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, }, {}); strictEqual(typeof endpoint.stats.toJSON(), 'object'); endpoint.stats[kFinishClose](); + strictEqual(endpoint.stats.isConnected, false); strictEqual(typeof endpoint.stats.destroyedAt, 'bigint'); strictEqual(typeof endpoint.stats.toJSON(), 'object'); }); it('stats constructor argument is ArrayBuffer', () => { - const endpoint = new Endpoint({ + const endpoint = new QuicEndpoint({ onsession() {}, session: {}, stream: {}, }, {}); const Cons = endpoint.stats.constructor; - throws(() => new Cons(1), { 
+ throws(() => new Cons(kPrivateConstructor, 1), { code: 'ERR_INVALID_ARG_TYPE', }); }); @@ -156,8 +163,8 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { // temporarily while the rest of the functionality is being // implemented. it('stream and session states', () => { - const streamState = new QuicStreamState(new ArrayBuffer(1024)); - const sessionState = new SessionState(new ArrayBuffer(1024)); + const streamState = new QuicStreamState(kPrivateConstructor, new ArrayBuffer(1024)); + const sessionState = new QuicSessionState(kPrivateConstructor, new ArrayBuffer(1024)); strictEqual(streamState.finSent, false); strictEqual(streamState.finReceived, false); @@ -195,8 +202,8 @@ describe('quic internal endpoint stats and state', { skip: !hasQuic }, () => { }); it('stream and session stats', () => { - const streamStats = new QuicStreamStats(new ArrayBuffer(1024)); - const sessionStats = new SessionStats(new ArrayBuffer(1024)); + const streamStats = new QuicStreamStats(kPrivateConstructor, new ArrayBuffer(1024)); + const sessionStats = new QuicSessionStats(kPrivateConstructor, new ArrayBuffer(1024)); strictEqual(streamStats.createdAt, undefined); strictEqual(streamStats.receivedAt, undefined); strictEqual(streamStats.ackedAt, undefined); From d180a8aedb4a2e4ef0a2812528580a3eac6e0cc8 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 25 Nov 2024 20:01:45 -0500 Subject: [PATCH 47/53] deps: update simdutf to 5.6.3 PR-URL: https://github.com/nodejs/node/pull/55973 Reviewed-By: Luigi Pinca Reviewed-By: Rafael Gonzaga --- deps/simdutf/simdutf.cpp | 89 +++++++++++++++++++++++----------------- deps/simdutf/simdutf.h | 16 ++++---- 2 files changed, 59 insertions(+), 46 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index 2da5ca7284c1f0..007fa02b165204 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-11-14 14:52:31 -0500. Do not edit! */ +/* auto-generated on 2024-11-21 10:33:28 -0500. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" // We include base64_tables once. 
@@ -23495,7 +23495,7 @@ size_t encode_base64(char *dst, const char *src, size_t srclen, } template -static inline uint64_t to_base64_mask(block64 *b, bool *error) { +static inline uint64_t to_base64_mask(block64 *b, uint64_t *error) { __m512i input = b->chunks[0]; const __m512i ascii_space_tbl = _mm512_set_epi8( 0, 0, 13, 12, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 12, 0, 10, @@ -23538,7 +23538,7 @@ static inline uint64_t to_base64_mask(block64 *b, bool *error) { if (mask) { const __mmask64 spaces = _mm512_cmpeq_epi8_mask( _mm512_shuffle_epi8(ascii_space_tbl, input), input); - *error |= (mask != spaces); + *error = (mask ^ spaces); } b->chunks[0] = translated; @@ -23646,16 +23646,13 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, block64 b; load_block(&b, src); src += 64; - bool error = false; + uint64_t error = 0; uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && scalar::base64::is_eight_byte(*src) && - to_base64[uint8_t(*src)] <= 64) { - src++; - } - return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit), - size_t(dst - dstinit)}; + size_t error_offset = _tzcnt_u64(error); + return {error_code::INVALID_BASE64_CHARACTER, + size_t(src - srcinit + error_offset), size_t(dst - dstinit)}; } if (badcharmask != 0) { // optimization opportunity: check for simple masks like those made of @@ -28240,7 +28237,7 @@ struct block64 { }; template -static inline uint32_t to_base64_mask(__m256i *src, bool *error) { +static inline uint32_t to_base64_mask(__m256i *src, uint32_t *error) { const __m256i ascii_space_tbl = _mm256_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0xc, 0xd, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, @@ -28324,17 +28321,19 @@ static inline uint32_t to_base64_mask(__m256i *src, bool *error) { if (mask) { __m256i ascii_space = _mm256_cmpeq_epi8(_mm256_shuffle_epi8(ascii_space_tbl, *src), *src); - *error |= (mask != _mm256_movemask_epi8(ascii_space)); + *error = (mask ^ _mm256_movemask_epi8(ascii_space)); } *src = out; return (uint32_t)mask; } template -static inline uint64_t to_base64_mask(block64 *b, bool *error) { - *error = 0; - uint64_t m0 = to_base64_mask(&b->chunks[0], error); - uint64_t m1 = to_base64_mask(&b->chunks[1], error); +static inline uint64_t to_base64_mask(block64 *b, uint64_t *error) { + uint32_t err0 = 0; + uint32_t err1 = 0; + uint64_t m0 = to_base64_mask(&b->chunks[0], &err0); + uint64_t m1 = to_base64_mask(&b->chunks[1], &err1); + *error = err0 | ((uint64_t)err1 << 32); return m0 | (m1 << 32); } @@ -28466,16 +28465,13 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, block64 b; load_block(&b, src); src += 64; - bool error = false; + uint64_t error = 0; uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && scalar::base64::is_eight_byte(*src) && - to_base64[uint8_t(*src)] <= 64) { - src++; - } - return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit), - size_t(dst - dstinit)}; + size_t error_offset = _tzcnt_u64(error); + return {error_code::INVALID_BASE64_CHARACTER, + size_t(src - srcinit + error_offset), size_t(dst - dstinit)}; } if (badcharmask != 0) { // optimization opportunity: check for simple masks like those made of @@ -37992,7 +37988,7 @@ struct block64 { }; template -static inline uint16_t to_base64_mask(__m128i *src, bool *error) { +static inline uint16_t to_base64_mask(__m128i *src, uint32_t *error) { const __m128i ascii_space_tbl = _mm_setr_epi8(0x20, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0xc, 0xd, 0x0, 0x0); @@ -38059,22 +38055,42 @@ static inline uint16_t to_base64_mask(__m128i *src, bool *error) { if (mask) { __m128i ascii_space = _mm_cmpeq_epi8(_mm_shuffle_epi8(ascii_space_tbl, *src), *src); - *error |= (mask != _mm_movemask_epi8(ascii_space)); + *error = (mask ^ _mm_movemask_epi8(ascii_space)); } *src = out; return (uint16_t)mask; } template -static inline uint64_t to_base64_mask(block64 *b, bool *error) { - *error = 0; - uint64_t m0 = to_base64_mask(&b->chunks[0], error); - uint64_t m1 = to_base64_mask(&b->chunks[1], error); - uint64_t m2 = to_base64_mask(&b->chunks[2], error); - uint64_t m3 = to_base64_mask(&b->chunks[3], error); +static inline uint64_t to_base64_mask(block64 *b, uint64_t *error) { + uint32_t err0 = 0; + uint32_t err1 = 0; + uint32_t err2 = 0; + uint32_t err3 = 0; + uint64_t m0 = to_base64_mask(&b->chunks[0], &err0); + uint64_t m1 = to_base64_mask(&b->chunks[1], &err1); + uint64_t m2 = to_base64_mask(&b->chunks[2], &err2); + uint64_t m3 = to_base64_mask(&b->chunks[3], &err3); + *error = (err0) | ((uint64_t)err1 << 16) | ((uint64_t)err2 << 32) | + ((uint64_t)err3 << 48); return m0 | (m1 << 16) | (m2 << 32) | (m3 << 48); } +#if defined(_MSC_VER) && !defined(__clang__) +static inline size_t simdutf_tzcnt_u64(uint64_t num) { + unsigned long ret; + if (num == 0) { + return 64; + } + _BitScanForward64(&ret, num); + return ret; +} +#else // GCC or Clang +static inline size_t simdutf_tzcnt_u64(uint64_t num) { + return num ? __builtin_ctzll(num) : 64; +} +#endif + static inline void copy_block(block64 *b, char *output) { _mm_storeu_si128(reinterpret_cast<__m128i *>(output), b->chunks[0]); _mm_storeu_si128(reinterpret_cast<__m128i *>(output + 16), b->chunks[1]); @@ -38222,16 +38238,13 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, block64 b; load_block(&b, src); src += 64; - bool error = false; + uint64_t error = 0; uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && scalar::base64::is_eight_byte(*src) && - to_base64[uint8_t(*src)] <= 64) { - src++; - } - return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit), - size_t(dst - dstinit)}; + size_t error_offset = simdutf_tzcnt_u64(error); + return {error_code::INVALID_BASE64_CHARACTER, + size_t(src - srcinit + error_offset), size_t(dst - dstinit)}; } if (badcharmask != 0) { // optimization opportunity: check for simple masks like those made of diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index a30e5f2cd228cd..5f82ca372ccfe3 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-11-14 14:52:31 -0500. Do not edit! */ +/* auto-generated on 2024-11-21 10:33:28 -0500. Do not edit! */ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -155,11 +155,11 @@ // RISC-V 64-bit #define SIMDUTF_IS_RISCV64 1 - #if __clang_major__ >= 19 - // Does the compiler support target regions for RISC-V - #define SIMDUTF_HAS_RVV_TARGET_REGION 1 - #endif - + // #if __riscv_v_intrinsic >= 1000000 + // #define SIMDUTF_HAS_RVV_INTRINSICS 1 + // #define SIMDUTF_HAS_RVV_TARGET_REGION 1 + // #elif ... 
+ // Check for special compiler versions that implement pre v1.0 intrinsics #if __riscv_v_intrinsic >= 11000 #define SIMDUTF_HAS_RVV_INTRINSICS 1 #endif @@ -670,7 +670,7 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "5.6.2" +#define SIMDUTF_VERSION "5.6.3" namespace simdutf { enum { @@ -685,7 +685,7 @@ enum { /** * The revision (major.minor.REVISION) of simdutf being used. */ - SIMDUTF_VERSION_REVISION = 2 + SIMDUTF_VERSION_REVISION = 3 }; } // namespace simdutf From 96e846de8965b97a8b34e22bebaa8a55ecc8f252 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 25 Nov 2024 20:01:52 -0500 Subject: [PATCH 48/53] deps: update ngtcp2 to 1.9.0 PR-URL: https://github.com/nodejs/node/pull/55975 Reviewed-By: Luigi Pinca Reviewed-By: Rafael Gonzaga Reviewed-By: Richard Lau --- deps/ngtcp2/ngtcp2.gyp | 3 +- .../ngtcp2/crypto/boringssl/boringssl.c | 41 +- .../crypto/includes/ngtcp2/ngtcp2_crypto.h | 219 ++- .../includes/ngtcp2/ngtcp2_crypto_boringssl.h | 10 +- .../includes/ngtcp2/ngtcp2_crypto_picotls.h | 18 +- .../includes/ngtcp2/ngtcp2_crypto_quictls.h | 10 +- .../includes/ngtcp2/ngtcp2_crypto_wolfssl.h | 10 +- deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c | 10 +- deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c | 97 +- deps/ngtcp2/ngtcp2/crypto/shared.c | 404 +++-- deps/ngtcp2/ngtcp2/crypto/shared.h | 24 +- deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c | 43 +- .../ngtcp2/lib/includes/ngtcp2/ngtcp2.h | 332 ++-- .../ngtcp2/lib/includes/ngtcp2/version.h | 10 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c | 58 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h | 29 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c | 10 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h | 11 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c | 375 ++-- deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h | 15 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c | 542 +++--- deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h | 105 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c | 22 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h | 59 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c | 1548 +++++++++-------- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h | 79 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h | 9 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c | 151 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h | 70 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c | 66 - deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c | 831 +-------- deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h | 65 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c | 19 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h | 27 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c | 42 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h | 20 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c | 19 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h | 35 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c | 271 +-- deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h | 158 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c | 258 ++- deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h | 35 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c | 220 +-- deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h | 66 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h | 8 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h | 48 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h | 26 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c | 191 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h | 128 +- 
deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c | 55 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h | 13 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c | 79 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h | 59 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c | 53 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h | 69 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c | 2 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c | 56 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h | 10 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c | 34 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h | 26 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c | 45 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h | 28 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c | 35 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h | 18 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c | 322 ++-- deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h | 78 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c | 91 + deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h | 73 + deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c | 125 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h | 73 +- .../ngtcp2/lib/ngtcp2_transport_params.c | 886 ++++++++++ ...conversion.h => ngtcp2_transport_params.h} | 55 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c | 19 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h | 12 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c | 9 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h | 30 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c | 2 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h | 4 +- 92 files changed, 4936 insertions(+), 4334 deletions(-) delete mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c rename deps/ngtcp2/ngtcp2/lib/{ngtcp2_conversion.h => ngtcp2_transport_params.h} (55%) diff --git a/deps/ngtcp2/ngtcp2.gyp b/deps/ngtcp2/ngtcp2.gyp index 0f2929b75478f1..68013580874b09 100644 --- a/deps/ngtcp2/ngtcp2.gyp +++ b/deps/ngtcp2/ngtcp2.gyp @@ -13,7 +13,6 @@ 'ngtcp2/lib/ngtcp2_cid.c', 'ngtcp2/lib/ngtcp2_conn.c', 'ngtcp2/lib/ngtcp2_conv.c', - 'ngtcp2/lib/ngtcp2_conversion.c', 'ngtcp2/lib/ngtcp2_crypto.c', 'ngtcp2/lib/ngtcp2_err.c', 'ngtcp2/lib/ngtcp2_frame_chain.c', @@ -37,8 +36,10 @@ 'ngtcp2/lib/ngtcp2_rob.c', 'ngtcp2/lib/ngtcp2_rst.c', 'ngtcp2/lib/ngtcp2_rtb.c', + 'ngtcp2/lib/ngtcp2_settings.c', 'ngtcp2/lib/ngtcp2_str.c', 'ngtcp2/lib/ngtcp2_strm.c', + 'ngtcp2/lib/ngtcp2_transport_params.c', 'ngtcp2/lib/ngtcp2_unreachable.c', 'ngtcp2/lib/ngtcp2_vec.c', 'ngtcp2/lib/ngtcp2_version.c', diff --git a/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c b/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c index 50b89110e36ff7..283063f738e2af 100644 --- a/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include @@ -52,15 +52,15 @@ typedef struct ngtcp2_crypto_boringssl_cipher { } ngtcp2_crypto_boringssl_cipher; static ngtcp2_crypto_boringssl_cipher crypto_cipher_aes_128 = { - NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_128, + NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_128, }; static ngtcp2_crypto_boringssl_cipher crypto_cipher_aes_256 = { - NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_256, + 
NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_256, }; static ngtcp2_crypto_boringssl_cipher crypto_cipher_chacha20 = { - NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_CHACHA20, + NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_CHACHA20, }; ngtcp2_crypto_aead *ngtcp2_crypto_aead_aes_128_gcm(ngtcp2_crypto_aead *aead) { @@ -175,7 +175,7 @@ static ngtcp2_crypto_ctx *crypto_ctx_cipher_id(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)crypto_cipher_id_get_hp(cipher_id); ctx->max_encryption = crypto_cipher_id_get_aead_max_encryption(cipher_id); ctx->max_decryption_failure = - crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); + crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); return ctx; } @@ -413,12 +413,12 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, AES_ecb_encrypt(sample, dest, &ctx->aes_key, 1); return 0; case NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_CHACHA20: -#if defined(WORDS_BIGENDIAN) +#ifdef WORDS_BIGENDIAN counter = (uint32_t)sample[0] + (uint32_t)(sample[1] << 8) + (uint32_t)(sample[2] << 16) + (uint32_t)(sample[3] << 24); -#else /* !WORDS_BIGENDIAN */ +#else /* !defined(WORDS_BIGENDIAN) */ memcpy(&counter, sample, sizeof(counter)); -#endif /* !WORDS_BIGENDIAN */ +#endif /* !defined(WORDS_BIGENDIAN) */ CRYPTO_chacha_20(dest, PLAINTEXT, sizeof(PLAINTEXT) - 1, ctx->key, sample + sizeof(counter), counter); return 0; @@ -429,17 +429,16 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_read_write_crypto_data( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - const uint8_t *data, size_t datalen) { + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { SSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); int rv; int err; if (SSL_provide_quic_data( - ssl, - ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( - encryption_level), - data, datalen) != 1) { + ssl, + ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level(encryption_level), + data, datalen) != 1) { return -1; } @@ -522,7 +521,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, } ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( - enum ssl_encryption_level_t ssl_level) { + enum ssl_encryption_level_t ssl_level) { switch (ssl_level) { case ssl_encryption_initial: return NGTCP2_ENCRYPTION_LEVEL_INITIAL; @@ -540,7 +539,7 @@ ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( enum ssl_encryption_level_t ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level) { + ngtcp2_encryption_level encryption_level) { switch (encryption_level) { case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return ssl_encryption_initial; @@ -582,7 +581,7 @@ static int set_read_secret(SSL *ssl, enum ssl_encryption_level_t bssl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); + ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); (void)cipher; if (ngtcp2_crypto_derive_and_install_rx_key(conn, NULL, NULL, NULL, level, @@ -599,7 +598,7 @@ static int set_write_secret(SSL *ssl, enum ssl_encryption_level_t bssl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); + 
ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); (void)cipher; if (ngtcp2_crypto_derive_and_install_tx_key(conn, NULL, NULL, NULL, level, @@ -615,7 +614,7 @@ static int add_handshake_data(SSL *ssl, enum ssl_encryption_level_t bssl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); + ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); int rv; rv = ngtcp2_conn_submit_crypto_data(conn, level, data, datalen); @@ -644,8 +643,8 @@ static int send_alert(SSL *ssl, enum ssl_encryption_level_t bssl_level, } static SSL_QUIC_METHOD quic_method = { - set_read_secret, set_write_secret, add_handshake_data, - flush_flight, send_alert, + set_read_secret, set_write_secret, add_handshake_data, + flush_flight, send_alert, }; static void crypto_boringssl_configure_context(SSL_CTX *ssl_ctx) { diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h index 06427d7a7cac70..68093d18b71b14 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h @@ -29,14 +29,45 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ #ifdef WIN32 # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN -# endif +# endif /* !defined(WIN32_LEAN_AND_MEAN) */ # include -#endif /* WIN32 */ +#endif /* defined(WIN32) */ + +/** + * @macrosection + * + * ngtcp2 crypto library error codes + */ + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_ERR_INTERNAL` indicates an internal error. + */ +#define NGTCP2_CRYPTO_ERR_INTERNAL -201 + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN` indicates that a token + * is unreadable because it is not correctly formatted; or verifying + * the integrity protection failed. + */ +#define NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN -202 + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_ERR_VERIFY_TOKEN` indicates that a token does + * not probe the client address; or the token validity has expired; or + * it contains invalid Connection ID. + */ +#define NGTCP2_CRYPTO_ERR_VERIFY_TOKEN -203 /** * @function @@ -135,12 +166,9 @@ ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, * * This function returns 0 if it succeeds, or -1. */ -NGTCP2_EXTERN int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, - const ngtcp2_crypto_md *md, - const uint8_t *secret, - size_t secretlen, - const uint8_t *info, - size_t infolen); +NGTCP2_EXTERN int ngtcp2_crypto_hkdf_expand( + uint8_t *dest, size_t destlen, const ngtcp2_crypto_md *md, + const uint8_t *secret, size_t secretlen, const uint8_t *info, size_t infolen); /** * @function @@ -318,8 +346,8 @@ ngtcp2_crypto_hp_mask_cb(uint8_t *dest, const ngtcp2_crypto_cipher *hp, * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_rx_key( - ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, - ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); + ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); /** * @function @@ -365,8 +393,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_rx_key( * This function returns 0 if it succeeds, or -1. 
*/ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_tx_key( - ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, - ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); + ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); /** * @function @@ -405,11 +433,11 @@ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_tx_key( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_update_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen); /** * @function @@ -422,11 +450,11 @@ NGTCP2_EXTERN int ngtcp2_crypto_update_key( * :macro:`NGTCP2_ERR_CALLBACK_FAILURE`. */ NGTCP2_EXTERN int ngtcp2_crypto_update_key_cb( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen, void *user_data); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen, void *user_data); /** * @function @@ -514,8 +542,8 @@ ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, * codes. */ NGTCP2_EXTERN int ngtcp2_crypto_recv_crypto_data_cb( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - uint64_t offset, const uint8_t *data, size_t datalen, void *user_data); + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, uint64_t offset, + const uint8_t *data, size_t datalen, void *user_data); /** * @function @@ -529,8 +557,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_recv_crypto_data_cb( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, - const ngtcp2_cid *cid); + uint8_t *token, const uint8_t *secret, size_t secretlen, + const ngtcp2_cid *cid); /** * @macro @@ -540,7 +568,7 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( * `ngtcp2_crypto_generate_retry_token` or * `ngtcp2_crypto_generate_regular_token`. */ -#define NGTCP2_CRYPTO_TOKEN_RAND_DATALEN 32 +#define NGTCP2_CRYPTO_TOKEN_RAND_DATALEN 16 /** * @macro @@ -550,6 +578,14 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( */ #define NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY 0xb6 +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2` is the magic byte for + * Retry token generated by `ngtcp2_crypto_generate_retry_token2`. 
+ */ +#define NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2 0xb7 + /** * @macro * @@ -569,6 +605,17 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( sizeof(ngtcp2_tstamp) + /* aead tag = */ 16 + \ NGTCP2_CRYPTO_TOKEN_RAND_DATALEN) +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2` is the maximum length of + * a token generated by `ngtcp2_crypto_generate_retry_token2`. + */ +#define NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2 \ + (/* magic = */ 1 + sizeof(ngtcp2_sockaddr_union) + /* cid len = */ 1 + \ + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp) + /* aead tag = */ 16 + \ + NGTCP2_CRYPTO_TOKEN_RAND_DATALEN) + /** * @macro * @@ -595,13 +642,15 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( * is a Destination Connection ID in Initial packet sent by client. * |ts| is the timestamp when the token is generated. * + * See also `ngtcp2_crypto_generate_retry_token2`. + * * This function returns the length of generated token if it succeeds, * or -1. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts); + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts); /** * @function @@ -622,10 +671,76 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token( - ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, - const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts); + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts); + +/** + * @function + * + * `ngtcp2_crypto_generate_retry_token2` generates a token in the + * buffer pointed by |token| that is sent with Retry packet. The + * buffer pointed by |token| must have at least + * :macro:`NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2` bytes long. The + * successfully generated token starts with + * :macro:`NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2`. |secret| of length + * |secretlen| is a keying material to generate keys to encrypt the + * token. |version| is QUIC version. |remote_addr| of length + * |remote_addrlen| is an address of client. |retry_scid| is a Source + * Connection ID chosen by server, and set in Retry packet. |odcid| + * is a Destination Connection ID in Initial packet sent by client. + * |ts| is the timestamp when the token is generated. + * + * Use this function instead of `ngtcp2_crypto_generate_retry_token` + * if more detailed error handling is required when verifying the + * token. `ngtcp2_crypto_verify_retry_token2` must be used to verify + * the token. + * + * This function returns the length of generated token if it succeeds, + * or -1. 
+ */ +NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token2( + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts); + +/** + * @function + * + * `ngtcp2_crypto_verify_retry_token2` verifies Retry token stored in + * the buffer pointed by |token| of length |tokenlen|. |secret| of + * length |secretlen| is a keying material to generate keys to decrypt + * the token. |version| is QUIC version of the Initial packet that + * contains this token. |remote_addr| of length |remote_addrlen| is + * an address of client. |dcid| is a Destination Connection ID in + * Initial packet sent by client. |timeout| is the period during + * which the token is valid. |ts| is the current timestamp. When + * validation succeeds, the extracted Destination Connection ID (which + * is the Destination Connection ID in Initial packet sent by client + * that triggered Retry packet) is stored in the buffer pointed by + * |odcid|. + * + * The token must be generated by + * `ngtcp2_crypto_generate_retry_token2`. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN` + * A token is badly formatted; or verifying the integrity + * protection failed. + * :macro:`NGTCP2_CRYPTO_ERR_VERIFY_TOKEN` + * A token does not probe the client address; or the token + * validity has expired; or it contains invalid Connection ID. + * :macro:`NGTCP2_CRYPTO_ERR_INTERNAL` + * Internal error occurred. + */ +NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token2( + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts); /** * @function @@ -644,9 +759,9 @@ NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token( * or -1. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_regular_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - ngtcp2_tstamp ts); + uint8_t *token, const uint8_t *secret, size_t secretlen, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + ngtcp2_tstamp ts); /** * @function @@ -661,9 +776,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_regular_token( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_verify_regular_token( - const uint8_t *token, size_t tokenlen, const uint8_t *secret, - size_t secretlen, const ngtcp2_sockaddr *remote_addr, - ngtcp2_socklen remote_addrlen, ngtcp2_duration timeout, ngtcp2_tstamp ts); + const uint8_t *token, size_t tokenlen, const uint8_t *secret, + size_t secretlen, const ngtcp2_sockaddr *remote_addr, + ngtcp2_socklen remote_addrlen, ngtcp2_duration timeout, ngtcp2_tstamp ts); /** * @function @@ -685,9 +800,9 @@ NGTCP2_EXTERN int ngtcp2_crypto_verify_regular_token( * This function returns 0 if it succeeds, or -1. 
*/ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen); /** * @function @@ -705,9 +820,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_connection_close( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_retry( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, - size_t tokenlen); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, + size_t tokenlen); /** * @function @@ -759,7 +874,7 @@ ngtcp2_crypto_aead_ctx_free(ngtcp2_crypto_aead_ctx *aead_ctx); * :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` field. */ NGTCP2_EXTERN void ngtcp2_crypto_delete_crypto_aead_ctx_cb( - ngtcp2_conn *conn, ngtcp2_crypto_aead_ctx *aead_ctx, void *user_data); + ngtcp2_conn *conn, ngtcp2_crypto_aead_ctx *aead_ctx, void *user_data); /** * @function @@ -771,7 +886,7 @@ NGTCP2_EXTERN void ngtcp2_crypto_delete_crypto_aead_ctx_cb( * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` field. */ NGTCP2_EXTERN void ngtcp2_crypto_delete_crypto_cipher_ctx_cb( - ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); + ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); /** * @function @@ -813,7 +928,7 @@ typedef struct ngtcp2_crypto_conn_ref ngtcp2_crypto_conn_ref; * must return non-NULL :type:`ngtcp2_conn` object. 
*/ typedef ngtcp2_conn *(*ngtcp2_crypto_get_conn)( - ngtcp2_crypto_conn_ref *conn_ref); + ngtcp2_crypto_conn_ref *conn_ref); /** * @struct @@ -836,6 +951,6 @@ typedef struct ngtcp2_crypto_conn_ref { #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_H */ +#endif /* !defined(NGTCP2_CRYPTO_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h index 43a3c36f03a382..89b26f0bc0e346 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h @@ -31,7 +31,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @function @@ -42,7 +42,7 @@ extern "C" { */ NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( - enum ssl_encryption_level_t ssl_level); + enum ssl_encryption_level_t ssl_level); /** * @function @@ -53,7 +53,7 @@ ngtcp2_crypto_boringssl_from_ssl_encryption_level( */ NGTCP2_EXTERN enum ssl_encryption_level_t ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -99,6 +99,6 @@ ngtcp2_crypto_boringssl_configure_client_context(SSL_CTX *ssl_ctx); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_BORINGSSL_H */ +#endif /* !defined(NGTCP2_CRYPTO_BORINGSSL_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h index 61020bb3a8f376..d3f2f978e79923 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h @@ -31,7 +31,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @struct @@ -79,7 +79,7 @@ ngtcp2_crypto_picotls_from_epoch(size_t epoch); * Picotls backend. */ NGTCP2_EXTERN size_t ngtcp2_crypto_picotls_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -160,7 +160,7 @@ ngtcp2_crypto_picotls_configure_client_context(ptls_context_t *ctx); * It returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_picotls_configure_server_session( - ngtcp2_crypto_picotls_ctx *cptls); + ngtcp2_crypto_picotls_ctx *cptls); /** * @function @@ -224,8 +224,8 @@ ngtcp2_crypto_picotls_deconfigure_session(ngtcp2_crypto_picotls_ctx *cptls); * :macro:`NGTCP2_TLSEXT_QUIC_TRANSPORT_PARAMETERS_V1`. */ NGTCP2_EXTERN int ngtcp2_crypto_picotls_collect_extension( - ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, - uint16_t type); + ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, + uint16_t type); /** * @function @@ -236,11 +236,11 @@ NGTCP2_EXTERN int ngtcp2_crypto_picotls_collect_extension( * extensions are ignored. 
*/ NGTCP2_EXTERN int ngtcp2_crypto_picotls_collected_extensions( - ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, - ptls_raw_extension_t *extensions); + ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, + ptls_raw_extension_t *extensions); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_PICOTLS_H */ +#endif /* !defined(NGTCP2_CRYPTO_PICOTLS_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h index b25c13b81c8b18..22e3eda0c4160a 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h @@ -31,7 +31,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @macrosection @@ -70,7 +70,7 @@ extern "C" { */ NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( - OSSL_ENCRYPTION_LEVEL ossl_level); + OSSL_ENCRYPTION_LEVEL ossl_level); /** * @function @@ -81,7 +81,7 @@ ngtcp2_crypto_quictls_from_ossl_encryption_level( */ NGTCP2_EXTERN OSSL_ENCRYPTION_LEVEL ngtcp2_crypto_quictls_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -142,6 +142,6 @@ NGTCP2_EXTERN int ngtcp2_crypto_quictls_init(void); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_QUICTLS_H */ +#endif /* !defined(NGTCP2_CRYPTO_QUICTLS_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h index e1d621adce94d8..e95056de5926a9 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h @@ -33,7 +33,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @function @@ -44,7 +44,7 @@ extern "C" { */ NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( - WOLFSSL_ENCRYPTION_LEVEL wolfssl_level); + WOLFSSL_ENCRYPTION_LEVEL wolfssl_level); /** * @function @@ -55,7 +55,7 @@ ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( */ NGTCP2_EXTERN WOLFSSL_ENCRYPTION_LEVEL ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -101,6 +101,6 @@ ngtcp2_crypto_wolfssl_configure_client_context(WOLFSSL_CTX *ssl_ctx); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_WOLFSSL_H */ +#endif /* !defined(NGTCP2_CRYPTO_WOLFSSL_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c b/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c index 35bfb7b2f8fa19..c3f14f5d444bac 100644 --- a/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c +++ b/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include @@ -79,7 +79,7 @@ crypto_cipher_suite_get_aead_max_encryption(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_chacha20poly1305) { return NGTCP2_CRYPTO_MAX_ENCRYPTION_CHACHA20_POLY1305; } -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ return 0; } @@ -95,7 +95,7 @@ crypto_cipher_suite_get_aead_max_decryption_failure(ptls_cipher_suite_t *cs) { if 
(cs->aead == &ptls_openssl_chacha20poly1305) { return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_CHACHA20_POLY1305; } -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ return 0; } @@ -114,7 +114,7 @@ crypto_cipher_suite_get_hp(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_chacha20poly1305) { return &ptls_openssl_chacha20; } -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ return NULL; } @@ -124,7 +124,7 @@ static int supported_cipher_suite(ptls_cipher_suite_t *cs) { cs->aead == &ptls_openssl_aes256gcm #ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 || cs->aead == &ptls_openssl_chacha20poly1305 -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ ; } diff --git a/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c b/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c index 330ca687b44666..592e5a86535356 100644 --- a/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c +++ b/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -339,7 +339,7 @@ static ngtcp2_crypto_ctx *crypto_ctx_cipher_id(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)crypto_cipher_id_get_hp(cipher_id); ctx->max_encryption = crypto_cipher_id_get_aead_max_encryption(cipher_id); ctx->max_decryption_failure = - crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); + crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); return ctx; } @@ -527,14 +527,14 @@ int ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); int mode = EVP_KDF_HKDF_MODE_EXTRACT_ONLY; OSSL_PARAM params[] = { - OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), - OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, - (char *)EVP_MD_get0_name(prf), 0), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, - secretlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, - saltlen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), + OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, + (char *)EVP_MD_get0_name(prf), 0), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, + secretlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, + saltlen), + OSSL_PARAM_construct_end(), }; int rv = 0; @@ -584,14 +584,14 @@ int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); int mode = EVP_KDF_HKDF_MODE_EXPAND_ONLY; OSSL_PARAM params[] = { - OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), - OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, - (char *)EVP_MD_get0_name(prf), 0), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, - secretlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, - infolen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), + OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, + (char *)EVP_MD_get0_name(prf), 0), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, + secretlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, + infolen), + OSSL_PARAM_construct_end(), }; int rv = 0; @@ -639,15 +639,15 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, EVP_KDF *kdf = crypto_kdf_hkdf(); 
EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); OSSL_PARAM params[] = { - OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, - (char *)EVP_MD_get0_name(prf), 0), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, - secretlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, - saltlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, - infolen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, + (char *)EVP_MD_get0_name(prf), 0), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, + secretlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, + saltlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, + infolen), + OSSL_PARAM_construct_end(), }; int rv = 0; @@ -672,7 +672,7 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, if (EVP_PKEY_derive_init(pctx) != 1 || EVP_PKEY_CTX_hkdf_mode(pctx, EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND) != - 1 || + 1 || EVP_PKEY_CTX_set_hkdf_md(pctx, prf) != 1 || EVP_PKEY_CTX_set1_hkdf_salt(pctx, salt, (int)saltlen) != 1 || EVP_PKEY_CTX_set1_hkdf_key(pctx, secret, (int)secretlen) != 1 || @@ -699,9 +699,9 @@ int ngtcp2_crypto_encrypt(uint8_t *dest, const ngtcp2_crypto_aead *aead, int len; #if OPENSSL_VERSION_NUMBER >= 0x30000000L OSSL_PARAM params[] = { - OSSL_PARAM_construct_octet_string(OSSL_CIPHER_PARAM_AEAD_TAG, - dest + plaintextlen, taglen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_octet_string(OSSL_CIPHER_PARAM_AEAD_TAG, + dest + plaintextlen, taglen), + OSSL_PARAM_construct_end(), }; #endif /* OPENSSL_VERSION_NUMBER >= 0x30000000L */ @@ -794,16 +794,16 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_read_write_crypto_data( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - const uint8_t *data, size_t datalen) { + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { SSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); int rv; int err; if (SSL_provide_quic_data( - ssl, - ngtcp2_crypto_quictls_from_ngtcp2_encryption_level(encryption_level), - data, datalen) != 1) { + ssl, + ngtcp2_crypto_quictls_from_ngtcp2_encryption_level(encryption_level), + data, datalen) != 1) { return -1; } @@ -874,7 +874,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, } ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( - OSSL_ENCRYPTION_LEVEL ossl_level) { + OSSL_ENCRYPTION_LEVEL ossl_level) { switch (ossl_level) { case ssl_encryption_initial: return NGTCP2_ENCRYPTION_LEVEL_INITIAL; @@ -892,7 +892,7 @@ ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( OSSL_ENCRYPTION_LEVEL ngtcp2_crypto_quictls_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level) { + ngtcp2_encryption_level encryption_level) { switch (encryption_level) { case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return ssl_encryption_initial; @@ -934,7 +934,7 @@ static int set_encryption_secrets(SSL *ssl, OSSL_ENCRYPTION_LEVEL ossl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); + ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); if (rx_secret && ngtcp2_crypto_derive_and_install_rx_key(conn, NULL, NULL, NULL, level, @@ -956,7 +956,7 @@ static int add_handshake_data(SSL *ssl, 
OSSL_ENCRYPTION_LEVEL ossl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); + ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); int rv; rv = ngtcp2_conn_submit_crypto_data(conn, level, data, datalen); @@ -973,8 +973,7 @@ static int flush_flight(SSL *ssl) { return 1; } -static int send_alert(SSL *ssl, enum ssl_encryption_level_t level, - uint8_t alert) { +static int send_alert(SSL *ssl, OSSL_ENCRYPTION_LEVEL level, uint8_t alert) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); (void)level; @@ -985,14 +984,14 @@ static int send_alert(SSL *ssl, enum ssl_encryption_level_t level, } static SSL_QUIC_METHOD quic_method = { - set_encryption_secrets, - add_handshake_data, - flush_flight, - send_alert, + set_encryption_secrets, + add_handshake_data, + flush_flight, + send_alert, #ifdef LIBRESSL_VERSION_NUMBER - NULL, - NULL, -#endif /* LIBRESSL_VERSION_NUMBER */ + NULL, + NULL, +#endif /* defined(LIBRESSL_VERSION_NUMBER) */ }; static void crypto_quictls_configure_context(SSL_CTX *ssl_ctx) { diff --git a/deps/ngtcp2/ngtcp2/crypto/shared.c b/deps/ngtcp2/ngtcp2/crypto/shared.c index 162094a375cb8b..98cd4de7e8097d 100644 --- a/deps/ngtcp2/ngtcp2/crypto/shared.c +++ b/deps/ngtcp2/ngtcp2/crypto/shared.c @@ -27,9 +27,9 @@ #ifdef WIN32 # include # include -#else +#elif defined(HAVE_NETINET_IN_H) # include -#endif +#endif /* defined(HAVE_NETINET_IN_H) */ #include #include @@ -110,13 +110,11 @@ int ngtcp2_crypto_derive_initial_secrets(uint8_t *rx_secret, uint8_t *tx_secret, } if (ngtcp2_crypto_hkdf_expand_label( - client_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, - initial_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, CLABEL, - sizeof(CLABEL) - 1) != 0 || + client_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, initial_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN, CLABEL, sizeof(CLABEL) - 1) != 0 || ngtcp2_crypto_hkdf_expand_label( - server_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, - initial_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, SLABEL, - sizeof(SLABEL) - 1) != 0) { + server_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, initial_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN, SLABEL, sizeof(SLABEL) - 1) != 0) { return -1; } @@ -125,13 +123,13 @@ int ngtcp2_crypto_derive_initial_secrets(uint8_t *rx_secret, uint8_t *tx_secret, size_t ngtcp2_crypto_packet_protection_ivlen(const ngtcp2_crypto_aead *aead) { size_t noncelen = ngtcp2_crypto_aead_noncelen(aead); - return ngtcp2_max(8, noncelen); + return ngtcp2_max_size(8, noncelen); } int ngtcp2_crypto_derive_packet_protection_key( - uint8_t *key, uint8_t *iv, uint8_t *hp_key, uint32_t version, - const ngtcp2_crypto_aead *aead, const ngtcp2_crypto_md *md, - const uint8_t *secret, size_t secretlen) { + uint8_t *key, uint8_t *iv, uint8_t *hp_key, uint32_t version, + const ngtcp2_crypto_aead *aead, const ngtcp2_crypto_md *md, + const uint8_t *secret, size_t secretlen) { static const uint8_t KEY_LABEL_V1[] = "quic key"; static const uint8_t IV_LABEL_V1[] = "quic iv"; static const uint8_t HP_KEY_LABEL_V1[] = "quic hp"; @@ -305,8 +303,8 @@ int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, } break; case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: - rv = ngtcp2_conn_install_rx_handshake_key(conn, &aead_ctx, iv, ivlen, - &hp_ctx); + rv = + ngtcp2_conn_install_rx_handshake_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if 
(rv != 0) { goto fail; } @@ -455,8 +453,8 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } break; case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: - rv = ngtcp2_conn_install_tx_handshake_key(conn, &aead_ctx, iv, ivlen, - &hp_ctx); + rv = + ngtcp2_conn_install_tx_handshake_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { goto fail; } @@ -489,10 +487,10 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } int ngtcp2_crypto_derive_and_install_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, - uint8_t *rx_hp_key, uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, - uint32_t version, const ngtcp2_cid *client_dcid) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp_key, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, uint32_t version, + const ngtcp2_cid *client_dcid) { uint8_t rx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t tx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t initial_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; @@ -548,21 +546,20 @@ int ngtcp2_crypto_derive_and_install_initial_key( ngtcp2_conn_set_initial_crypto_ctx(conn, &ctx); if (ngtcp2_crypto_derive_initial_secrets( - rx_secret, tx_secret, initial_secret, version, client_dcid, - server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != - 0) { + rx_secret, tx_secret, initial_secret, version, client_dcid, + server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - rx_key, rx_iv, rx_hp_key, version, &ctx.aead, &ctx.md, rx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + rx_key, rx_iv, rx_hp_key, version, &ctx.aead, &ctx.md, rx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } @@ -631,10 +628,10 @@ int ngtcp2_crypto_derive_and_install_initial_key( } int ngtcp2_crypto_derive_and_install_vneg_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, - uint8_t *rx_hp_key, uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, - uint32_t version, const ngtcp2_cid *client_dcid) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp_key, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, uint32_t version, + const ngtcp2_cid *client_dcid) { uint8_t rx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t tx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t initial_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; @@ -682,21 +679,20 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } if (ngtcp2_crypto_derive_initial_secrets( - rx_secret, tx_secret, initial_secret, version, client_dcid, - server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != - 0) { + rx_secret, tx_secret, initial_secret, version, client_dcid, + server ? 
NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - rx_key, rx_iv, rx_hp_key, version, &ctx->aead, &ctx->md, rx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + rx_key, rx_iv, rx_hp_key, version, &ctx->aead, &ctx->md, rx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, tx_hp_key, version, &ctx->aead, &ctx->md, tx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + tx_key, tx_iv, tx_hp_key, version, &ctx->aead, &ctx->md, tx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } @@ -721,8 +717,8 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } rv = ngtcp2_conn_install_vneg_initial_key( - conn, version, &rx_aead_ctx, rx_iv, &rx_hp_ctx, &tx_aead_ctx, tx_iv, - &tx_hp_ctx, NGTCP2_CRYPTO_INITIAL_IVLEN); + conn, version, &rx_aead_ctx, rx_iv, &rx_hp_ctx, &tx_aead_ctx, tx_iv, + &tx_hp_ctx, NGTCP2_CRYPTO_INITIAL_IVLEN); if (rv != 0) { goto fail; } @@ -739,11 +735,11 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } int ngtcp2_crypto_update_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen) { const ngtcp2_crypto_ctx *ctx = ngtcp2_conn_get_crypto_ctx(conn); const ngtcp2_crypto_aead *aead = &ctx->aead; const ngtcp2_crypto_md *md = &ctx->md; @@ -756,7 +752,7 @@ int ngtcp2_crypto_update_key( } if (ngtcp2_crypto_derive_packet_protection_key( - rx_key, rx_iv, NULL, version, aead, md, rx_secret, secretlen) != 0) { + rx_key, rx_iv, NULL, version, aead, md, rx_secret, secretlen) != 0) { return -1; } @@ -766,7 +762,7 @@ int ngtcp2_crypto_update_key( } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, NULL, version, aead, md, tx_secret, secretlen) != 0) { + tx_key, tx_iv, NULL, version, aead, md, tx_secret, secretlen) != 0) { return -1; } @@ -819,20 +815,19 @@ int ngtcp2_crypto_hp_mask_cb(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_update_key_cb( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen, void *user_data) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen, void *user_data) { uint8_t rx_key[64]; uint8_t tx_key[64]; (void)conn; (void)user_data; - if (ngtcp2_crypto_update_key(conn, rx_secret, tx_secret, rx_aead_ctx, rx_key, - rx_iv, tx_aead_ctx, tx_key, tx_iv, - current_rx_secret, current_tx_secret, - secretlen) != 0) { + if (ngtcp2_crypto_update_key( + conn, rx_secret, tx_secret, rx_aead_ctx, rx_key, rx_iv, tx_aead_ctx, + tx_key, tx_iv, current_rx_secret, current_tx_secret, secretlen) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } 
return 0; @@ -920,13 +915,14 @@ static size_t crypto_generate_retry_token_aad(uint8_t *dest, uint32_t version, static const uint8_t retry_token_info_prefix[] = "retry_token"; ngtcp2_ssize ngtcp2_crypto_generate_retry_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts) { - uint8_t plaintext[NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN]; + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts) { + uint8_t + plaintext[/* cid len = */ 1 + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; uint8_t rand_data[NGTCP2_CRYPTO_TOKEN_RAND_DATALEN]; - uint8_t key[32]; - uint8_t iv[32]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead aead; @@ -934,7 +930,7 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( ngtcp2_crypto_aead_ctx aead_ctx; size_t plaintextlen; uint8_t - aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; + aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; size_t aadlen; uint8_t *p = plaintext; ngtcp2_tstamp ts_be = ngtcp2_htonl64(ts); @@ -962,8 +958,8 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); - assert(sizeof(key) >= keylen); - assert(sizeof(iv) >= ivlen); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, sizeof(rand_data), @@ -999,21 +995,21 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( } int ngtcp2_crypto_verify_retry_token( - ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, - const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts) { + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts) { uint8_t - plaintext[/* cid len = */ 1 + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; - uint8_t key[32]; - uint8_t iv[32]; + plaintext[/* cid len = */ 1 + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead_ctx aead_ctx; ngtcp2_crypto_aead aead; ngtcp2_crypto_md md; uint8_t - aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; + aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; size_t aadlen; const uint8_t *rand_data; const uint8_t *ciphertext; @@ -1039,6 +1035,9 @@ int ngtcp2_crypto_verify_retry_token( keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, NGTCP2_CRYPTO_TOKEN_RAND_DATALEN, retry_token_info_prefix, @@ -1081,6 +1080,206 @@ int ngtcp2_crypto_verify_retry_token( return 0; } +static size_t crypto_generate_retry_token_aad2(uint8_t *dest, uint32_t version, + const ngtcp2_cid *retry_scid) { + uint8_t *p = dest; + + version = ngtcp2_htonl(version); 
+ memcpy(p, &version, sizeof(version)); + p += sizeof(version); + memcpy(p, retry_scid->data, retry_scid->datalen); + p += retry_scid->datalen; + + return (size_t)(p - dest); +} + +static const uint8_t retry_token_info_prefix2[] = "retry_token2"; + +ngtcp2_ssize ngtcp2_crypto_generate_retry_token2( + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts) { + uint8_t plaintext[sizeof(ngtcp2_sockaddr_union) + /* cid len = */ 1 + + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; + uint8_t rand_data[NGTCP2_CRYPTO_TOKEN_RAND_DATALEN]; + uint8_t key[16]; + uint8_t iv[12]; + size_t keylen; + size_t ivlen; + ngtcp2_crypto_aead aead; + ngtcp2_crypto_md md; + ngtcp2_crypto_aead_ctx aead_ctx; + uint8_t aad[sizeof(version) + NGTCP2_MAX_CIDLEN]; + size_t aadlen; + uint8_t *p = plaintext; + ngtcp2_tstamp ts_be = ngtcp2_htonl64(ts); + int rv; + + assert((size_t)remote_addrlen <= sizeof(ngtcp2_sockaddr_union)); + + memset(plaintext, 0, sizeof(plaintext)); + + memcpy(p, remote_addr, (size_t)remote_addrlen); + p += sizeof(ngtcp2_sockaddr_union); + *p++ = (uint8_t)odcid->datalen; + memcpy(p, odcid->data, odcid->datalen); + p += NGTCP2_MAX_CIDLEN; + memcpy(p, &ts_be, sizeof(ts_be)); + + assert((size_t)(p + sizeof(ts_be) - plaintext) == sizeof(plaintext)); + + if (ngtcp2_crypto_random(rand_data, sizeof(rand_data)) != 0) { + return -1; + } + + ngtcp2_crypto_aead_aes_128_gcm(&aead); + ngtcp2_crypto_md_sha256(&md); + + keylen = ngtcp2_crypto_aead_keylen(&aead); + ivlen = ngtcp2_crypto_aead_noncelen(&aead); + + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, + rand_data, sizeof(rand_data), + retry_token_info_prefix2, + sizeof(retry_token_info_prefix2) - 1) != 0) { + return -1; + } + + aadlen = crypto_generate_retry_token_aad2(aad, version, retry_scid); + + p = token; + *p++ = NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2; + + if (ngtcp2_crypto_aead_ctx_encrypt_init(&aead_ctx, &aead, key, ivlen) != 0) { + return -1; + } + + rv = ngtcp2_crypto_encrypt(p, &aead, &aead_ctx, plaintext, sizeof(plaintext), + iv, ivlen, aad, aadlen); + + ngtcp2_crypto_aead_ctx_free(&aead_ctx); + + if (rv != 0) { + return -1; + } + + p += sizeof(plaintext) + aead.max_overhead; + memcpy(p, rand_data, sizeof(rand_data)); + p += sizeof(rand_data); + + return p - token; +} + +int ngtcp2_crypto_verify_retry_token2( + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts) { + uint8_t plaintext[sizeof(ngtcp2_sockaddr_union) + /* cid len = */ 1 + + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; + uint8_t key[16]; + uint8_t iv[12]; + size_t keylen; + size_t ivlen; + ngtcp2_crypto_aead_ctx aead_ctx; + ngtcp2_crypto_aead aead; + ngtcp2_crypto_md md; + uint8_t aad[sizeof(version) + NGTCP2_MAX_CIDLEN]; + size_t aadlen; + const uint8_t *rand_data; + const uint8_t *ciphertext; + size_t ciphertextlen; + size_t cil; + int rv; + ngtcp2_tstamp gen_ts; + ngtcp2_sockaddr_union addr; + size_t addrlen; + uint8_t *p; + + assert((size_t)remote_addrlen <= sizeof(ngtcp2_sockaddr_union)); + + if (tokenlen != NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2 || + token[0] != NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2) { + return 
NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN; + } + + rand_data = token + tokenlen - NGTCP2_CRYPTO_TOKEN_RAND_DATALEN; + ciphertext = token + 1; + ciphertextlen = (size_t)(rand_data - ciphertext); + + ngtcp2_crypto_aead_aes_128_gcm(&aead); + ngtcp2_crypto_md_sha256(&md); + + keylen = ngtcp2_crypto_aead_keylen(&aead); + ivlen = ngtcp2_crypto_aead_noncelen(&aead); + + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, + rand_data, NGTCP2_CRYPTO_TOKEN_RAND_DATALEN, + retry_token_info_prefix2, + sizeof(retry_token_info_prefix2) - 1) != 0) { + return NGTCP2_CRYPTO_ERR_INTERNAL; + } + + aadlen = crypto_generate_retry_token_aad2(aad, version, dcid); + + if (ngtcp2_crypto_aead_ctx_decrypt_init(&aead_ctx, &aead, key, ivlen) != 0) { + return NGTCP2_CRYPTO_ERR_INTERNAL; + } + + rv = ngtcp2_crypto_decrypt(plaintext, &aead, &aead_ctx, ciphertext, + ciphertextlen, iv, ivlen, aad, aadlen); + + ngtcp2_crypto_aead_ctx_free(&aead_ctx); + + if (rv != 0) { + return NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN; + } + + p = plaintext; + + memcpy(&addr, p, sizeof(addr)); + + switch (addr.sa.sa_family) { + case NGTCP2_AF_INET: + addrlen = sizeof(ngtcp2_sockaddr_in); + break; + case NGTCP2_AF_INET6: + addrlen = sizeof(ngtcp2_sockaddr_in6); + break; + default: + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + if (addrlen != (size_t)remote_addrlen || + memcmp(&addr, remote_addr, addrlen) != 0) { + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + p += sizeof(addr); + cil = *p++; + + if (cil != 0 && (cil < NGTCP2_MIN_CIDLEN || cil > NGTCP2_MAX_CIDLEN)) { + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + memcpy(&gen_ts, p + NGTCP2_MAX_CIDLEN, sizeof(gen_ts)); + + gen_ts = ngtcp2_ntohl64(gen_ts); + if (gen_ts + timeout <= ts) { + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + ngtcp2_cid_init(odcid, p, cil); + + return 0; +} + static size_t crypto_generate_regular_token_aad(uint8_t *dest, const ngtcp2_sockaddr *sa) { const uint8_t *addr; @@ -1093,7 +1292,7 @@ static size_t crypto_generate_regular_token_aad(uint8_t *dest, break; case NGTCP2_AF_INET6: addr = - (const uint8_t *)&((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr; + (const uint8_t *)&((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr; addrlen = sizeof(((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr); break; default: @@ -1109,13 +1308,13 @@ static size_t crypto_generate_regular_token_aad(uint8_t *dest, static const uint8_t regular_token_info_prefix[] = "regular_token"; ngtcp2_ssize ngtcp2_crypto_generate_regular_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - ngtcp2_tstamp ts) { + uint8_t *token, const uint8_t *secret, size_t secretlen, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + ngtcp2_tstamp ts) { uint8_t plaintext[sizeof(ngtcp2_tstamp)]; uint8_t rand_data[NGTCP2_CRYPTO_TOKEN_RAND_DATALEN]; - uint8_t key[32]; - uint8_t iv[32]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead aead; @@ -1144,8 +1343,8 @@ ngtcp2_ssize ngtcp2_crypto_generate_regular_token( keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); - assert(sizeof(key) >= keylen); - assert(sizeof(iv) >= ivlen); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, sizeof(rand_data), @@ -1186,8 +1385,8 @@ int ngtcp2_crypto_verify_regular_token(const 
uint8_t *token, size_t tokenlen, ngtcp2_duration timeout, ngtcp2_tstamp ts) { uint8_t plaintext[sizeof(ngtcp2_tstamp)]; - uint8_t key[32]; - uint8_t iv[32]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead_ctx aead_ctx; @@ -1217,6 +1416,9 @@ int ngtcp2_crypto_verify_regular_token(const uint8_t *token, size_t tokenlen, keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, NGTCP2_CRYPTO_TOKEN_RAND_DATALEN, regular_token_info_prefix, @@ -1250,9 +1452,9 @@ int ngtcp2_crypto_verify_regular_token(const uint8_t *token, size_t tokenlen, } ngtcp2_ssize ngtcp2_crypto_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen) { + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen) { uint8_t rx_secret[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t tx_secret[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t initial_secret[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; @@ -1273,8 +1475,8 @@ ngtcp2_ssize ngtcp2_crypto_write_connection_close( } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } @@ -1290,9 +1492,9 @@ ngtcp2_ssize ngtcp2_crypto_write_connection_close( } spktlen = ngtcp2_pkt_write_connection_close( - dest, destlen, version, dcid, scid, error_code, reason, reasonlen, - ngtcp2_crypto_encrypt_cb, &ctx.aead, &aead_ctx, tx_iv, - ngtcp2_crypto_hp_mask_cb, &ctx.hp, &hp_ctx); + dest, destlen, version, dcid, scid, error_code, reason, reasonlen, + ngtcp2_crypto_encrypt_cb, &ctx.aead, &aead_ctx, tx_iv, + ngtcp2_crypto_hp_mask_cb, &ctx.hp, &hp_ctx); if (spktlen < 0) { spktlen = -1; } @@ -1352,8 +1554,8 @@ int ngtcp2_crypto_client_initial_cb(ngtcp2_conn *conn, void *user_data) { (void)user_data; if (ngtcp2_crypto_derive_and_install_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1362,7 +1564,7 @@ int ngtcp2_crypto_client_initial_cb(ngtcp2_conn *conn, void *user_data) { } if (ngtcp2_crypto_read_write_crypto_data( - conn, NGTCP2_ENCRYPTION_LEVEL_INITIAL, NULL, 0) != 0) { + conn, NGTCP2_ENCRYPTION_LEVEL_INITIAL, NULL, 0) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1374,8 +1576,8 @@ int ngtcp2_crypto_recv_retry_cb(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, (void)user_data; if (ngtcp2_crypto_derive_and_install_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - ngtcp2_conn_get_client_chosen_version(conn), &hd->scid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ngtcp2_conn_get_client_chosen_version(conn), &hd->scid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1388,8 +1590,8 @@ int ngtcp2_crypto_recv_client_initial_cb(ngtcp2_conn *conn, (void)user_data; if (ngtcp2_crypto_derive_and_install_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, - ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1402,8 +1604,8 @@ int ngtcp2_crypto_version_negotiation_cb(ngtcp2_conn *conn, uint32_t version, (void)user_data; if (ngtcp2_crypto_derive_and_install_vneg_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, version, - client_dcid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, version, + client_dcid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1420,7 +1622,7 @@ void ngtcp2_crypto_delete_crypto_aead_ctx_cb(ngtcp2_conn *conn, } void ngtcp2_crypto_delete_crypto_cipher_ctx_cb( - ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data) { + ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data) { (void)conn; (void)user_data; diff --git a/deps/ngtcp2/ngtcp2/crypto/shared.h b/deps/ngtcp2/ngtcp2/crypto/shared.h index d69fd21212d7d2..34158d3d02dbc0 100644 --- a/deps/ngtcp2/ngtcp2/crypto/shared.h +++ b/deps/ngtcp2/ngtcp2/crypto/shared.h @@ -22,12 +22,12 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef NGTCP2_SHARED_H -#define NGTCP2_SHARED_H +#ifndef SHARED_H +#define SHARED_H #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -286,10 +286,10 @@ int ngtcp2_crypto_set_remote_transport_params(ngtcp2_conn *conn, void *tls); * This function returns 0 if it succeeds, or -1. */ int ngtcp2_crypto_derive_and_install_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, - uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, - const ngtcp2_cid *client_dcid); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, + const ngtcp2_cid *client_dcid); /** * @function @@ -337,10 +337,10 @@ int ngtcp2_crypto_derive_and_install_initial_key( * This function returns 0 if it succeeds, or -1. 
*/ int ngtcp2_crypto_derive_and_install_vneg_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, - uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, - const ngtcp2_cid *client_dcid); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, + const ngtcp2_cid *client_dcid); /** * @function @@ -394,4 +394,4 @@ int ngtcp2_crypto_hkdf_expand_label(uint8_t *dest, size_t destlen, const uint8_t *secret, size_t secretlen, const uint8_t *label, size_t labellen); -#endif /* NGTCP2_SHARED_H */ +#endif /* !defined(SHARED_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c b/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c index 2b7b5321863915..bc9d9d84a862c5 100644 --- a/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,9 +39,9 @@ #define PRINTF_DEBUG 0 #if PRINTF_DEBUG # define DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__) -#else +#else /* !PRINTF_DEBUG */ # define DEBUG_MSG(...) (void)0 -#endif +#endif /* !PRINTF_DEBUG */ ngtcp2_crypto_aead *ngtcp2_crypto_aead_aes_128_gcm(ngtcp2_crypto_aead *aead) { return ngtcp2_crypto_aead_init(aead, (void *)wolfSSL_EVP_aes_128_gcm()); @@ -65,7 +65,7 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_init(ngtcp2_crypto_aead *aead, void *aead_native_handle) { aead->native_handle = aead_native_handle; aead->max_overhead = wolfSSL_quic_get_aead_tag_len( - (const WOLFSSL_EVP_CIPHER *)(aead_native_handle)); + (const WOLFSSL_EVP_CIPHER *)(aead_native_handle)); return aead; } @@ -124,7 +124,7 @@ ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)wolfSSL_quic_get_hp(ssl); ctx->max_encryption = crypto_aead_get_aead_max_encryption(aead); ctx->max_decryption_failure = - crypto_aead_get_aead_max_decryption_failure(aead); + crypto_aead_get_aead_max_decryption_failure(aead); return ctx; } @@ -203,7 +203,7 @@ int ngtcp2_crypto_cipher_ctx_encrypt_init(ngtcp2_crypto_cipher_ctx *cipher_ctx, WOLFSSL_EVP_CIPHER_CTX *actx; actx = - wolfSSL_quic_crypt_new(cipher->native_handle, key, NULL, /* encrypt */ 1); + wolfSSL_quic_crypt_new(cipher->native_handle, key, NULL, /* encrypt */ 1); if (actx == NULL) { return -1; } @@ -279,7 +279,6 @@ int ngtcp2_crypto_decrypt(uint8_t *dest, const ngtcp2_crypto_aead *aead, if (wolfSSL_quic_aead_decrypt(dest, aead_ctx->native_handle, ciphertext, ciphertextlen, nonce, aad, aadlen) != WOLFSSL_SUCCESS) { - DEBUG_MSG("WOLFSSL: decrypt FAILED\n"); return -1; } @@ -296,11 +295,11 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, (void)hp; if (wolfSSL_EVP_EncryptInit_ex(actx, NULL, NULL, NULL, sample) != - WOLFSSL_SUCCESS || + WOLFSSL_SUCCESS || wolfSSL_EVP_CipherUpdate(actx, dest, &len, PLAINTEXT, sizeof(PLAINTEXT) - 1) != WOLFSSL_SUCCESS || wolfSSL_EVP_EncryptFinal_ex(actx, dest + sizeof(PLAINTEXT) - 1, &len) != - WOLFSSL_SUCCESS) { + WOLFSSL_SUCCESS) { DEBUG_MSG("WOLFSSL: hp_mask FAILED\n"); return -1; } @@ -309,11 +308,11 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_read_write_crypto_data( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - const uint8_t *data, size_t datalen) { + ngtcp2_conn 
*conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { WOLFSSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); WOLFSSL_ENCRYPTION_LEVEL level = - ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level(encryption_level); + ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level(encryption_level); int rv; int err; @@ -397,7 +396,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, } ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( - WOLFSSL_ENCRYPTION_LEVEL wolfssl_level) { + WOLFSSL_ENCRYPTION_LEVEL wolfssl_level) { switch (wolfssl_level) { case wolfssl_encryption_initial: return NGTCP2_ENCRYPTION_LEVEL_INITIAL; @@ -415,7 +414,7 @@ ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( WOLFSSL_ENCRYPTION_LEVEL ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level) { + ngtcp2_encryption_level encryption_level) { switch (encryption_level) { case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return wolfssl_encryption_initial; @@ -458,7 +457,7 @@ static int set_encryption_secrets(WOLFSSL *ssl, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); + ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); DEBUG_MSG("WOLFSSL: set encryption secrets, level=%d, rxlen=%lu, txlen=%lu\n", wolfssl_level, rx_secret ? secretlen : 0, @@ -484,7 +483,7 @@ static int add_handshake_data(WOLFSSL *ssl, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); + ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); int rv; DEBUG_MSG("WOLFSSL: add handshake data, level=%d len=%lu\n", wolfssl_level, @@ -516,10 +515,10 @@ static int send_alert(WOLFSSL *ssl, enum wolfssl_encryption_level_t level, } static WOLFSSL_QUIC_METHOD quic_method = { - set_encryption_secrets, - add_handshake_data, - flush_flight, - send_alert, + set_encryption_secrets, + add_handshake_data, + flush_flight, + send_alert, }; static void crypto_wolfssl_configure_context(WOLFSSL_CTX *ssl_ctx) { @@ -532,7 +531,7 @@ int ngtcp2_crypto_wolfssl_configure_server_context(WOLFSSL_CTX *ssl_ctx) { crypto_wolfssl_configure_context(ssl_ctx); #if PRINTF_DEBUG wolfSSL_Debugging_ON(); -#endif +#endif /* PRINTF_DEBUG */ return 0; } @@ -541,6 +540,6 @@ int ngtcp2_crypto_wolfssl_configure_client_context(WOLFSSL_CTX *ssl_ctx) { wolfSSL_CTX_UseSessionTicket(ssl_ctx); #if PRINTF_DEBUG wolfSSL_Debugging_ON(); -#endif +#endif /* PRINTF_DEBUG */ return 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h index 72c8142a5a5aa7..acc79be6e0b375 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h @@ -30,12 +30,12 @@ libcurl) */ #if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) # define WIN32 -#endif +#endif /* (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) */ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4324) -#endif +#endif /* defined(_MSC_VER) */ #include #if defined(_MSC_VER) && (_MSC_VER < 1800) @@ -43,9 +43,9 @@ compliant. 
See compiler macros and version number in https://sourceforge.net/p/predef/wiki/Compilers/ */ # include -#else /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ +#else /* !(defined(_MSC_VER) && (_MSC_VER < 1800)) */ # include -#endif /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ +#endif /* !(defined(_MSC_VER) && (_MSC_VER < 1800)) */ #include #include #include @@ -54,13 +54,13 @@ # ifdef WIN32 # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN -# endif /* WIN32_LEAN_AND_MEAN */ +# endif /* !defined(WIN32_LEAN_AND_MEAN) */ # include -# else /* !WIN32 */ +# else /* !defined(WIN32) */ # include # include -# endif /* !WIN32 */ -#endif /* NGTCP2_USE_GENERIC_SOCKADDR */ +# endif /* !defined(WIN32) */ +#endif /* !defined(NGTCP2_USE_GENERIC_SOCKADDR) */ #include @@ -69,26 +69,26 @@ #elif defined(WIN32) # ifdef BUILDING_NGTCP2 # define NGTCP2_EXTERN __declspec(dllexport) -# else /* !BUILDING_NGTCP2 */ +# else /* !defined(BUILDING_NGTCP2) */ # define NGTCP2_EXTERN __declspec(dllimport) -# endif /* !BUILDING_NGTCP2 */ -#else /* !defined(WIN32) */ +# endif /* !defined(BUILDING_NGTCP2) */ +#else /* !(defined(NGTCP2_STATICLIB) || defined(WIN32)) */ # ifdef BUILDING_NGTCP2 # define NGTCP2_EXTERN __attribute__((visibility("default"))) -# else /* !BUILDING_NGTCP2 */ +# else /* !defined(BUILDING_NGTCP2) */ # define NGTCP2_EXTERN -# endif /* !BUILDING_NGTCP2 */ -#endif /* !defined(WIN32) */ +# endif /* !defined(BUILDING_NGTCP2) */ +#endif /* !(defined(NGTCP2_STATICLIB) || defined(WIN32)) */ #ifdef _MSC_VER # define NGTCP2_ALIGN(N) __declspec(align(N)) -#else /* !_MSC_VER */ +#else /* !defined(_MSC_VER) */ # define NGTCP2_ALIGN(N) __attribute__((aligned(N))) -#endif /* !_MSC_VER */ +#endif /* !defined(_MSC_VER) */ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @typedef @@ -1229,11 +1229,11 @@ typedef struct ngtcp2_pkt_stateless_reset { #ifdef NGTCP2_USE_GENERIC_SOCKADDR # ifndef NGTCP2_AF_INET # error NGTCP2_AF_INET must be defined -# endif /* !NGTCP2_AF_INET */ +# endif /* !defined(NGTCP2_AF_INET) */ # ifndef NGTCP2_AF_INET6 # error NGTCP2_AF_INET6 must be defined -# endif /* !NGTCP2_AF_INET6 */ +# endif /* !defined(NGTCP2_AF_INET6) */ typedef unsigned short int ngtcp2_sa_family; typedef uint16_t ngtcp2_in_port; @@ -1267,7 +1267,7 @@ typedef struct ngtcp2_sockaddr_in6 { } ngtcp2_sockaddr_in6; typedef uint32_t ngtcp2_socklen; -#else /* !NGTCP2_USE_GENERIC_SOCKADDR */ +#else /* !defined(NGTCP2_USE_GENERIC_SOCKADDR) */ # define NGTCP2_AF_INET AF_INET # define NGTCP2_AF_INET6 AF_INET6 @@ -1303,7 +1303,7 @@ typedef struct sockaddr_in6 ngtcp2_sockaddr_in6; * uint32_t. */ typedef socklen_t ngtcp2_socklen; -#endif /* !NGTCP2_USE_GENERIC_SOCKADDR */ +#endif /* !defined(NGTCP2_USE_GENERIC_SOCKADDR) */ /** * @struct @@ -1689,7 +1689,8 @@ typedef enum ngtcp2_token_type { } ngtcp2_token_type; #define NGTCP2_SETTINGS_V1 1 -#define NGTCP2_SETTINGS_VERSION NGTCP2_SETTINGS_V1 +#define NGTCP2_SETTINGS_V2 2 +#define NGTCP2_SETTINGS_VERSION NGTCP2_SETTINGS_V2 /** * @struct @@ -1723,8 +1724,7 @@ typedef struct ngtcp2_settings { ngtcp2_printf log_printf; /** * :member:`max_tx_udp_payload_size` is the maximum size of UDP - * datagram payload that the local endpoint transmits. It is used - * by congestion controller to compute congestion window. + * datagram payload that the local endpoint transmits. 
*/ size_t max_tx_udp_payload_size; /** @@ -1777,6 +1777,13 @@ typedef struct ngtcp2_settings { * or :member:`ngtcp2_transport_params.initial_max_stream_data_uni`, * depending on the type of stream. The window size is scaled up to * the value specified in this field. + * + * Please note that the auto-tuning is done per stream. Even if the + * previous stream gets larger window as a result of auto-tuning, + * the new stream still starts with the initial value set in + * transport parameters. This might become a bottleneck if + * congestion window of a remote server is wide open. If this + * causes an issue, do not enable auto-tuning. */ uint64_t max_stream_window; /** @@ -1873,10 +1880,29 @@ typedef struct ngtcp2_settings { */ uint8_t no_pmtud; /** - * :member:`pkt_num` is the initial packet number for each packet - * number space. It must be in range [0, INT32_MAX], inclusive. + * :member:`initial_pkt_num` is the initial packet number for each + * packet number space. It must be in range [0, INT32_MAX], + * inclusive. */ uint32_t initial_pkt_num; + /* The following fields have been added since NGTCP2_SETTINGS_V2. */ + /** + * :member:`pmtud_probes` is the array of UDP datagram payload size + * to probe during Path MTU Discovery. The discovery is done in the + * order appeared in this array. The size must be strictly larger + * than 1200, otherwise the behavior is undefined. The maximum + * value in this array should be set to + * :member:`max_tx_udp_payload_size`. If this field is not set, the + * predefined PMTUD probes are made. This field has been available + * since v1.4.0. + */ + const uint16_t *pmtud_probes; + /** + * :member:`pmtud_probeslen` is the number of elements that are + * contained in the array pointed by :member:`pmtud_probes`. This + * field has been available since v1.4.0. + */ + size_t pmtud_probeslen; } ngtcp2_settings; /** @@ -2080,8 +2106,8 @@ typedef struct ngtcp2_crypto_ctx { * Buffer is too small. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_transport_params_encode_versioned( - uint8_t *dest, size_t destlen, int transport_params_version, - const ngtcp2_transport_params *params); + uint8_t *dest, size_t destlen, int transport_params_version, + const ngtcp2_transport_params *params); /** * @function @@ -2324,8 +2350,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_decode_hd_short(ngtcp2_pkt_hd *dest, * :macro:`NGTCP2_MIN_STATELESS_RESET_RANDLEN`. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_stateless_reset( - uint8_t *dest, size_t destlen, const uint8_t *stateless_reset_token, - const uint8_t *rand, size_t randlen); + uint8_t *dest, size_t destlen, const uint8_t *stateless_reset_token, + const uint8_t *rand, size_t randlen); /** * @function @@ -2347,9 +2373,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_stateless_reset( * Buffer is too small. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( - uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, - size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, - size_t nsv); + uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, + size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, + size_t nsv); /** * @struct @@ -2770,8 +2796,8 @@ typedef int (*ngtcp2_stream_reset)(ngtcp2_conn *conn, int64_t stream_id, * call return immediately. 
*/ typedef int (*ngtcp2_acked_stream_data_offset)( - ngtcp2_conn *conn, int64_t stream_id, uint64_t offset, uint64_t datalen, - void *user_data, void *stream_user_data); + ngtcp2_conn *conn, int64_t stream_id, uint64_t offset, uint64_t datalen, + void *user_data, void *stream_user_data); /** * @functypedef @@ -2893,11 +2919,11 @@ typedef int (*ngtcp2_remove_connection_id)(ngtcp2_conn *conn, * immediately. */ typedef int (*ngtcp2_update_key)( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen, void *user_data); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen, void *user_data); /** * @macrosection @@ -3024,8 +3050,8 @@ typedef enum ngtcp2_connection_id_status_type { * immediately. */ typedef int (*ngtcp2_connection_id_status)( - ngtcp2_conn *conn, ngtcp2_connection_id_status_type type, uint64_t seq, - const ngtcp2_cid *cid, const uint8_t *token, void *user_data); + ngtcp2_conn *conn, ngtcp2_connection_id_status_type type, uint64_t seq, + const ngtcp2_cid *cid, const uint8_t *token, void *user_data); /** * @functypedef @@ -3064,7 +3090,7 @@ typedef void (*ngtcp2_delete_crypto_aead_ctx)(ngtcp2_conn *conn, * `. */ typedef void (*ngtcp2_delete_crypto_cipher_ctx)( - ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); + ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); /** * @macrosection @@ -3510,12 +3536,12 @@ typedef struct ngtcp2_callbacks { * Callback function failed. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, - ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, - const ngtcp2_crypto_cipher_ctx *hp_ctx); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, + const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3542,10 +3568,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_connection_close( * :macro:`NGTCP2_MIN_INITIAL_DCIDLEN`. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_retry( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, - size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, + size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx); /** * @function @@ -3594,12 +3620,12 @@ NGTCP2_EXTERN int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, * Out of memory. 
*/ NGTCP2_EXTERN int ngtcp2_conn_client_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data); + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data); /** * @function @@ -3629,12 +3655,12 @@ NGTCP2_EXTERN int ngtcp2_conn_client_new_versioned( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_server_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data); + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data); /** * @function @@ -3698,8 +3724,8 @@ ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, * and :macro:`NGTCP2_WRITE_STREAM_FLAG_NONE` as flags. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_pkt_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_tstamp ts); /** * @function @@ -3755,10 +3781,10 @@ NGTCP2_EXTERN int ngtcp2_conn_get_handshake_completed(ngtcp2_conn *conn); * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_initial_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, - const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, + const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); /** * @function @@ -3790,11 +3816,11 @@ NGTCP2_EXTERN int ngtcp2_conn_install_initial_key( * Out of memory. 
*/ NGTCP2_EXTERN int ngtcp2_conn_install_vneg_initial_key( - ngtcp2_conn *conn, uint32_t version, - const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, - const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); + ngtcp2_conn *conn, uint32_t version, + const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, + const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); /** * @function @@ -3821,8 +3847,8 @@ NGTCP2_EXTERN int ngtcp2_conn_install_vneg_initial_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_rx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3849,8 +3875,8 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_handshake_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_tx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3876,8 +3902,8 @@ NGTCP2_EXTERN int ngtcp2_conn_install_tx_handshake_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_0rtt_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3906,9 +3932,9 @@ NGTCP2_EXTERN int ngtcp2_conn_install_0rtt_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_rx_key( - ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, - const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, + const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3937,9 +3963,9 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_tx_key( - ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, - const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, + const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -4063,7 +4089,7 @@ NGTCP2_EXTERN ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn); * User callback failed */ NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_remote_transport_params( - ngtcp2_conn *conn, const uint8_t *data, size_t datalen); + ngtcp2_conn *conn, const uint8_t *data, size_t datalen); /** * @function @@ -4148,7 +4174,7 @@ ngtcp2_ssize ngtcp2_conn_encode_0rtt_transport_params(ngtcp2_conn *conn, * The input is malformed. 
*/ NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_0rtt_transport_params( - ngtcp2_conn *conn, const uint8_t *data, size_t datalen); + ngtcp2_conn *conn, const uint8_t *data, size_t datalen); /** * @function @@ -4168,8 +4194,8 @@ NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_0rtt_transport_params( * `ngtcp2_conn_install_tx_handshake_key` has been called. */ NGTCP2_EXTERN int ngtcp2_conn_set_local_transport_params_versioned( - ngtcp2_conn *conn, int transport_params_version, - const ngtcp2_transport_params *params); + ngtcp2_conn *conn, int transport_params_version, + const ngtcp2_transport_params *params); /** * @function @@ -4193,7 +4219,7 @@ ngtcp2_conn_get_local_transport_params(ngtcp2_conn *conn); * Buffer is too small. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_encode_local_transport_params( - ngtcp2_conn *conn, uint8_t *dest, size_t destlen); + ngtcp2_conn *conn, uint8_t *dest, size_t destlen); /** * @function @@ -4374,10 +4400,10 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_read(ngtcp2_conn *conn, * conveniently accepts a single buffer. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts); /** * @function @@ -4388,7 +4414,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * handshake as well. * * |destlen| should be at least - * :member:`ngtcp2_settings.max_tx_udp_payload_size`. + * :member:`ngtcp2_settings.max_tx_udp_payload_size`. It must be at + * least :macro:`NGTCP2_MAX_UDP_PAYLOAD_SIZE`. * * Specifying -1 to |stream_id| means no new stream data to send. * @@ -4418,8 +4445,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * In that case, |*pdatalen| would be -1 if |pdatalen| is not * ``NULL``. * - * If |flags| & :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` is nonzero, and - * 0 length STREAM frame is successfully serialized, |*pdatalen| would + * Empty data is treated specially, and it is only accepted if no + * data, including the empty data, is submitted to a stream or + * :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` is set in |flags|. If 0 + * length STREAM frame is successfully serialized, |*pdatalen| would * be 0. * * The number of data encoded in STREAM frame is stored in |*pdatalen| @@ -4525,10 +4554,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * sending it makes QUIC connection enter the closing state. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts); /** * @macrosection @@ -4559,10 +4588,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( * conveniently accepts a single buffer. 
*/ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts); /** * @function @@ -4573,7 +4602,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( * as well. * * |destlen| should be at least - * :member:`ngtcp2_settings.max_tx_udp_payload_size`. + * :member:`ngtcp2_settings.max_tx_udp_payload_size`. It must be at + * least :macro:`NGTCP2_MAX_UDP_PAYLOAD_SIZE`. * * For |path| and |pi| parameters, refer to * `ngtcp2_conn_writev_stream`. @@ -4655,10 +4685,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( * sending it makes QUIC connection enter the closing state. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts); /** * @function @@ -4992,7 +5022,7 @@ ngtcp2_conn_get_path_max_tx_udp_payload_size(ngtcp2_conn *conn); * Out of memory */ NGTCP2_EXTERN int ngtcp2_conn_initiate_immediate_migration( - ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_tstamp ts); + ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_tstamp ts); /** * @function @@ -5193,7 +5223,19 @@ typedef enum ngtcp2_ccerr_type { * transport error, and it indicates that connection is closed * because of idle timeout. */ - NGTCP2_CCERR_TYPE_IDLE_CLOSE + NGTCP2_CCERR_TYPE_IDLE_CLOSE, + /** + * :enum:`NGTCP2_CCERR_TYPE_DROP_CONN` is a special case of QUIC + * transport error, and it indicates that connection should be + * dropped without sending a CONNECTION_CLOSE frame. + */ + NGTCP2_CCERR_TYPE_DROP_CONN, + /** + * :enum:`NGTCP2_CCERR_TYPE_RETRY` is a special case of QUIC + * transport error, and it indicates that RETRY packet should be + * sent to a client. + */ + NGTCP2_CCERR_TYPE_RETRY } ngtcp2_ccerr_type; /** @@ -5284,6 +5326,18 @@ NGTCP2_EXTERN void ngtcp2_ccerr_set_transport_error(ngtcp2_ccerr *ccerr, * :member:`ccerr->error_code ` to * :macro:`NGTCP2_NO_ERROR`. * + * If |liberr| is :macro:`NGTCP2_ERR_DROP_CONN`, :member:`ccerr->type + * ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_DROP_CONN`, and + * :member:`ccerr->error_code ` to + * :macro:`NGTCP2_NO_ERROR`. + * + * If |liberr| is :macro:`NGTCP2_ERR_RETRY`, :member:`ccerr->type + * ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_RETRY`, and + * :member:`ccerr->error_type ` to + * :macro:`NGTCP2_NO_ERROR`. 
+ * * Otherwise, :member:`ccerr->type ` is set to * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT`, and * :member:`ccerr->error_code ` is set to an @@ -5380,9 +5434,9 @@ NGTCP2_EXTERN void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, * User callback failed */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - const ngtcp2_ccerr *ccerr, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, const ngtcp2_ccerr *ccerr, + ngtcp2_tstamp ts); /** * @function @@ -5734,8 +5788,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_write_stream(CONN, PATH, PI, DEST, DESTLEN, PDATALEN, \ FLAGS, STREAM_ID, DATA, DATALEN, TS) \ ngtcp2_conn_write_stream_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PDATALEN), (FLAGS), (STREAM_ID), (DATA), (DATALEN), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PDATALEN), (FLAGS), (STREAM_ID), (DATA), (DATALEN), (TS)) /* * `ngtcp2_conn_writev_stream` is a wrapper around @@ -5745,8 +5799,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_writev_stream(CONN, PATH, PI, DEST, DESTLEN, PDATALEN, \ FLAGS, STREAM_ID, DATAV, DATAVCNT, TS) \ ngtcp2_conn_writev_stream_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PDATALEN), (FLAGS), (STREAM_ID), (DATAV), (DATAVCNT), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PDATALEN), (FLAGS), (STREAM_ID), (DATAV), (DATAVCNT), (TS)) /* * `ngtcp2_conn_write_datagram` is a wrapper around @@ -5756,8 +5810,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_write_datagram(CONN, PATH, PI, DEST, DESTLEN, PACCEPTED, \ FLAGS, DGRAM_ID, DATA, DATALEN, TS) \ ngtcp2_conn_write_datagram_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PACCEPTED), (FLAGS), (DGRAM_ID), (DATA), (DATALEN), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PACCEPTED), (FLAGS), (DGRAM_ID), (DATA), (DATALEN), (TS)) /* * `ngtcp2_conn_writev_datagram` is a wrapper around @@ -5767,8 +5821,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_writev_datagram(CONN, PATH, PI, DEST, DESTLEN, PACCEPTED, \ FLAGS, DGRAM_ID, DATAV, DATAVCNT, TS) \ ngtcp2_conn_writev_datagram_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PACCEPTED), (FLAGS), (DGRAM_ID), (DATAV), (DATAVCNT), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PACCEPTED), (FLAGS), (DGRAM_ID), (DATAV), (DATAVCNT), (TS)) /* * `ngtcp2_conn_write_connection_close` is a wrapper around @@ -5778,8 +5832,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_write_connection_close(CONN, PATH, PI, DEST, DESTLEN, \ CCERR, TS) \ ngtcp2_conn_write_connection_close_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (CCERR), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), (CCERR), \ + (TS)) /* * `ngtcp2_transport_params_encode` is a wrapper around @@ -5788,7 +5842,7 @@ NGTCP2_EXTERN uint32_t 
ngtcp2_select_version(const uint32_t *preferred_versions, */ #define ngtcp2_transport_params_encode(DEST, DESTLEN, PARAMS) \ ngtcp2_transport_params_encode_versioned( \ - (DEST), (DESTLEN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) + (DEST), (DESTLEN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) /* * `ngtcp2_transport_params_decode` is a wrapper around @@ -5807,9 +5861,9 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_client_new(PCONN, DCID, SCID, PATH, VERSION, CALLBACKS, \ SETTINGS, PARAMS, MEM, USER_DATA) \ ngtcp2_conn_client_new_versioned( \ - (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ - (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ - NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) + (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ + (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ + NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) /* * `ngtcp2_conn_server_new` is a wrapper around @@ -5819,9 +5873,9 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_server_new(PCONN, DCID, SCID, PATH, VERSION, CALLBACKS, \ SETTINGS, PARAMS, MEM, USER_DATA) \ ngtcp2_conn_server_new_versioned( \ - (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ - (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ - NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) + (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ + (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ + NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) /* * `ngtcp2_conn_set_local_transport_params` is a wrapper around @@ -5830,7 +5884,7 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, */ #define ngtcp2_conn_set_local_transport_params(CONN, PARAMS) \ ngtcp2_conn_set_local_transport_params_versioned( \ - (CONN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) + (CONN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) /* * `ngtcp2_transport_params_default` is a wrapper around @@ -5859,10 +5913,10 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #ifdef _MSC_VER # pragma warning(pop) -#endif +#endif /* defined(_MSC_VER) */ #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_H */ +#endif /* !defined(NGTCP2_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h index 801c6cb2681386..15bf36ded2bc77 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h @@ -22,8 +22,8 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef VERSION_H -#define VERSION_H +#ifndef NGTCP2_VERSION_H +#define NGTCP2_VERSION_H /** * @macrosection @@ -36,7 +36,7 @@ * * Version number of the ngtcp2 library release. */ -#define NGTCP2_VERSION "1.3.0" +#define NGTCP2_VERSION "1.9.0" /** * @macro @@ -46,6 +46,6 @@ * number, 8 bits for minor and 8 bits for patch. Version 1.2.3 * becomes 0x010203. 
*/ -#define NGTCP2_VERSION_NUM 0x010300 +#define NGTCP2_VERSION_NUM 0x010900 -#endif /* VERSION_H */ +#endif /* !defined(NGTCP2_VERSION_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c index d4778d66accf31..0e5bfe069e9fab 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c @@ -29,7 +29,7 @@ #include "ngtcp2_macro.h" #include "ngtcp2_tstamp.h" -ngtcp2_objalloc_def(acktr_entry, ngtcp2_acktr_entry, oplent); +ngtcp2_objalloc_def(acktr_entry, ngtcp2_acktr_entry, oplent) static void acktr_entry_init(ngtcp2_acktr_entry *ent, int64_t pkt_num, ngtcp2_tstamp tstamp) { @@ -56,41 +56,26 @@ void ngtcp2_acktr_entry_objalloc_del(ngtcp2_acktr_entry *ent, ngtcp2_objalloc_acktr_entry_release(objalloc, ent); } -static int greater(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs > *(int64_t *)rhs; -} - -int ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, - const ngtcp2_mem *mem) { - int rv; +void ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, + const ngtcp2_mem *mem) { + ngtcp2_objalloc_acktr_entry_init(&acktr->objalloc, NGTCP2_ACKTR_MAX_ENT + 1, + mem); - ngtcp2_objalloc_acktr_entry_init(&acktr->objalloc, 32, mem); + ngtcp2_static_ringbuf_acks_init(&acktr->acks); - rv = ngtcp2_ringbuf_init(&acktr->acks, 32, sizeof(ngtcp2_acktr_ack_entry), - mem); - if (rv != 0) { - goto fail_acks_init; - } - - ngtcp2_ksl_init(&acktr->ents, greater, sizeof(int64_t), mem); + ngtcp2_ksl_init(&acktr->ents, ngtcp2_ksl_int64_greater, + ngtcp2_ksl_int64_greater_search, sizeof(int64_t), mem); acktr->log = log; - acktr->mem = mem; acktr->flags = NGTCP2_ACKTR_FLAG_NONE; acktr->first_unacked_ts = UINT64_MAX; acktr->rx_npkt = 0; - - return 0; - -fail_acks_init: - ngtcp2_objalloc_free(&acktr->objalloc); - return rv; } void ngtcp2_acktr_free(ngtcp2_acktr *acktr) { #ifdef NOMEMPOOL ngtcp2_ksl_it it; -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ if (acktr == NULL) { return; @@ -101,12 +86,10 @@ void ngtcp2_acktr_free(ngtcp2_acktr *acktr) { ngtcp2_ksl_it_next(&it)) { ngtcp2_acktr_entry_objalloc_del(ngtcp2_ksl_it_get(&it), &acktr->objalloc); } -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ ngtcp2_ksl_free(&acktr->ents); - ngtcp2_ringbuf_free(&acktr->acks); - ngtcp2_objalloc_free(&acktr->objalloc); } @@ -213,11 +196,11 @@ void ngtcp2_acktr_forget(ngtcp2_acktr *acktr, ngtcp2_acktr_entry *ent) { } } -ngtcp2_ksl_it ngtcp2_acktr_get(ngtcp2_acktr *acktr) { +ngtcp2_ksl_it ngtcp2_acktr_get(const ngtcp2_acktr *acktr) { return ngtcp2_ksl_begin(&acktr->ents); } -int ngtcp2_acktr_empty(ngtcp2_acktr *acktr) { +int ngtcp2_acktr_empty(const ngtcp2_acktr *acktr) { ngtcp2_ksl_it it = ngtcp2_ksl_begin(&acktr->ents); return ngtcp2_ksl_it_end(&it); } @@ -225,7 +208,7 @@ int ngtcp2_acktr_empty(ngtcp2_acktr *acktr) { ngtcp2_acktr_ack_entry *ngtcp2_acktr_add_ack(ngtcp2_acktr *acktr, int64_t pkt_num, int64_t largest_ack) { - ngtcp2_acktr_ack_entry *ent = ngtcp2_ringbuf_push_front(&acktr->acks); + ngtcp2_acktr_ack_entry *ent = ngtcp2_ringbuf_push_front(&acktr->acks.rb); ent->largest_ack = largest_ack; ent->pkt_num = pkt_num; @@ -266,8 +249,10 @@ static void acktr_on_ack(ngtcp2_acktr *acktr, ngtcp2_ringbuf *rb, ngtcp2_ksl_it_prev(&it); ent = ngtcp2_ksl_it_get(&it); - if (ent->pkt_num > ack_ent->largest_ack && - ack_ent->largest_ack >= ent->pkt_num - (int64_t)(ent->len - 1)) { + + assert(ent->pkt_num > ack_ent->largest_ack); + + if (ack_ent->largest_ack + (int64_t)ent->len > ent->pkt_num) { ent->len = 
(size_t)(ent->pkt_num - ack_ent->largest_ack); } } @@ -279,7 +264,7 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { ngtcp2_acktr_ack_entry *ent; int64_t largest_ack = fr->largest_ack, min_ack; size_t i, j; - ngtcp2_ringbuf *rb = &acktr->acks; + ngtcp2_ringbuf *rb = &acktr->acks.rb; size_t nacks = ngtcp2_ringbuf_len(rb); /* Assume that ngtcp2_pkt_validate_ack(fr) returns 0 */ @@ -295,7 +280,7 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { min_ack = largest_ack - (int64_t)fr->first_ack_range; - if (min_ack <= ent->pkt_num && ent->pkt_num <= largest_ack) { + if (min_ack <= ent->pkt_num) { acktr_on_ack(acktr, rb, j); return; } @@ -306,8 +291,7 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { for (;;) { if (ent->pkt_num > largest_ack) { - ++j; - if (j == nacks) { + if (++j == nacks) { return; } ent = ngtcp2_ringbuf_get(rb, j); @@ -330,7 +314,7 @@ void ngtcp2_acktr_commit_ack(ngtcp2_acktr *acktr) { acktr->rx_npkt = 0; } -int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, +int ngtcp2_acktr_require_active_ack(const ngtcp2_acktr *acktr, ngtcp2_duration max_ack_delay, ngtcp2_tstamp ts) { return ngtcp2_tstamp_elapsed(acktr->first_unacked_ts, max_ack_delay, ts); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h index 809fb692adc3c8..16aee42f275f67 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,7 +39,7 @@ /* NGTCP2_ACKTR_MAX_ENT is the maximum number of ngtcp2_acktr_entry which ngtcp2_acktr stores. */ -#define NGTCP2_ACKTR_MAX_ENT 1024 +#define NGTCP2_ACKTR_MAX_ENT (NGTCP2_MAX_ACK_RANGES + 1) typedef struct ngtcp2_log ngtcp2_log; @@ -65,7 +65,7 @@ typedef struct ngtcp2_acktr_entry { }; } ngtcp2_acktr_entry; -ngtcp2_objalloc_decl(acktr_entry, ngtcp2_acktr_entry, oplent); +ngtcp2_objalloc_decl(acktr_entry, ngtcp2_acktr_entry, oplent) /* * ngtcp2_acktr_entry_objalloc_new allocates memory for ent, and @@ -108,17 +108,18 @@ typedef struct ngtcp2_acktr_ack_entry { expired and canceled. */ #define NGTCP2_ACKTR_FLAG_CANCEL_TIMER 0x0100u +ngtcp2_static_ringbuf_def(acks, 32, sizeof(ngtcp2_acktr_ack_entry)) + /* * ngtcp2_acktr tracks received packets which we have to send ack. */ typedef struct ngtcp2_acktr { ngtcp2_objalloc objalloc; - ngtcp2_ringbuf acks; + ngtcp2_static_ringbuf_acks acks; /* ents includes ngtcp2_acktr_entry sorted by decreasing order of packet number. */ ngtcp2_ksl ents; ngtcp2_log *log; - const ngtcp2_mem *mem; /* flags is bitwise OR of zero, or more of NGTCP2_ACKTR_FLAG_*. */ uint16_t flags; /* first_unacked_ts is timestamp when ngtcp2_acktr_entry is added @@ -131,15 +132,9 @@ typedef struct ngtcp2_acktr { /* * ngtcp2_acktr_init initializes |acktr|. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. */ -int ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, - const ngtcp2_mem *mem); +void ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, + const ngtcp2_mem *mem); /* * ngtcp2_acktr_free frees resources allocated for |acktr|. It frees @@ -174,13 +169,13 @@ void ngtcp2_acktr_forget(ngtcp2_acktr *acktr, ngtcp2_acktr_entry *ent); * has the largest packet number to be acked. If there is no entry, * returned value satisfies ngtcp2_ksl_it_end(&it) != 0. 
*/ -ngtcp2_ksl_it ngtcp2_acktr_get(ngtcp2_acktr *acktr); +ngtcp2_ksl_it ngtcp2_acktr_get(const ngtcp2_acktr *acktr); /* * ngtcp2_acktr_empty returns nonzero if it has no packet to * acknowledge. */ -int ngtcp2_acktr_empty(ngtcp2_acktr *acktr); +int ngtcp2_acktr_empty(const ngtcp2_acktr *acktr); /* * ngtcp2_acktr_add_ack records outgoing ACK frame whose largest @@ -208,7 +203,7 @@ void ngtcp2_acktr_commit_ack(ngtcp2_acktr *acktr); * ngtcp2_acktr_require_active_ack returns nonzero if ACK frame should * be generated actively. */ -int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, +int ngtcp2_acktr_require_active_ack(const ngtcp2_acktr *acktr, ngtcp2_duration max_ack_delay, ngtcp2_tstamp ts); @@ -218,4 +213,4 @@ int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, */ void ngtcp2_acktr_immediate_ack(ngtcp2_acktr *acktr); -#endif /* NGTCP2_ACKTR_H */ +#endif /* !defined(NGTCP2_ACKTR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h index 8e3a9f591d9977..65ee7cd9f3006b 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -68,4 +68,4 @@ uint32_t ngtcp2_addr_compare(const ngtcp2_addr *a, const ngtcp2_addr *b); */ int ngtcp2_addr_empty(const ngtcp2_addr *addr); -#endif /* NGTCP2_ADDR_H */ +#endif /* !defined(NGTCP2_ADDR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c index 5cc39ee3b03da4..4a6797689fcc01 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c @@ -66,7 +66,7 @@ int ngtcp2_balloc_get(ngtcp2_balloc *balloc, void **pbuf, size_t n) { if (ngtcp2_buf_left(&balloc->buf) < n) { p = ngtcp2_mem_malloc(balloc->mem, - sizeof(ngtcp2_memblock_hd) + 0x10u + balloc->blklen); + sizeof(ngtcp2_memblock_hd) + 0x8u + balloc->blklen); if (p == NULL) { return NGTCP2_ERR_NOMEM; } @@ -75,10 +75,10 @@ int ngtcp2_balloc_get(ngtcp2_balloc *balloc, void **pbuf, size_t n) { hd->next = balloc->head; balloc->head = hd; ngtcp2_buf_init( - &balloc->buf, - (uint8_t *)(((uintptr_t)p + sizeof(ngtcp2_memblock_hd) + 0xfu) & - ~(uintptr_t)0xfu), - balloc->blklen); + &balloc->buf, + (uint8_t *)(((uintptr_t)p + sizeof(ngtcp2_memblock_hd) + 0xfu) & + ~(uintptr_t)0xfu), + balloc->blklen); } assert(((uintptr_t)balloc->buf.last & 0xfu) == 0); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h index 1fb16325d9953d..c0e2a3f7562968 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,7 +39,10 @@ typedef struct ngtcp2_memblock_hd ngtcp2_memblock_hd; * ngtcp2_memblock_hd is the header of memory block. */ struct ngtcp2_memblock_hd { - ngtcp2_memblock_hd *next; + union { + ngtcp2_memblock_hd *next; + uint64_t pad; + }; }; /* @@ -60,7 +63,7 @@ typedef struct ngtcp2_balloc { /* * ngtcp2_balloc_init initializes |balloc| with |blklen| which is the - * size of memory block. + * size of memory block. |blklen| must be divisible by 16. 
*/ void ngtcp2_balloc_init(ngtcp2_balloc *balloc, size_t blklen, const ngtcp2_mem *mem); @@ -88,4 +91,4 @@ int ngtcp2_balloc_get(ngtcp2_balloc *balloc, void **pbuf, size_t n); */ void ngtcp2_balloc_clear(ngtcp2_balloc *balloc); -#endif /* NGTCP2_BALLOC_H */ +#endif /* !defined(NGTCP2_BALLOC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c index 27c4667c03924a..8777ca4c8a1632 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c @@ -39,8 +39,9 @@ #define NGTCP2_BBR_EXTRA_ACKED_FILTERLEN 10 #define NGTCP2_BBR_STARTUP_PACING_GAIN_H 277 +#define NGTCP2_BBR_DRAIN_PACING_GAIN_H 35 -#define NGTCP2_BBR_STARTUP_CWND_GAIN_H 200 +#define NGTCP2_BBR_DEFAULT_CWND_GAIN_H 200 #define NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H 50 @@ -72,7 +73,7 @@ static void bbr_reset_lower_bounds(ngtcp2_cc_bbr *bbr); static void bbr_init_round_counting(ngtcp2_cc_bbr *bbr); -static void bbr_init_full_pipe(ngtcp2_cc_bbr *bbr); +static void bbr_reset_full_bw(ngtcp2_cc_bbr *bbr); static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); @@ -84,8 +85,7 @@ static void bbr_set_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static void bbr_enter_startup(ngtcp2_cc_bbr *bbr); -static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr, - const ngtcp2_cc_ack *ack); +static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr); static void bbr_update_on_ack(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); @@ -148,16 +148,17 @@ static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr); static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr); -static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); +static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static int bbr_check_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); +static int bbr_is_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static int bbr_is_time_to_go_down(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static int bbr_has_elapsed_in_phase(ngtcp2_cc_bbr *bbr, ngtcp2_duration interval, ngtcp2_tstamp ts); @@ -175,9 +176,8 @@ static void bbr_probe_inflight_hi_upward(ngtcp2_cc_bbr *bbr, static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static int bbr_check_time_to_probe_bw(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); +static int bbr_is_time_to_probe_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); static void bbr_pick_probe_wait(ngtcp2_cc_bbr *bbr); @@ -197,6 +197,8 @@ static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_rs *rs, ngtcp2_tstamp ts); +static void bbr_note_loss(ngtcp2_cc_bbr *bbr); + static void bbr_handle_lost_packet(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); @@ -227,15 +229,14 @@ static void bbr_handle_restart_from_idle(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t bw, - uint64_t gain_h); +static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t gain_h); static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, uint64_t inflight); static 
uint64_t bbr_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - uint64_t bw, uint64_t gain_h); + uint64_t gain_h); static void bbr_update_max_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); @@ -247,10 +248,6 @@ static uint64_t min_pipe_cwnd(size_t max_udp_payload_size); static void bbr_advance_max_bw_filter(ngtcp2_cc_bbr *bbr); -static void bbr_modulate_cwnd_for_recovery(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - static void bbr_save_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static void bbr_restore_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); @@ -272,7 +269,7 @@ static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_time); static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); + const ngtcp2_cc_ack *ack); static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp initial_ts) { @@ -289,11 +286,12 @@ static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, bbr->idle_restart = 0; bbr->extra_acked_interval_start = initial_ts; bbr->extra_acked_delivered = 0; + bbr->full_bw_reached = 0; bbr_reset_congestion_signals(bbr); bbr_reset_lower_bounds(bbr); bbr_init_round_counting(bbr); - bbr_init_full_pipe(bbr); + bbr_reset_full_bw(bbr); bbr_init_pacing_rate(bbr, cstat); bbr_enter_startup(bbr); @@ -326,23 +324,17 @@ static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, bbr->bw_probe_up_acks = 0; bbr->inflight_hi = UINT64_MAX; - bbr->bw_hi = UINT64_MAX; bbr->probe_rtt_expired = 0; bbr->probe_rtt_min_delay = UINT64_MAX; bbr->probe_rtt_min_stamp = initial_ts; bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr->max_inflight = 0; bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->congestion_recovery_next_round_delivered = 0; - - bbr->prior_inflight_lo = 0; - bbr->prior_inflight_hi = 0; - bbr->prior_bw_lo = 0; } static void bbr_reset_congestion_signals(ngtcp2_cc_bbr *bbr) { @@ -362,48 +354,53 @@ static void bbr_init_round_counting(ngtcp2_cc_bbr *bbr) { bbr->round_count = 0; } -static void bbr_init_full_pipe(ngtcp2_cc_bbr *bbr) { - bbr->filled_pipe = 0; +static void bbr_reset_full_bw(ngtcp2_cc_bbr *bbr) { bbr->full_bw = 0; bbr->full_bw_count = 0; + bbr->full_bw_now = 0; } -static void bbr_check_startup_full_bandwidth(ngtcp2_cc_bbr *bbr) { - if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { +static void bbr_check_full_bw_reached(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + if (bbr->full_bw_now || bbr->rst->rs.is_app_limited) { return; } - if (bbr->max_bw * 100 >= bbr->full_bw * 125) { - bbr->full_bw = bbr->max_bw; - bbr->full_bw_count = 0; - } - - ++bbr->full_bw_count; - - if (bbr->full_bw_count >= 3) { - bbr->filled_pipe = 1; + if (cstat->delivery_rate_sec * 100 >= bbr->full_bw * 125) { + bbr_reset_full_bw(bbr); + bbr->full_bw = cstat->delivery_rate_sec; - ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, - "bbr filled pipe, full_bw=%" PRIu64, bbr->full_bw); + return; } -} -static void bbr_check_startup_high_loss(ngtcp2_cc_bbr *bbr, - const ngtcp2_cc_ack *ack) { - if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { + if (!bbr->round_start) { return; } - if (bbr->loss_events_in_round <= 3) { + ++bbr->full_bw_count; + + bbr->full_bw_now = bbr->full_bw_count >= 3; + if (!bbr->full_bw_now) { return; } - /* loss_thresh = 2% */ - if (bbr->bytes_lost_in_round * 100 <= 
ack->prior_bytes_in_flight * 2) { + bbr->full_bw_reached = 1; + + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, + "bbr reached full bandwidth, full_bw=%" PRIu64, bbr->full_bw); +} + +static void bbr_check_startup_high_loss(ngtcp2_cc_bbr *bbr) { + if (bbr->full_bw_reached || bbr->loss_events_in_round <= 6 || + (bbr->in_loss_recovery && + bbr->round_count <= bbr->round_count_at_recovery) || + !is_inflight_too_high(&bbr->rst->rs)) { return; } - bbr->filled_pipe = 1; + bbr->full_bw_reached = 1; + bbr->inflight_hi = ngtcp2_max_uint64(bbr_bdp_multiple(bbr, bbr->cwnd_gain_h), + bbr->inflight_latest); } static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { @@ -423,7 +420,7 @@ static void bbr_set_pacing_rate_with_gain(ngtcp2_cc_bbr *bbr, interval = NGTCP2_SECONDS * 100 * 100 / pacing_gain_h / bbr->bw / (100 - NGTCP2_BBR_PACING_MARGIN_PERCENT); - if (bbr->filled_pipe || interval < cstat->pacing_interval) { + if (bbr->full_bw_reached || interval < cstat->pacing_interval) { cstat->pacing_interval = interval; } } @@ -437,15 +434,13 @@ static void bbr_enter_startup(ngtcp2_cc_bbr *bbr) { bbr->state = NGTCP2_BBR_STATE_STARTUP; bbr->pacing_gain_h = NGTCP2_BBR_STARTUP_PACING_GAIN_H; - bbr->cwnd_gain_h = NGTCP2_BBR_STARTUP_CWND_GAIN_H; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } -static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr, - const ngtcp2_cc_ack *ack) { - bbr_check_startup_full_bandwidth(bbr); - bbr_check_startup_high_loss(bbr, ack); +static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr) { + bbr_check_startup_high_loss(bbr); - if (bbr->state == NGTCP2_BBR_STATE_STARTUP && bbr->filled_pipe) { + if (bbr->state == NGTCP2_BBR_STATE_STARTUP && bbr->full_bw_reached) { bbr_enter_drain(bbr); } } @@ -468,7 +463,8 @@ static void bbr_update_model_and_state(ngtcp2_cc_bbr *bbr, bbr_update_latest_delivery_signals(bbr, cstat); bbr_update_congestion_signals(bbr, cstat, ack); bbr_update_ack_aggregation(bbr, cstat, ack, ts); - bbr_check_startup_done(bbr, ack); + bbr_check_full_bw_reached(bbr, cstat); + bbr_check_startup_done(bbr); bbr_check_drain(bbr, cstat, ts); bbr_update_probe_bw_cycle_phase(bbr, cstat, ack, ts); bbr_update_min_rtt(bbr, ack, ts); @@ -493,9 +489,9 @@ static void bbr_update_on_loss(ngtcp2_cc_bbr *cc, ngtcp2_conn_stat *cstat, static void bbr_update_latest_delivery_signals(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { bbr->loss_round_start = 0; - bbr->bw_latest = ngtcp2_max(bbr->bw_latest, cstat->delivery_rate_sec); + bbr->bw_latest = ngtcp2_max_uint64(bbr->bw_latest, cstat->delivery_rate_sec); bbr->inflight_latest = - ngtcp2_max(bbr->inflight_latest, bbr->rst->rs.delivered); + ngtcp2_max_uint64(bbr->inflight_latest, bbr->rst->rs.delivered); if (bbr->rst->rs.prior_delivered >= bbr->loss_round_delivered) { bbr->loss_round_delivered = bbr->rst->delivered; @@ -519,11 +515,6 @@ static void bbr_update_congestion_signals(ngtcp2_cc_bbr *bbr, if (ack->bytes_lost) { bbr->bytes_lost_in_round += ack->bytes_lost; ++bbr->loss_events_in_round; - - if (!bbr->loss_in_round) { - bbr->loss_in_round = 1; - bbr->loss_round_delivered = bbr->rst->delivered; - } } if (!bbr->loss_round_start) { @@ -558,16 +549,15 @@ static void bbr_init_lower_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { } static void bbr_loss_lower_bounds(ngtcp2_cc_bbr *bbr) { - bbr->bw_lo = ngtcp2_max(bbr->bw_latest, bbr->bw_lo * NGTCP2_BBR_BETA_NUMER / - NGTCP2_BBR_BETA_DENOM); - bbr->inflight_lo = ngtcp2_max(bbr->inflight_latest, - bbr->inflight_lo * NGTCP2_BBR_BETA_NUMER / - NGTCP2_BBR_BETA_DENOM); + 
bbr->bw_lo = ngtcp2_max_uint64( + bbr->bw_latest, bbr->bw_lo * NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); + bbr->inflight_lo = ngtcp2_max_uint64( + bbr->inflight_latest, + bbr->inflight_lo * NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); } static void bbr_bound_bw_for_model(ngtcp2_cc_bbr *bbr) { - bbr->bw = ngtcp2_min(bbr->max_bw, bbr->bw_lo); - bbr->bw = ngtcp2_min(bbr->bw, bbr->bw_hi); + bbr->bw = ngtcp2_min_uint64(bbr->max_bw, bbr->bw_lo); } static void bbr_update_max_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, @@ -633,7 +623,13 @@ static void bbr_update_ack_aggregation(ngtcp2_cc_bbr *bbr, bbr->extra_acked_delivered += ack->bytes_delivered; extra = bbr->extra_acked_delivered - expected_delivered; - extra = ngtcp2_min(extra, cstat->cwnd); + extra = ngtcp2_min_uint64(extra, cstat->cwnd); + + if (bbr->full_bw_reached) { + bbr->extra_acked_filter.window_length = NGTCP2_BBR_EXTRA_ACKED_FILTERLEN; + } else { + bbr->extra_acked_filter.window_length = 1; + } ngtcp2_window_filter_update(&bbr->extra_acked_filter, extra, bbr->round_count); @@ -645,14 +641,14 @@ static void bbr_enter_drain(ngtcp2_cc_bbr *bbr) { ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr enter Drain"); bbr->state = NGTCP2_BBR_STATE_DRAIN; - bbr->pacing_gain_h = 100 * 100 / NGTCP2_BBR_STARTUP_CWND_GAIN_H; - bbr->cwnd_gain_h = NGTCP2_BBR_STARTUP_CWND_GAIN_H; + bbr->pacing_gain_h = NGTCP2_BBR_DRAIN_PACING_GAIN_H; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } static void bbr_check_drain(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { if (bbr->state == NGTCP2_BBR_STATE_DRAIN && - cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->bw, 100)) { + cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, 100)) { bbr_enter_probe_bw(bbr, ts); } } @@ -677,7 +673,7 @@ static void bbr_start_probe_bw_down(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts) { bbr->state = NGTCP2_BBR_STATE_PROBE_BW_DOWN; bbr->pacing_gain_h = 90; - bbr->cwnd_gain_h = 200; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr) { @@ -686,7 +682,7 @@ static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr) { bbr->state = NGTCP2_BBR_STATE_PROBE_BW_CRUISE; bbr->pacing_gain_h = 100; - bbr->cwnd_gain_h = 200; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr) { @@ -703,18 +699,18 @@ static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr) { bbr->state = NGTCP2_BBR_STATE_PROBE_BW_REFILL; bbr->pacing_gain_h = 100; - bbr->cwnd_gain_h = 200; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } -static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { +static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr start ProbeBW_UP"); bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STARTING; bbr_start_round(bbr); + bbr_reset_full_bw(bbr); - bbr->cycle_stamp = ts; + bbr->full_bw = cstat->delivery_rate_sec; bbr->state = NGTCP2_BBR_STATE_PROBE_BW_UP; bbr->pacing_gain_h = 125; bbr->cwnd_gain_h = 225; @@ -726,7 +722,7 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - if (!bbr->filled_pipe) { + if (!bbr->full_bw_reached) { return; } @@ -738,17 +734,17 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, switch (bbr->state) { case NGTCP2_BBR_STATE_PROBE_BW_DOWN: - if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { 
+ if (bbr_is_time_to_probe_bw(bbr, cstat, ts)) { return; } - if (bbr_check_time_to_cruise(bbr, cstat, ts)) { + if (bbr_is_time_to_cruise(bbr, cstat, ts)) { bbr_start_probe_bw_cruise(bbr); } break; case NGTCP2_BBR_STATE_PROBE_BW_CRUISE: - if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { + if (bbr_is_time_to_probe_bw(bbr, cstat, ts)) { return; } @@ -756,13 +752,12 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, case NGTCP2_BBR_STATE_PROBE_BW_REFILL: if (bbr->round_start) { bbr->bw_probe_samples = 1; - bbr_start_probe_bw_up(bbr, cstat, ts); + bbr_start_probe_bw_up(bbr, cstat); } break; case NGTCP2_BBR_STATE_PROBE_BW_UP: - if (bbr_has_elapsed_in_phase(bbr, bbr->min_rtt, ts) && - cstat->bytes_in_flight > bbr_inflight(bbr, cstat, bbr->max_bw, 125)) { + if (bbr_is_time_to_go_down(bbr, cstat)) { bbr_start_probe_bw_down(bbr, ts); } @@ -772,15 +767,26 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, } } -static int bbr_check_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { +static int bbr_is_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { (void)ts; if (cstat->bytes_in_flight > bbr_inflight_with_headroom(bbr, cstat)) { return 0; } - if (cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->max_bw, 100)) { + if (cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, 100)) { + return 1; + } + + return 0; +} + +static int bbr_is_time_to_go_down(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + if (bbr->rst->is_cwnd_limited && cstat->cwnd >= bbr->inflight_hi) { + bbr_reset_full_bw(bbr); + bbr->full_bw = cstat->delivery_rate_sec; + } else if (bbr->full_bw_now) { return 1; } @@ -801,13 +807,13 @@ static uint64_t bbr_inflight_with_headroom(ngtcp2_cc_bbr *bbr, return UINT64_MAX; } - headroom = ngtcp2_max(cstat->max_tx_udp_payload_size, - bbr->inflight_hi * NGTCP2_BBR_HEADROOM_NUMER / - NGTCP2_BBR_HEADROOM_DENOM); + headroom = ngtcp2_max_uint64(cstat->max_tx_udp_payload_size, + bbr->inflight_hi * NGTCP2_BBR_HEADROOM_NUMER / + NGTCP2_BBR_HEADROOM_DENOM); mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); if (bbr->inflight_hi > headroom) { - return ngtcp2_max(bbr->inflight_hi - headroom, mpcwnd); + return ngtcp2_max_uint64(bbr->inflight_hi - headroom, mpcwnd); } return mpcwnd; @@ -818,8 +824,8 @@ static void bbr_raise_inflight_hi_slope(ngtcp2_cc_bbr *bbr, uint64_t growth_this_round = cstat->max_tx_udp_payload_size << bbr->bw_probe_up_rounds; - bbr->bw_probe_up_rounds = ngtcp2_min(bbr->bw_probe_up_rounds + 1, 30); - bbr->probe_up_cnt = ngtcp2_max(cstat->cwnd / growth_this_round, 1) * + bbr->bw_probe_up_rounds = ngtcp2_min_size(bbr->bw_probe_up_rounds + 1, 30); + bbr->probe_up_cnt = ngtcp2_max_uint64(cstat->cwnd / growth_this_round, 1) * cstat->max_tx_udp_payload_size; } @@ -860,8 +866,7 @@ static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, } if (!bbr_check_inflight_too_high(bbr, cstat, ts)) { - /* bbr->bw_hi never be updated */ - if (bbr->inflight_hi == UINT64_MAX /* || bbr->bw_hi == UINT64_MAX */) { + if (bbr->inflight_hi == UINT64_MAX) { return; } @@ -869,19 +874,14 @@ static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, bbr->inflight_hi = bbr->rst->rs.tx_in_flight; } - if (cstat->delivery_rate_sec > bbr->bw_hi) { - bbr->bw_hi = cstat->delivery_rate_sec; - } - if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { bbr_probe_inflight_hi_upward(bbr, cstat, ack); } } } -static int bbr_check_time_to_probe_bw(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - 
ngtcp2_tstamp ts) { +static int bbr_is_time_to_probe_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { if (bbr_has_elapsed_in_phase(bbr, bbr->bw_probe_wait, ts) || bbr_is_reno_coexistence_probe_time(bbr, cstat)) { bbr_start_probe_bw_refill(bbr); @@ -902,22 +902,22 @@ static void bbr_pick_probe_wait(ngtcp2_cc_bbr *bbr) { bbr->rand(&rand, 1, &bbr->rand_ctx); bbr->bw_probe_wait = - 2 * NGTCP2_SECONDS + (ngtcp2_tstamp)(NGTCP2_SECONDS * rand / 255); + 2 * NGTCP2_SECONDS + (ngtcp2_tstamp)(NGTCP2_SECONDS * rand / 255); } static int bbr_is_reno_coexistence_probe_time(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { uint64_t reno_rounds = - bbr_target_inflight(bbr, cstat) / cstat->max_tx_udp_payload_size; + bbr_target_inflight(bbr, cstat) / cstat->max_tx_udp_payload_size; - return bbr->rounds_since_bw_probe >= ngtcp2_min(reno_rounds, 63); + return bbr->rounds_since_bw_probe >= ngtcp2_min_uint64(reno_rounds, 63); } static uint64_t bbr_target_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { - uint64_t bdp = bbr_inflight(bbr, cstat, bbr->bw, 100); + uint64_t bdp = bbr_bdp_multiple(bbr, bbr->cwnd_gain_h); - return ngtcp2_min(bdp, cstat->cwnd); + return ngtcp2_min_uint64(bdp, cstat->cwnd); } static int bbr_check_inflight_too_high(ngtcp2_cc_bbr *bbr, @@ -946,9 +946,9 @@ static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, bbr->bw_probe_samples = 0; if (!rs->is_app_limited) { - bbr->inflight_hi = ngtcp2_max( - rs->tx_in_flight, bbr_target_inflight(bbr, cstat) * - NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); + bbr->inflight_hi = ngtcp2_max_uint64( + rs->tx_in_flight, bbr_target_inflight(bbr, cstat) * + NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); } if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { @@ -956,10 +956,21 @@ static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, } } +static void bbr_note_loss(ngtcp2_cc_bbr *bbr) { + if (bbr->loss_in_round) { + return; + } + + bbr->loss_in_round = 1; + bbr->loss_round_delivered = bbr->rst->delivered; +} + static void bbr_handle_lost_packet(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { ngtcp2_rs rs = {0}; + bbr_note_loss(bbr); + if (!bbr->bw_probe_samples) { return; } @@ -1007,7 +1018,7 @@ static void bbr_update_min_rtt(ngtcp2_cc_bbr *bbr, const ngtcp2_cc_ack *ack, int min_rtt_expired; bbr->probe_rtt_expired = - ts > bbr->probe_rtt_min_stamp + NGTCP2_BBR_PROBE_RTT_INTERVAL; + ts > bbr->probe_rtt_min_stamp + NGTCP2_BBR_PROBE_RTT_INTERVAL; if (ack->rtt != UINT64_MAX && (ack->rtt < bbr->probe_rtt_min_delay || bbr->probe_rtt_expired)) { @@ -1106,7 +1117,7 @@ static void bbr_mark_connection_app_limited(ngtcp2_cc_bbr *bbr, static void bbr_exit_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts) { bbr_reset_lower_bounds(bbr); - if (bbr->filled_pipe) { + if (bbr->full_bw_reached) { bbr_start_probe_bw_down(bbr, ts); bbr_start_probe_bw_cruise(bbr); } else { @@ -1131,15 +1142,14 @@ static void bbr_handle_restart_from_idle(ngtcp2_cc_bbr *bbr, } } -static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t bw, - uint64_t gain_h) { +static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t gain_h) { uint64_t bdp; if (bbr->min_rtt == UINT64_MAX) { return bbr->initial_cwnd; } - bdp = bw * bbr->min_rtt / NGTCP2_SECONDS; + bdp = ngtcp2_max_uint64(bbr->bw * bbr->min_rtt / NGTCP2_SECONDS, 1); return (uint64_t)(bdp * gain_h / 100); } @@ -1153,9 +1163,9 @@ static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, uint64_t inflight) { bbr_update_offload_budget(bbr, cstat); - inflight = 
ngtcp2_max(inflight, bbr->offload_budget); + inflight = ngtcp2_max_uint64(inflight, bbr->offload_budget); inflight = - ngtcp2_max(inflight, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); + ngtcp2_max_uint64(inflight, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { inflight += 2 * cstat->max_tx_udp_payload_size; @@ -1165,8 +1175,8 @@ static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, } static uint64_t bbr_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - uint64_t bw, uint64_t gain_h) { - uint64_t inflight = bbr_bdp_multiple(bbr, bw, gain_h); + uint64_t gain_h) { + uint64_t inflight = bbr_bdp_multiple(bbr, gain_h); return bbr_quantization_budget(bbr, cstat, inflight); } @@ -1178,8 +1188,7 @@ static void bbr_update_max_inflight(ngtcp2_cc_bbr *bbr, /* Not documented */ /* bbr_update_aggregation_budget(bbr); */ - inflight = - bbr_bdp_multiple(bbr, bbr->bw, bbr->cwnd_gain_h) + bbr->extra_acked; + inflight = bbr_bdp_multiple(bbr, bbr->cwnd_gain_h) + bbr->extra_acked; bbr->max_inflight = bbr_quantization_budget(bbr, cstat, inflight); } @@ -1192,44 +1201,26 @@ static void bbr_advance_max_bw_filter(ngtcp2_cc_bbr *bbr) { ++bbr->cycle_count; } -static void bbr_modulate_cwnd_for_recovery(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - if (ack->bytes_lost > 0) { - if (cstat->cwnd > ack->bytes_lost) { - cstat->cwnd -= ack->bytes_lost; - cstat->cwnd = ngtcp2_max(cstat->cwnd, 2 * cstat->max_tx_udp_payload_size); - } else { - cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; - } - } - - if (bbr->packet_conservation) { - cstat->cwnd = - ngtcp2_max(cstat->cwnd, cstat->bytes_in_flight + ack->bytes_delivered); - } -} - static void bbr_save_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { if (!bbr->in_loss_recovery && bbr->state != NGTCP2_BBR_STATE_PROBE_RTT) { bbr->prior_cwnd = cstat->cwnd; return; } - bbr->prior_cwnd = ngtcp2_max(bbr->prior_cwnd, cstat->cwnd); + bbr->prior_cwnd = ngtcp2_max_uint64(bbr->prior_cwnd, cstat->cwnd); } static void bbr_restore_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { - cstat->cwnd = ngtcp2_max(cstat->cwnd, bbr->prior_cwnd); + cstat->cwnd = ngtcp2_max_uint64(cstat->cwnd, bbr->prior_cwnd); } static uint64_t bbr_probe_rtt_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { uint64_t probe_rtt_cwnd = - bbr_bdp_multiple(bbr, bbr->bw, NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H); + bbr_bdp_multiple(bbr, NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H); uint64_t mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); - return ngtcp2_max(probe_rtt_cwnd, mpcwnd); + return ngtcp2_max_uint64(probe_rtt_cwnd, mpcwnd); } static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_cc_bbr *bbr, @@ -1239,7 +1230,7 @@ static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_cc_bbr *bbr, if (bbr->state == NGTCP2_BBR_STATE_PROBE_RTT) { probe_rtt_cwnd = bbr_probe_rtt_cwnd(bbr, cstat); - cstat->cwnd = ngtcp2_min(cstat->cwnd, probe_rtt_cwnd); + cstat->cwnd = ngtcp2_min_uint64(cstat->cwnd, probe_rtt_cwnd); } } @@ -1248,21 +1239,18 @@ static void bbr_set_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, uint64_t mpcwnd; bbr_update_max_inflight(bbr, cstat); - bbr_modulate_cwnd_for_recovery(bbr, cstat, ack); - - if (!bbr->packet_conservation) { - if (bbr->filled_pipe) { - cstat->cwnd += ack->bytes_delivered; - cstat->cwnd = ngtcp2_min(cstat->cwnd, bbr->max_inflight); - } else if (cstat->cwnd < bbr->max_inflight || - bbr->rst->delivered < bbr->initial_cwnd) { - cstat->cwnd += ack->bytes_delivered; - } - mpcwnd = 
min_pipe_cwnd(cstat->max_tx_udp_payload_size); - cstat->cwnd = ngtcp2_max(cstat->cwnd, mpcwnd); + if (bbr->full_bw_reached) { + cstat->cwnd += ack->bytes_delivered; + cstat->cwnd = ngtcp2_min_uint64(cstat->cwnd, bbr->max_inflight); + } else if (cstat->cwnd < bbr->max_inflight || + bbr->rst->delivered < bbr->initial_cwnd) { + cstat->cwnd += ack->bytes_delivered; } + mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); + cstat->cwnd = ngtcp2_max_uint64(cstat->cwnd, mpcwnd); + bbr_bound_cwnd_for_probe_rtt(bbr, cstat); bbr_bound_cwnd_for_model(bbr, cstat); } @@ -1280,30 +1268,23 @@ static void bbr_bound_cwnd_for_model(ngtcp2_cc_bbr *bbr, cap = bbr_inflight_with_headroom(bbr, cstat); } - cap = ngtcp2_min(cap, bbr->inflight_lo); - cap = ngtcp2_max(cap, mpcwnd); + cap = ngtcp2_min_uint64(cap, bbr->inflight_lo); + cap = ngtcp2_max_uint64(cap, mpcwnd); - cstat->cwnd = ngtcp2_min(cstat->cwnd, cap); + cstat->cwnd = ngtcp2_min_uint64(cstat->cwnd, cap); } static void bbr_set_send_quantum(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { - size_t floor, send_quantum; + size_t send_quantum = 64 * 1024; (void)bbr; - if (cstat->pacing_interval > (NGTCP2_SECONDS * 8 * 10 / 12) >> 20) { - floor = cstat->max_tx_udp_payload_size; - } else { - floor = 2 * cstat->max_tx_udp_payload_size; - } - if (cstat->pacing_interval) { - send_quantum = (size_t)(NGTCP2_MILLISECONDS / cstat->pacing_interval); - send_quantum = ngtcp2_min(send_quantum, 64 * 1024); - } else { - send_quantum = 64 * 1024; + send_quantum = ngtcp2_min_size( + send_quantum, (size_t)(NGTCP2_MILLISECONDS / cstat->pacing_interval)); } - cstat->send_quantum = ngtcp2_max(send_quantum, floor); + cstat->send_quantum = + ngtcp2_max_size(send_quantum, 2 * cstat->max_tx_udp_payload_size); } static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, @@ -1313,16 +1294,12 @@ static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, } static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { + const ngtcp2_cc_ack *ack) { if (bbr->in_loss_recovery) { - if (ts - cstat->congestion_recovery_start_ts >= cstat->smoothed_rtt) { - bbr->packet_conservation = 0; - } - if (ack->largest_pkt_sent_ts != UINT64_MAX && !in_congestion_recovery(cstat, ack->largest_pkt_sent_ts)) { bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr_restore_cwnd(bbr, cstat); } @@ -1331,18 +1308,15 @@ static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, if (bbr->congestion_recovery_start_ts != UINT64_MAX) { bbr->in_loss_recovery = 1; + bbr->round_count_at_recovery = + bbr->round_start ? 
bbr->round_count : bbr->round_count + 1; bbr_save_cwnd(bbr, cstat); cstat->cwnd = - cstat->bytes_in_flight + - ngtcp2_max(ack->bytes_delivered, cstat->max_tx_udp_payload_size); + cstat->bytes_in_flight + + ngtcp2_max_uint64(ack->bytes_delivered, cstat->max_tx_udp_payload_size); cstat->congestion_recovery_start_ts = bbr->congestion_recovery_start_ts; bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->packet_conservation = 1; - bbr->congestion_recovery_next_round_delivered = bbr->rst->delivered; - bbr->prior_inflight_hi = bbr->inflight_hi; - bbr->prior_inflight_lo = bbr->inflight_lo; - bbr->prior_bw_lo = bbr->bw_lo; } } @@ -1350,12 +1324,18 @@ static void bbr_cc_on_pkt_lost(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + if (bbr->state == NGTCP2_BBR_STATE_STARTUP) { + return; + } + bbr_update_on_loss(bbr, cstat, pkt, ts); } static void bbr_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts) { + ngtcp2_tstamp sent_ts, uint64_t bytes_lost, + ngtcp2_tstamp ts) { ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + (void)bytes_lost; if (bbr->in_loss_recovery || bbr->congestion_recovery_start_ts != UINT64_MAX || @@ -1377,13 +1357,8 @@ static void bbr_cc_on_spurious_congestion(ngtcp2_cc *cc, if (bbr->in_loss_recovery) { bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr_restore_cwnd(bbr, cstat); - bbr->full_bw_count = 0; - bbr->loss_in_round = 0; - bbr->inflight_lo = ngtcp2_max(bbr->inflight_lo, bbr->prior_inflight_lo); - bbr->inflight_hi = ngtcp2_max(bbr->inflight_hi, bbr->prior_inflight_hi); - bbr->bw_lo = ngtcp2_max(bbr->bw_lo, bbr->prior_bw_lo); } } @@ -1396,19 +1371,19 @@ static void bbr_cc_on_persistent_congestion(ngtcp2_cc *cc, cstat->congestion_recovery_start_ts = UINT64_MAX; bbr->congestion_recovery_start_ts = UINT64_MAX; bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr_save_cwnd(bbr, cstat); cstat->cwnd = cstat->bytes_in_flight + cstat->max_tx_udp_payload_size; - cstat->cwnd = - ngtcp2_max(cstat->cwnd, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); + cstat->cwnd = ngtcp2_max_uint64( + cstat->cwnd, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); } static void bbr_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); - bbr_handle_recovery(bbr, cstat, ack, ts); + bbr_handle_recovery(bbr, cstat, ack); bbr_update_on_ack(bbr, cstat, ack, ts); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h index 0017be35010e66..f266ec5d71e4e4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -95,9 +95,10 @@ typedef struct ngtcp2_cc_bbr { uint64_t round_count; /* Full pipe */ - int filled_pipe; uint64_t full_bw; size_t full_bw_count; + int full_bw_reached; + int full_bw_now; /* Pacing rate */ uint64_t pacing_gain_h; @@ -123,19 +124,13 @@ typedef struct ngtcp2_cc_bbr { size_t bw_probe_up_rounds; uint64_t bw_probe_up_acks; uint64_t inflight_hi; - uint64_t bw_hi; int probe_rtt_expired; ngtcp2_duration probe_rtt_min_delay; ngtcp2_tstamp probe_rtt_min_stamp; int in_loss_recovery; - int packet_conservation; + uint64_t 
round_count_at_recovery; uint64_t max_inflight; ngtcp2_tstamp congestion_recovery_start_ts; - uint64_t congestion_recovery_next_round_delivered; - - uint64_t prior_inflight_lo; - uint64_t prior_inflight_hi; - uint64_t prior_bw_lo; } ngtcp2_cc_bbr; void ngtcp2_cc_bbr_init(ngtcp2_cc_bbr *bbr, ngtcp2_log *log, @@ -143,4 +138,4 @@ void ngtcp2_cc_bbr_init(ngtcp2_cc_bbr *bbr, ngtcp2_log *log, ngtcp2_tstamp initial_ts, ngtcp2_rand rand, const ngtcp2_rand_ctx *rand_ctx); -#endif /* NGTCP2_BBR_H */ +#endif /* !defined(NGTCP2_BBR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h index 85b5f4ddf0464a..e87adb119916ca 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -105,4 +105,4 @@ int ngtcp2_buf_chain_new(ngtcp2_buf_chain **pbufchain, size_t len, */ void ngtcp2_buf_chain_del(ngtcp2_buf_chain *bufchain, const ngtcp2_mem *mem); -#endif /* NGTCP2_BUF_H */ +#endif /* !defined(NGTCP2_BUF_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c index 9ad37fbdb6395a..1ff59f315c5b66 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c @@ -32,16 +32,13 @@ #include "ngtcp2_mem.h" #include "ngtcp2_rcvry.h" #include "ngtcp2_conn_stat.h" +#include "ngtcp2_rst.h" #include "ngtcp2_unreachable.h" -/* NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN is the window length of - delivery rate filter driven by ACK clocking. */ -#define NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN 10 - uint64_t ngtcp2_cc_compute_initcwnd(size_t max_udp_payload_size) { uint64_t n = 2 * max_udp_payload_size; - n = ngtcp2_max(n, 14720); - return ngtcp2_min(10 * max_udp_payload_size, n); + n = ngtcp2_max_uint64(n, 14720); + return ngtcp2_min_uint64(10 * max_udp_payload_size, n); } ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, @@ -59,13 +56,7 @@ ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, return pkt; } -static void reno_cc_reset(ngtcp2_cc_reno *reno) { - ngtcp2_window_filter_init(&reno->delivery_rate_sec_filter, - NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN); - reno->ack_count = 0; - reno->target_cwnd = 0; - reno->pending_add = 0; -} +static void reno_cc_reset(ngtcp2_cc_reno *reno) { reno->pending_add = 0; } void ngtcp2_cc_reno_init(ngtcp2_cc_reno *reno, ngtcp2_log *log) { memset(reno, 0, sizeof(*reno)); @@ -74,8 +65,7 @@ void ngtcp2_cc_reno_init(ngtcp2_cc_reno *reno, ngtcp2_log *log) { reno->cc.on_pkt_acked = ngtcp2_cc_reno_cc_on_pkt_acked; reno->cc.congestion_event = ngtcp2_cc_reno_cc_congestion_event; reno->cc.on_persistent_congestion = - ngtcp2_cc_reno_cc_on_persistent_congestion; - reno->cc.on_ack_recv = ngtcp2_cc_reno_cc_on_ack_recv; + ngtcp2_cc_reno_cc_on_persistent_congestion; reno->cc.reset = ngtcp2_cc_reno_cc_reset; reno_cc_reset(reno); @@ -94,11 +84,7 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, uint64_t m; (void)ts; - if (in_congestion_recovery(cstat, pkt->sent_ts)) { - return; - } - - if (reno->target_cwnd && reno->target_cwnd < cstat->cwnd) { + if (in_congestion_recovery(cstat, pkt->sent_ts) || pkt->is_app_limited) { return; } @@ -118,9 +104,10 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts) { + uint64_t bytes_lost, ngtcp2_tstamp ts) { 
ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); uint64_t min_cwnd; + (void)bytes_lost; if (in_congestion_recovery(cstat, sent_ts)) { return; @@ -129,7 +116,7 @@ void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, cstat->congestion_recovery_start_ts = ts; cstat->cwnd >>= NGTCP2_LOSS_REDUCTION_FACTOR_BITS; min_cwnd = 2 * cstat->max_tx_udp_payload_size; - cstat->cwnd = ngtcp2_max(cstat->cwnd, min_cwnd); + cstat->cwnd = ngtcp2_max_uint64(cstat->cwnd, min_cwnd); cstat->ssthresh = cstat->cwnd; reno->pending_add = 0; @@ -149,35 +136,6 @@ void ngtcp2_cc_reno_cc_on_persistent_congestion(ngtcp2_cc *cc, cstat->congestion_recovery_start_ts = UINT64_MAX; } -void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); - uint64_t target_cwnd, initcwnd; - uint64_t max_delivery_rate_sec; - (void)ack; - (void)ts; - - ++reno->ack_count; - - ngtcp2_window_filter_update(&reno->delivery_rate_sec_filter, - cstat->delivery_rate_sec, reno->ack_count); - - max_delivery_rate_sec = - ngtcp2_window_filter_get_best(&reno->delivery_rate_sec_filter); - - if (cstat->min_rtt != UINT64_MAX && max_delivery_rate_sec) { - target_cwnd = max_delivery_rate_sec * cstat->smoothed_rtt / NGTCP2_SECONDS; - initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); - reno->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; - - ngtcp2_log_info(reno->cc.log, NGTCP2_LOG_EVENT_CCA, - "target_cwnd=%" PRIu64 " max_delivery_rate_sec=%" PRIu64 - " smoothed_rtt=%" PRIu64, - reno->target_cwnd, max_delivery_rate_sec, - cstat->smoothed_rtt); - } -} - void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); @@ -187,45 +145,49 @@ void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, reno_cc_reset(reno); } +static void cubic_vars_reset(ngtcp2_cubic_vars *v) { + v->cwnd_prior = 0; + v->w_max = 0; + v->k = 0; + v->epoch_start = UINT64_MAX; + v->w_est = 0; + + v->state = NGTCP2_CUBIC_STATE_INITIAL; + v->app_limited_start_ts = UINT64_MAX; + v->app_limited_duration = 0; + v->pending_bytes_delivered = 0; + v->pending_est_bytes_delivered = 0; +} + static void cubic_cc_reset(ngtcp2_cc_cubic *cubic) { - ngtcp2_window_filter_init(&cubic->delivery_rate_sec_filter, - NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN); - cubic->ack_count = 0; - cubic->target_cwnd = 0; - cubic->w_last_max = 0; - cubic->w_tcp = 0; - cubic->origin_point = 0; - cubic->epoch_start = UINT64_MAX; - cubic->k = 0; - - cubic->prior.cwnd = 0; - cubic->prior.ssthresh = 0; - cubic->prior.w_last_max = 0; - cubic->prior.w_tcp = 0; - cubic->prior.origin_point = 0; - cubic->prior.epoch_start = UINT64_MAX; - cubic->prior.k = 0; - - cubic->rtt_sample_count = 0; - cubic->current_round_min_rtt = UINT64_MAX; - cubic->last_round_min_rtt = UINT64_MAX; - cubic->window_end = -1; + cubic_vars_reset(&cubic->current); + cubic_vars_reset(&cubic->undo.v); + cubic->undo.cwnd = 0; + cubic->undo.ssthresh = 0; + + cubic->hs.current_round_min_rtt = UINT64_MAX; + cubic->hs.last_round_min_rtt = UINT64_MAX; + cubic->hs.curr_rtt = UINT64_MAX; + cubic->hs.rtt_sample_count = 0; + cubic->hs.css_baseline_min_rtt = UINT64_MAX; + cubic->hs.css_round = 0; + + cubic->next_round_delivered = 0; } -void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cubic, ngtcp2_log *log) { +void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cubic, ngtcp2_log *log, + 
ngtcp2_rst *rst) { memset(cubic, 0, sizeof(*cubic)); cubic->cc.log = log; - cubic->cc.on_pkt_acked = ngtcp2_cc_cubic_cc_on_pkt_acked; + cubic->cc.on_ack_recv = ngtcp2_cc_cubic_cc_on_ack_recv; cubic->cc.congestion_event = ngtcp2_cc_cubic_cc_congestion_event; cubic->cc.on_spurious_congestion = ngtcp2_cc_cubic_cc_on_spurious_congestion; cubic->cc.on_persistent_congestion = - ngtcp2_cc_cubic_cc_on_persistent_congestion; - cubic->cc.on_ack_recv = ngtcp2_cc_cubic_cc_on_ack_recv; - cubic->cc.on_pkt_sent = ngtcp2_cc_cubic_cc_on_pkt_sent; - cubic->cc.new_rtt_sample = ngtcp2_cc_cubic_cc_new_rtt_sample; + ngtcp2_cc_cubic_cc_on_persistent_congestion; cubic->cc.reset = ngtcp2_cc_cubic_cc_reset; - cubic->cc.event = ngtcp2_cc_cubic_cc_event; + + cubic->rst = rst; cubic_cc_reset(cubic); } @@ -254,191 +216,250 @@ uint64_t ngtcp2_cbrt(uint64_t n) { return y; } -/* HyStart++ constants */ -#define NGTCP2_HS_MIN_SSTHRESH 16 +/* RFC 9406 HyStart++ constants */ +#define NGTCP2_HS_MIN_RTT_THRESH (4 * NGTCP2_MILLISECONDS) +#define NGTCP2_HS_MAX_RTT_THRESH (16 * NGTCP2_MILLISECONDS) +#define NGTCP2_HS_MIN_RTT_DIVISOR 8 #define NGTCP2_HS_N_RTT_SAMPLE 8 -#define NGTCP2_HS_MIN_ETA (4 * NGTCP2_MILLISECONDS) -#define NGTCP2_HS_MAX_ETA (16 * NGTCP2_MILLISECONDS) +#define NGTCP2_HS_CSS_GROWTH_DIVISOR 4 +#define NGTCP2_HS_CSS_ROUNDS 5 + +static uint64_t cubic_cc_compute_w_cubic(ngtcp2_cc_cubic *cubic, + const ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + ngtcp2_duration t = ts - cubic->current.epoch_start; + uint64_t delta; + uint64_t tx = (t << 10) / NGTCP2_SECONDS; + uint64_t kx = (cubic->current.k << 10) / NGTCP2_SECONDS; + uint64_t time_delta; + + if (tx < kx) { + return UINT64_MAX; + } + + time_delta = tx - kx; + + delta = cstat->max_tx_udp_payload_size * + ((((time_delta * time_delta) >> 10) * time_delta) >> 10) * 4 / 10; -void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, - ngtcp2_tstamp ts) { + return cubic->current.w_max + (delta >> 10); +} + +void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts) { ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - ngtcp2_duration t, eta; - uint64_t target, cwnd_thres; - uint64_t tx, kx, time_delta, delta; - uint64_t add, tcp_add; - uint64_t m; + uint64_t w_cubic, w_cubic_next, target, m; + ngtcp2_duration rtt_thresh; + int round_start; - if (pkt->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && cubic->window_end != -1 && - cubic->window_end <= pkt->pkt_num) { - cubic->window_end = -1; + if (in_congestion_recovery(cstat, ack->largest_pkt_sent_ts)) { + return; } - if (in_congestion_recovery(cstat, pkt->sent_ts)) { + if (cubic->current.state == NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE) { + if (cubic->rst->rs.is_app_limited && !cubic->rst->is_cwnd_limited) { + if (cubic->current.app_limited_start_ts == UINT64_MAX) { + cubic->current.app_limited_start_ts = ts; + } + + return; + } + + if (cubic->current.app_limited_start_ts != UINT64_MAX) { + cubic->current.app_limited_duration += + ts - cubic->current.app_limited_start_ts; + cubic->current.app_limited_start_ts = UINT64_MAX; + } + } else if (cubic->rst->rs.is_app_limited && !cubic->rst->is_cwnd_limited) { return; } + round_start = ack->pkt_delivered >= cubic->next_round_delivered; + if (round_start) { + cubic->next_round_delivered = cubic->rst->delivered; + + cubic->rst->is_cwnd_limited = 0; + } + if (cstat->cwnd < cstat->ssthresh) { /* slow-start */ - if (cubic->target_cwnd == 0 || 
cubic->target_cwnd > cstat->cwnd) { - cstat->cwnd += pkt->pktlen; + if (cubic->hs.css_round) { + cstat->cwnd += ack->bytes_delivered / NGTCP2_HS_CSS_GROWTH_DIVISOR; + } else { + cstat->cwnd += ack->bytes_delivered; } ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "pkn=%" PRId64 " acked, slow start cwnd=%" PRIu64, - pkt->pkt_num, cstat->cwnd); + "%" PRIu64 " bytes acked, slow start cwnd=%" PRIu64, + ack->bytes_delivered, cstat->cwnd); + + if (round_start) { + cubic->hs.last_round_min_rtt = cubic->hs.current_round_min_rtt; + cubic->hs.current_round_min_rtt = UINT64_MAX; + cubic->hs.rtt_sample_count = 0; - if (cubic->last_round_min_rtt != UINT64_MAX && - cubic->current_round_min_rtt != UINT64_MAX && - cstat->cwnd >= - NGTCP2_HS_MIN_SSTHRESH * cstat->max_tx_udp_payload_size && - cubic->rtt_sample_count >= NGTCP2_HS_N_RTT_SAMPLE) { - eta = cubic->last_round_min_rtt / 8; - - if (eta < NGTCP2_HS_MIN_ETA) { - eta = NGTCP2_HS_MIN_ETA; - } else if (eta > NGTCP2_HS_MAX_ETA) { - eta = NGTCP2_HS_MAX_ETA; + if (cubic->hs.css_round) { + ++cubic->hs.css_round; } + } + + cubic->hs.current_round_min_rtt = + ngtcp2_min_uint64(cubic->hs.current_round_min_rtt, ack->rtt); + ++cubic->hs.rtt_sample_count; - if (cubic->current_round_min_rtt >= cubic->last_round_min_rtt + eta) { + if (cubic->hs.css_round) { + if (cubic->hs.current_round_min_rtt < cubic->hs.css_baseline_min_rtt) { + cubic->hs.css_baseline_min_rtt = UINT64_MAX; + cubic->hs.css_round = 0; + return; + } + + if (cubic->hs.css_round >= NGTCP2_HS_CSS_ROUNDS) { ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "HyStart++ exit slow start"); - cubic->w_last_max = cstat->cwnd; cstat->ssthresh = cstat->cwnd; } - } - - return; - } - /* congestion avoidance */ - - if (cubic->epoch_start == UINT64_MAX) { - cubic->epoch_start = ts; - if (cstat->cwnd < cubic->w_last_max) { - cubic->k = ngtcp2_cbrt((cubic->w_last_max - cstat->cwnd) * 10 / 4 / - cstat->max_tx_udp_payload_size); - cubic->origin_point = cubic->w_last_max; - } else { - cubic->k = 0; - cubic->origin_point = cstat->cwnd; + return; } - cubic->w_tcp = cstat->cwnd; - - ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "cubic-ca epoch_start=%" PRIu64 " k=%" PRIu64 - " origin_point=%" PRIu64, - cubic->epoch_start, cubic->k, cubic->origin_point); + if (cubic->hs.rtt_sample_count >= NGTCP2_HS_N_RTT_SAMPLE && + cubic->hs.current_round_min_rtt != UINT64_MAX && + cubic->hs.last_round_min_rtt != UINT64_MAX) { + rtt_thresh = + ngtcp2_max_uint64(NGTCP2_HS_MIN_RTT_THRESH, + ngtcp2_min_uint64(cubic->hs.last_round_min_rtt / + NGTCP2_HS_MIN_RTT_DIVISOR, + NGTCP2_HS_MAX_RTT_THRESH)); + + if (cubic->hs.current_round_min_rtt >= + cubic->hs.last_round_min_rtt + rtt_thresh) { + cubic->hs.css_baseline_min_rtt = cubic->hs.current_round_min_rtt; + cubic->hs.css_round = 1; + } + } - cubic->pending_add = 0; - cubic->pending_w_add = 0; + return; } - t = ts - cubic->epoch_start; - - tx = (t << 10) / NGTCP2_SECONDS; - kx = (cubic->k << 10); + /* congestion avoidance */ - if (tx > kx) { - time_delta = tx - kx; - } else { - time_delta = kx - tx; + switch (cubic->current.state) { + case NGTCP2_CUBIC_STATE_INITIAL: + m = cstat->max_tx_udp_payload_size * ack->bytes_delivered + + cubic->current.pending_bytes_delivered; + cstat->cwnd += m / cstat->cwnd; + cubic->current.pending_bytes_delivered = m % cstat->cwnd; + return; + case NGTCP2_CUBIC_STATE_RECOVERY: + cubic->current.state = NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE; + cubic->current.epoch_start = ts; + break; + default: + break; } - delta = 
cstat->max_tx_udp_payload_size * - ((((time_delta * time_delta) >> 10) * time_delta) >> 10) * 4 / 10; - delta >>= 10; - - if (tx > kx) { - target = cubic->origin_point + delta; - } else { - target = cubic->origin_point - delta; - } + w_cubic = cubic_cc_compute_w_cubic(cubic, cstat, + ts - cubic->current.app_limited_duration); + w_cubic_next = cubic_cc_compute_w_cubic( + cubic, cstat, + ts - cubic->current.app_limited_duration + cstat->smoothed_rtt); - cwnd_thres = - (target * (((t + cstat->smoothed_rtt) << 10) / NGTCP2_SECONDS)) >> 10; - if (cwnd_thres < cstat->cwnd) { + if (w_cubic_next == UINT64_MAX || w_cubic_next < cstat->cwnd) { target = cstat->cwnd; - } else if (2 * cwnd_thres > 3 * cstat->cwnd) { + } else if (2 * w_cubic_next > 3 * cstat->cwnd) { target = cstat->cwnd * 3 / 2; } else { - target = cwnd_thres; + target = w_cubic_next; } - if (target > cstat->cwnd) { - m = cubic->pending_add + - cstat->max_tx_udp_payload_size * (target - cstat->cwnd); - add = m / cstat->cwnd; - cubic->pending_add = m % cstat->cwnd; - } else { - m = cubic->pending_add + cstat->max_tx_udp_payload_size; - add = m / (100 * cstat->cwnd); - cubic->pending_add = m % (100 * cstat->cwnd); - } - - m = cubic->pending_w_add + cstat->max_tx_udp_payload_size * pkt->pktlen; - - cubic->w_tcp += m / cstat->cwnd; - cubic->pending_w_add = m % cstat->cwnd; + m = ack->bytes_delivered * cstat->max_tx_udp_payload_size + + cubic->current.pending_est_bytes_delivered; + cubic->current.pending_est_bytes_delivered = m % cstat->cwnd; - if (cubic->w_tcp > cstat->cwnd) { - tcp_add = cstat->max_tx_udp_payload_size * (cubic->w_tcp - cstat->cwnd) / - cstat->cwnd; - if (tcp_add > add) { - add = tcp_add; - } + if (cubic->current.w_est < cubic->current.cwnd_prior) { + cubic->current.w_est += m * 9 / 17 / cstat->cwnd; + } else { + cubic->current.w_est += m / cstat->cwnd; } - if (cubic->target_cwnd == 0 || cubic->target_cwnd > cstat->cwnd) { - cstat->cwnd += add; + if (w_cubic == UINT64_MAX || cubic->current.w_est > w_cubic) { + cstat->cwnd = cubic->current.w_est; + } else { + m = (target - cstat->cwnd) * cstat->max_tx_udp_payload_size + + cubic->current.pending_bytes_delivered; + cstat->cwnd += m / cstat->cwnd; + cubic->current.pending_bytes_delivered = m % cstat->cwnd; } ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "pkn=%" PRId64 " acked, cubic-ca cwnd=%" PRIu64 " t=%" PRIu64 - " k=%" PRIi64 " time_delta=%" PRIu64 " delta=%" PRIu64 - " target=%" PRIu64 " w_tcp=%" PRIu64, - pkt->pkt_num, cstat->cwnd, t, cubic->k, time_delta >> 4, - delta, target, cubic->w_tcp); + "%" PRIu64 " bytes acked, cubic-ca cwnd=%" PRIu64 + " k=%" PRIi64 " target=%" PRIu64 " w_est=%" PRIu64, + ack->bytes_delivered, cstat->cwnd, cubic->current.k, target, + cubic->current.w_est); } void ngtcp2_cc_cubic_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, + uint64_t bytes_lost, ngtcp2_tstamp ts) { ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - uint64_t min_cwnd; + uint64_t flight_size; if (in_congestion_recovery(cstat, sent_ts)) { return; } - if (cubic->prior.cwnd < cstat->cwnd) { - cubic->prior.cwnd = cstat->cwnd; - cubic->prior.ssthresh = cstat->ssthresh; - cubic->prior.w_last_max = cubic->w_last_max; - cubic->prior.w_tcp = cubic->w_tcp; - cubic->prior.origin_point = cubic->origin_point; - cubic->prior.epoch_start = cubic->epoch_start; - cubic->prior.k = cubic->k; + if (cubic->undo.cwnd < cstat->cwnd) { + cubic->undo.v = cubic->current; + cubic->undo.cwnd = cstat->cwnd; + cubic->undo.ssthresh = 
cstat->ssthresh; } cstat->congestion_recovery_start_ts = ts; - cubic->epoch_start = UINT64_MAX; - if (cstat->cwnd < cubic->w_last_max) { - cubic->w_last_max = cstat->cwnd * 17 / 10 / 2; + cubic->current.state = NGTCP2_CUBIC_STATE_RECOVERY; + cubic->current.epoch_start = UINT64_MAX; + cubic->current.app_limited_start_ts = UINT64_MAX; + cubic->current.app_limited_duration = 0; + cubic->current.pending_bytes_delivered = 0; + cubic->current.pending_est_bytes_delivered = 0; + + if (cstat->cwnd < cubic->current.w_max) { + cubic->current.w_max = cstat->cwnd * 17 / 20; } else { - cubic->w_last_max = cstat->cwnd; + cubic->current.w_max = cstat->cwnd; } - min_cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->ssthresh = cstat->cwnd * 7 / 10; - cstat->ssthresh = ngtcp2_max(cstat->ssthresh, min_cwnd); + + if (cubic->rst->rs.delivered * 2 < cstat->cwnd) { + flight_size = cstat->bytes_in_flight + bytes_lost; + cstat->ssthresh = ngtcp2_min_uint64( + cstat->ssthresh, + ngtcp2_max_uint64(cubic->rst->rs.delivered, flight_size) * 7 / 10); + } + + cstat->ssthresh = + ngtcp2_max_uint64(cstat->ssthresh, 2 * cstat->max_tx_udp_payload_size); + + cubic->current.cwnd_prior = cstat->cwnd; cstat->cwnd = cstat->ssthresh; + cubic->current.w_est = cstat->cwnd; + + if (cstat->cwnd < cubic->current.w_max) { + cubic->current.k = + ngtcp2_cbrt(((cubic->current.w_max - cstat->cwnd) << 10) * 10 / 4 / + cstat->max_tx_udp_payload_size) * + NGTCP2_SECONDS; + cubic->current.k >>= 10; + } else { + cubic->current.k = 0; + } + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "reduce cwnd because of packet loss cwnd=%" PRIu64, cstat->cwnd); @@ -450,101 +471,34 @@ void ngtcp2_cc_cubic_cc_on_spurious_congestion(ngtcp2_cc *cc, ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)ts; - if (cstat->cwnd >= cubic->prior.cwnd) { - return; - } - - cstat->congestion_recovery_start_ts = UINT64_MAX; - - cstat->cwnd = cubic->prior.cwnd; - cstat->ssthresh = cubic->prior.ssthresh; - cubic->w_last_max = cubic->prior.w_last_max; - cubic->w_tcp = cubic->prior.w_tcp; - cubic->origin_point = cubic->prior.origin_point; - cubic->epoch_start = cubic->prior.epoch_start; - cubic->k = cubic->prior.k; - - cubic->prior.cwnd = 0; - cubic->prior.ssthresh = 0; - cubic->prior.w_last_max = 0; - cubic->prior.w_tcp = 0; - cubic->prior.origin_point = 0; - cubic->prior.epoch_start = UINT64_MAX; - cubic->prior.k = 0; - - ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "spurious congestion is detected and congestion state is " - "restored cwnd=%" PRIu64, - cstat->cwnd); -} - -void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - (void)cc; - (void)ts; - - cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->congestion_recovery_start_ts = UINT64_MAX; -} - -void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts) { - ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - uint64_t target_cwnd, initcwnd; - uint64_t max_delivery_rate_sec; - (void)ack; - (void)ts; - - ++cubic->ack_count; - - ngtcp2_window_filter_update(&cubic->delivery_rate_sec_filter, - cstat->delivery_rate_sec, cubic->ack_count); - max_delivery_rate_sec = - ngtcp2_window_filter_get_best(&cubic->delivery_rate_sec_filter); - - if (cstat->min_rtt != UINT64_MAX && max_delivery_rate_sec) { - target_cwnd = max_delivery_rate_sec * cstat->smoothed_rtt / NGTCP2_SECONDS; - initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); - 
cubic->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; + if (cstat->cwnd < cubic->undo.cwnd) { + cubic->current = cubic->undo.v; + cstat->cwnd = cubic->undo.cwnd; + cstat->ssthresh = cubic->undo.ssthresh; ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "target_cwnd=%" PRIu64 " max_delivery_rate_sec=%" PRIu64 - " smoothed_rtt=%" PRIu64, - cubic->target_cwnd, max_delivery_rate_sec, - cstat->smoothed_rtt); + "spurious congestion is detected and congestion state is " + "restored cwnd=%" PRIu64, + cstat->cwnd); } -} - -void ngtcp2_cc_cubic_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt) { - ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - (void)cstat; - if (pkt->pktns_id != NGTCP2_PKTNS_ID_APPLICATION || cubic->window_end != -1) { - return; - } - - cubic->window_end = pkt->pkt_num; - cubic->last_round_min_rtt = cubic->current_round_min_rtt; - cubic->current_round_min_rtt = UINT64_MAX; - cubic->rtt_sample_count = 0; + cubic_vars_reset(&cubic->undo.v); + cubic->undo.cwnd = 0; + cubic->undo.ssthresh = 0; } -void ngtcp2_cc_cubic_cc_new_rtt_sample(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { +void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)ts; - if (cubic->window_end == -1) { - return; - } + cubic_cc_reset(cubic); - cubic->current_round_min_rtt = - ngtcp2_min(cubic->current_round_min_rtt, cstat->latest_rtt); - ++cubic->rtt_sample_count; + cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; + cstat->congestion_recovery_start_ts = UINT64_MAX; } void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, @@ -555,23 +509,3 @@ void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, cubic_cc_reset(cubic); } - -void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts) { - ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - ngtcp2_tstamp last_ts; - - if (event != NGTCP2_CC_EVENT_TYPE_TX_START || - cubic->epoch_start == UINT64_MAX) { - return; - } - - last_ts = cstat->last_tx_pkt_ts[NGTCP2_PKTNS_ID_APPLICATION]; - if (last_ts == UINT64_MAX || last_ts <= cubic->epoch_start) { - return; - } - - assert(ts >= last_ts); - - cubic->epoch_start += ts - last_ts; -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h index 524bcdb7e4bf86..e3c363a51bb85a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h @@ -27,18 +27,18 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include "ngtcp2_pktns_id.h" -#include "ngtcp2_window_filter.h" #define NGTCP2_LOSS_REDUCTION_FACTOR_BITS 1 #define NGTCP2_PERSISTENT_CONGESTION_THRESHOLD 3 typedef struct ngtcp2_log ngtcp2_log; typedef struct ngtcp2_conn_stat ngtcp2_conn_stat; +typedef struct ngtcp2_rst ngtcp2_rst; /** * @struct @@ -144,10 +144,12 @@ typedef void (*ngtcp2_cc_on_pkt_lost)(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, * * :type:`ngtcp2_cc_congestion_event` is a callback function which is * called when congestion event happens (e.g., when packet is lost). + * |bytes_lost| is the number of bytes lost in this congestion event. 
*/ typedef void (*ngtcp2_cc_congestion_event)(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, + uint64_t bytes_lost, ngtcp2_tstamp ts); /** @@ -305,9 +307,6 @@ ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, /* ngtcp2_cc_reno is the RENO congestion controller. */ typedef struct ngtcp2_cc_reno { ngtcp2_cc cc; - ngtcp2_window_filter delivery_rate_sec_filter; - uint64_t ack_count; - uint64_t target_cwnd; uint64_t pending_add; } ngtcp2_cc_reno; @@ -318,59 +317,81 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts); + uint64_t bytes_lost, ngtcp2_tstamp ts); void ngtcp2_cc_reno_cc_on_persistent_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); - void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); +typedef enum ngtcp2_cubic_state { + /* NGTCP2_CUBIC_STATE_INITIAL is the state where CUBIC is in slow + start phase, or congestion avoidance phase before congestion + events occur. */ + NGTCP2_CUBIC_STATE_INITIAL, + /* NGTCP2_CUBIC_STATE_RECOVERY is the state that a connection is in + recovery period. */ + NGTCP2_CUBIC_STATE_RECOVERY, + /* NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE is the state where CUBIC + is in congestion avoidance phase after recovery period ends. */ + NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE, +} ngtcp2_cubic_state; + +typedef struct ngtcp2_cubic_vars { + uint64_t cwnd_prior; + uint64_t w_max; + ngtcp2_duration k; + ngtcp2_tstamp epoch_start; + uint64_t w_est; + + ngtcp2_cubic_state state; + /* app_limited_start_ts is the timestamp where app limited period + started. */ + ngtcp2_tstamp app_limited_start_ts; + /* app_limited_duration is the cumulative duration where a + connection is under app limited when ACK is received. */ + ngtcp2_duration app_limited_duration; + uint64_t pending_bytes_delivered; + uint64_t pending_est_bytes_delivered; +} ngtcp2_cubic_vars; + /* ngtcp2_cc_cubic is CUBIC congestion controller. */ typedef struct ngtcp2_cc_cubic { ngtcp2_cc cc; - ngtcp2_window_filter delivery_rate_sec_filter; - uint64_t ack_count; - uint64_t target_cwnd; - uint64_t w_last_max; - uint64_t w_tcp; - uint64_t origin_point; - ngtcp2_tstamp epoch_start; - uint64_t k; - /* prior stores the congestion state when a congestion event occurs + ngtcp2_rst *rst; + /* current is a set of variables that are currently in effect. */ + ngtcp2_cubic_vars current; + /* undo stores the congestion state when a congestion event occurs in order to restore the state when it turns out that the event is spurious. 
*/ struct { + ngtcp2_cubic_vars v; uint64_t cwnd; uint64_t ssthresh; - uint64_t w_last_max; - uint64_t w_tcp; - uint64_t origin_point; - ngtcp2_tstamp epoch_start; - uint64_t k; - } prior; + } undo; /* HyStart++ variables */ - size_t rtt_sample_count; - uint64_t current_round_min_rtt; - uint64_t last_round_min_rtt; - int64_t window_end; - uint64_t pending_add; - uint64_t pending_w_add; + struct { + ngtcp2_duration current_round_min_rtt; + ngtcp2_duration last_round_min_rtt; + ngtcp2_duration curr_rtt; + size_t rtt_sample_count; + ngtcp2_duration css_baseline_min_rtt; + size_t css_round; + } hs; + uint64_t next_round_delivered; } ngtcp2_cc_cubic; -void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cc, ngtcp2_log *log); +void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cc, ngtcp2_log *log, + ngtcp2_rst *rst); -void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, - ngtcp2_tstamp ts); +void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); void ngtcp2_cc_cubic_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts); + uint64_t bytes_lost, ngtcp2_tstamp ts); void ngtcp2_cc_cubic_cc_on_spurious_congestion(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, @@ -380,21 +401,9 @@ void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); - -void ngtcp2_cc_cubic_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt); - -void ngtcp2_cc_cubic_cc_new_rtt_sample(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts); - uint64_t ngtcp2_cbrt(uint64_t n); -#endif /* NGTCP2_CC_H */ +#endif /* !defined(NGTCP2_CC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c index f3b92b569ec928..181850cfcbc87a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c @@ -36,6 +36,7 @@ void ngtcp2_cid_init(ngtcp2_cid *cid, const uint8_t *data, size_t datalen) { assert(datalen <= NGTCP2_MAX_CIDLEN); cid->datalen = datalen; + if (datalen) { ngtcp2_cpymem(cid->data, data, datalen); } @@ -74,12 +75,14 @@ void ngtcp2_dcid_init(ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, const uint8_t *token) { dcid->seq = seq; dcid->cid = *cid; + if (token) { memcpy(dcid->token, token, NGTCP2_STATELESS_RESET_TOKENLEN); dcid->flags = NGTCP2_DCID_FLAG_TOKEN_PRESENT; } else { dcid->flags = NGTCP2_DCID_FLAG_NONE; } + ngtcp2_path_storage_zero(&dcid->ps); dcid->retired_ts = UINT64_MAX; dcid->bound_ts = UINT64_MAX; @@ -115,6 +118,7 @@ void ngtcp2_dcid_copy(ngtcp2_dcid *dest, const ngtcp2_dcid *src) { void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src) { dest->seq = src->seq; dest->cid = src->cid; + if (src->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) { dest->flags |= NGTCP2_DCID_FLAG_TOKEN_PRESENT; memcpy(dest->token, src->token, NGTCP2_STATELESS_RESET_TOKENLEN); @@ -123,15 +127,14 @@ void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src) { } } -int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, +int ngtcp2_dcid_verify_uniqueness(const ngtcp2_dcid *dcid, 
uint64_t seq, const ngtcp2_cid *cid, const uint8_t *token) { if (dcid->seq == seq) { return ngtcp2_cid_eq(&dcid->cid, cid) && - (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) && - memcmp(dcid->token, token, - NGTCP2_STATELESS_RESET_TOKENLEN) == 0 - ? 0 - : NGTCP2_ERR_PROTO; + (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) && + memcmp(dcid->token, token, NGTCP2_STATELESS_RESET_TOKENLEN) == 0 + ? 0 + : NGTCP2_ERR_PROTO; } return !ngtcp2_cid_eq(&dcid->cid, cid) ? 0 : NGTCP2_ERR_PROTO; @@ -140,8 +143,7 @@ int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, int ngtcp2_dcid_verify_stateless_reset_token(const ngtcp2_dcid *dcid, const uint8_t *token) { return (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) && - ngtcp2_cmemeq(dcid->token, token, - NGTCP2_STATELESS_RESET_TOKENLEN) - ? 0 - : NGTCP2_ERR_INVALID_ARGUMENT; + ngtcp2_cmemeq(dcid->token, token, NGTCP2_STATELESS_RESET_TOKENLEN) + ? 0 + : NGTCP2_ERR_INVALID_ARGUMENT; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h index 0b37441178c72a..6372ef113d6454 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -37,20 +37,20 @@ /* NGTCP2_SCID_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_SCID_FLAG_NONE 0x00u /* NGTCP2_SCID_FLAG_USED indicates that a local endpoint observed that - a remote endpoint uses a particular Connection ID. */ + a remote endpoint uses this particular Connection ID. */ #define NGTCP2_SCID_FLAG_USED 0x01u -/* NGTCP2_SCID_FLAG_RETIRED indicates that a particular Connection ID - is retired. */ +/* NGTCP2_SCID_FLAG_RETIRED indicates that this particular Connection + ID is retired. */ #define NGTCP2_SCID_FLAG_RETIRED 0x02u typedef struct ngtcp2_scid { ngtcp2_pq_entry pe; - /* seq is the sequence number associated to the CID. */ + /* seq is the sequence number associated to the Connection ID. */ uint64_t seq; /* cid is a connection ID */ ngtcp2_cid cid; - /* retired_ts is the timestamp when peer tells that this CID is - retired. */ + /* retired_ts is the timestamp when a remote endpoint tells that + this Connection ID is retired. */ ngtcp2_tstamp retired_ts; /* flags is the bitwise OR of zero or more of NGTCP2_SCID_FLAG_*. */ uint8_t flags; @@ -66,33 +66,33 @@ typedef struct ngtcp2_scid { #define NGTCP2_DCID_FLAG_TOKEN_PRESENT 0x02u typedef struct ngtcp2_dcid { - /* seq is the sequence number associated to the CID. */ + /* seq is the sequence number associated to the Connection ID. */ uint64_t seq; - /* cid is a connection ID */ + /* cid is a Connection ID */ ngtcp2_cid cid; /* path is a path which cid is bound to. The addresses are zero length if cid has not been bound to a particular path yet. */ ngtcp2_path_storage ps; - /* retired_ts is the timestamp when peer tells that this CID is + /* retired_ts is the timestamp when this Connection ID is retired. */ ngtcp2_tstamp retired_ts; - /* bound_ts is the timestamp when this connection ID is bound to a - particular path. It is only assigned when a connection ID is - used just for sending PATH_RESPONSE and is not zero-length. */ + /* bound_ts is the timestamp when this Connection ID is bound to a + particular path. It is only assigned when a Connection ID is + used just for sending PATH_RESPONSE, and is not zero-length. */ ngtcp2_tstamp bound_ts; /* bytes_sent is the number of bytes sent to an associated path. 
*/ uint64_t bytes_sent; /* bytes_recv is the number of bytes received from an associated path. */ uint64_t bytes_recv; - /* max_udp_payload_size is the maximum size of UDP payload that is - allowed to send to this path. */ + /* max_udp_payload_size is the maximum size of UDP datagram payload + that is allowed to be sent to this path. */ size_t max_udp_payload_size; /* flags is bitwise OR of zero or more of NGTCP2_DCID_FLAG_*. */ uint8_t flags; - /* token is a stateless reset token associated to this CID. - Actually, the stateless reset token is tied to the connection, - not to the particular connection ID. */ + /* token is a stateless reset token received along with this + Connection ID. The stateless reset token is tied to the + connection, not to the particular Connection ID. */ uint8_t token[NGTCP2_STATELESS_RESET_TOKENLEN]; } ngtcp2_dcid; @@ -106,7 +106,7 @@ void ngtcp2_cid_zero(ngtcp2_cid *cid); int ngtcp2_cid_less(const ngtcp2_cid *lhs, const ngtcp2_cid *rhs); /* - * ngtcp2_cid_empty returns nonzero if |cid| includes empty connection + * ngtcp2_cid_empty returns nonzero if |cid| includes empty Connection * ID. */ int ngtcp2_cid_empty(const ngtcp2_cid *cid); @@ -123,7 +123,7 @@ void ngtcp2_scid_copy(ngtcp2_scid *dest, const ngtcp2_scid *src); /* * ngtcp2_dcid_init initializes |dcid| with the given parameters. If - * |token| is NULL, the function fills dcid->token it with 0. |token| + * |token| is NULL, the function fills dcid->token with 0. |token| * must be NGTCP2_STATELESS_RESET_TOKENLEN bytes long. */ void ngtcp2_dcid_init(ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, @@ -131,14 +131,14 @@ void ngtcp2_dcid_init(ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, /* * ngtcp2_dcid_set_token sets |token| to |dcid|. |token| must not be - * NULL and must be NGTCP2_STATELESS_RESET_TOKENLEN bytes long. + * NULL, and must be NGTCP2_STATELESS_RESET_TOKENLEN bytes long. */ void ngtcp2_dcid_set_token(ngtcp2_dcid *dcid, const uint8_t *token); /* * ngtcp2_dcid_set_path sets |path| to |dcid|. It sets - * max_udp_payload_size to the minimum UDP payload size supported - * by the IP protocol version. + * max_udp_payload_size to the minimum UDP datagram payload size + * supported by the IP protocol version. */ void ngtcp2_dcid_set_path(ngtcp2_dcid *dcid, const ngtcp2_path *path); @@ -149,7 +149,7 @@ void ngtcp2_dcid_copy(ngtcp2_dcid *dest, const ngtcp2_dcid *src); /* * ngtcp2_dcid_copy_cid_token behaves like ngtcp2_dcid_copy, but it - * only copies cid, seq, and path. + * only copies cid, seq, and token. */ void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src); @@ -157,14 +157,15 @@ void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src); * ngtcp2_dcid_verify_uniqueness verifies uniqueness of (|seq|, |cid|, * |token|) tuple against |dcid|. */ -int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, +int ngtcp2_dcid_verify_uniqueness(const ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, const uint8_t *token); /* * ngtcp2_dcid_verify_stateless_reset_token verifies stateless reset - * token |token| against the one included in |dcid|. This function - * returns 0 if the verification succeeds, or one of the following - * negative error codes: + * token |token| against the one included in |dcid|. Tokens are + * compared in constant time. This function returns 0 if the + * verification succeeds, or one of the following negative error + * codes: * * NGTCP2_ERR_INVALID_ARGUMENT * Tokens do not match; or |dcid| does not contain a token. 
@@ -172,4 +173,4 @@ int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, int ngtcp2_dcid_verify_stateless_reset_token(const ngtcp2_dcid *dcid, const uint8_t *token); -#endif /* NGTCP2_CID_H */ +#endif /* !defined(NGTCP2_CID_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c index c8caf47ea76232..0f67ac09bf33b9 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c @@ -37,7 +37,8 @@ #include "ngtcp2_rcvry.h" #include "ngtcp2_unreachable.h" #include "ngtcp2_net.h" -#include "ngtcp2_conversion.h" +#include "ngtcp2_transport_params.h" +#include "ngtcp2_settings.h" #include "ngtcp2_tstamp.h" #include "ngtcp2_frame_chain.h" @@ -50,8 +51,12 @@ /* NGTCP2_MIN_COALESCED_PAYLOADLEN is the minimum length of QUIC packet payload that should be coalesced to a long packet. */ #define NGTCP2_MIN_COALESCED_PAYLOADLEN 128 +/* NGTCP2_MAX_ACK_PER_PKT is the maximum number of ACK frame per an + incoming QUIC packet to process. ACK frames that exceed this limit + are not processed. */ +#define NGTCP2_MAX_ACK_PER_PKT 1 -ngtcp2_objalloc_def(strm, ngtcp2_strm, oplent); +ngtcp2_objalloc_def(strm, ngtcp2_strm, oplent) /* * conn_local_stream returns nonzero if |stream_id| indicates that it @@ -323,8 +328,8 @@ static int conn_call_select_preferred_addr(ngtcp2_conn *conn, assert(conn->remote.transport_params->preferred_addr_present); rv = conn->callbacks.select_preferred_addr( - conn, dest, &conn->remote.transport_params->preferred_addr, - conn->user_data); + conn, dest, &conn->remote.transport_params->preferred_addr, + conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -377,7 +382,7 @@ static int conn_call_extend_max_stream_data(ngtcp2_conn *conn, } rv = conn->callbacks.extend_max_stream_data( - conn, stream_id, datalen, conn->user_data, strm->stream_user_data); + conn, stream_id, datalen, conn->user_data, strm->stream_user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -395,9 +400,9 @@ static int conn_call_dcid_status(ngtcp2_conn *conn, } rv = conn->callbacks.dcid_status( - conn, type, dcid->seq, &dcid->cid, - (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) ? dcid->token : NULL, - conn->user_data); + conn, type, dcid->seq, &dcid->cid, + (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) ? 
dcid->token : NULL, + conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -413,7 +418,7 @@ static int conn_call_activate_dcid(ngtcp2_conn *conn, const ngtcp2_dcid *dcid) { static int conn_call_deactivate_dcid(ngtcp2_conn *conn, const ngtcp2_dcid *dcid) { return conn_call_dcid_status( - conn, NGTCP2_CONNECTION_ID_STATUS_TYPE_DEACTIVATE, dcid); + conn, NGTCP2_CONNECTION_ID_STATUS_TYPE_DEACTIVATE, dcid); } static int conn_call_stream_stop_sending(ngtcp2_conn *conn, int64_t stream_id, @@ -587,8 +592,8 @@ static int conn_call_recv_datagram(ngtcp2_conn *conn, flags |= NGTCP2_DATAGRAM_FLAG_0RTT; } - rv = conn->callbacks.recv_datagram(conn, flags, data, datalen, - conn->user_data); + rv = + conn->callbacks.recv_datagram(conn, flags, data, datalen, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -607,8 +612,8 @@ conn_call_update_key(ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, assert(conn->callbacks.update_key); rv = conn->callbacks.update_key( - conn, rx_secret, tx_secret, rx_aead_ctx, rx_iv, tx_aead_ctx, tx_iv, - current_rx_secret, current_tx_secret, secretlen, conn->user_data); + conn, rx_secret, tx_secret, rx_aead_ctx, rx_iv, tx_aead_ctx, tx_iv, + current_rx_secret, current_tx_secret, secretlen, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -623,7 +628,7 @@ static int conn_call_version_negotiation(ngtcp2_conn *conn, uint32_t version, assert(conn->callbacks.version_negotiation); rv = - conn->callbacks.version_negotiation(conn, version, dcid, conn->user_data); + conn->callbacks.version_negotiation(conn, version, dcid, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -663,13 +668,11 @@ static int conn_call_recv_tx_key(ngtcp2_conn *conn, return 0; } -static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, - ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t initial_pkt_num, - ngtcp2_log *log, ngtcp2_qlog *qlog, - ngtcp2_objalloc *rtb_entry_objalloc, - ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { - int rv; - +static void pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, + ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t initial_pkt_num, + ngtcp2_log *log, ngtcp2_qlog *qlog, + ngtcp2_objalloc *rtb_entry_objalloc, + ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { memset(pktns, 0, sizeof(*pktns)); ngtcp2_gaptr_init(&pktns->rx.pngap, mem); @@ -678,25 +681,15 @@ static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; pktns->rx.max_pkt_num = -1; pktns->rx.max_ack_eliciting_pkt_num = -1; + pktns->id = pktns_id; - rv = ngtcp2_acktr_init(&pktns->acktr, log, mem); - if (rv != 0) { - goto fail_acktr_init; - } + ngtcp2_acktr_init(&pktns->acktr, log, mem); ngtcp2_strm_init(&pktns->crypto.strm, 0, NGTCP2_STRM_FLAG_NONE, 0, 0, NULL, frc_objalloc, mem); - ngtcp2_rtb_init(&pktns->rtb, pktns_id, &pktns->crypto.strm, rst, cc, - initial_pkt_num, log, qlog, rtb_entry_objalloc, frc_objalloc, - mem); - - return 0; - -fail_acktr_init: - ngtcp2_gaptr_free(&pktns->rx.pngap); - - return rv; + ngtcp2_rtb_init(&pktns->rtb, rst, cc, initial_pkt_num, log, qlog, + rtb_entry_objalloc, frc_objalloc, mem); } static int pktns_new(ngtcp2_pktns **ppktns, ngtcp2_pktns_id pktns_id, @@ -704,20 +697,15 @@ static int pktns_new(ngtcp2_pktns **ppktns, ngtcp2_pktns_id pktns_id, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { - int rv; - *ppktns = ngtcp2_mem_malloc(mem, 
sizeof(ngtcp2_pktns)); if (*ppktns == NULL) { return NGTCP2_ERR_NOMEM; } - rv = pktns_init(*ppktns, pktns_id, rst, cc, initial_pkt_num, log, qlog, - rtb_entry_objalloc, frc_objalloc, mem); - if (rv != 0) { - ngtcp2_mem_free(mem, *ppktns); - } + pktns_init(*ppktns, pktns_id, rst, cc, initial_pkt_num, log, qlog, + rtb_entry_objalloc, frc_objalloc, mem); - return rv; + return 0; } static int cycle_less(const ngtcp2_pq_entry *lhs, const ngtcp2_pq_entry *rhs) { @@ -783,6 +771,8 @@ static int cid_less(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { return ngtcp2_cid_less(lhs, rhs); } +ngtcp2_ksl_search_def(cid_less, cid_less) + static int retired_ts_less(const ngtcp2_pq_entry *lhs, const ngtcp2_pq_entry *rhs) { const ngtcp2_scid *a = ngtcp2_struct_of(lhs, ngtcp2_scid, pe); @@ -803,8 +793,9 @@ static void conn_reset_conn_stat_cc(ngtcp2_conn *conn, cstat->first_rtt_sample_ts = UINT64_MAX; cstat->pto_count = 0; cstat->loss_detection_timer = UINT64_MAX; - cstat->cwnd = - ngtcp2_cc_compute_initcwnd(conn->local.settings.max_tx_udp_payload_size); + cstat->max_tx_udp_payload_size = + ngtcp2_conn_get_path_max_tx_udp_payload_size(conn); + cstat->cwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); cstat->ssthresh = UINT64_MAX; cstat->congestion_recovery_start_ts = UINT64_MAX; cstat->bytes_in_flight = 0; @@ -847,7 +838,7 @@ static void delete_scid(ngtcp2_ksl *scids, const ngtcp2_mem *mem) { static ngtcp2_duration compute_pto(ngtcp2_duration smoothed_rtt, ngtcp2_duration rttvar, ngtcp2_duration max_ack_delay) { - ngtcp2_duration var = ngtcp2_max(4 * rttvar, NGTCP2_GRANULARITY); + ngtcp2_duration var = ngtcp2_max_uint64(4 * rttvar, NGTCP2_GRANULARITY); return smoothed_rtt + var + max_ack_delay; } @@ -859,7 +850,7 @@ static ngtcp2_duration conn_compute_initial_pto(ngtcp2_conn *conn, ngtcp2_duration initial_rtt = conn->local.settings.initial_rtt; ngtcp2_duration max_ack_delay; - if (pktns->rtb.pktns_id == NGTCP2_PKTNS_ID_APPLICATION && + if (pktns->id == NGTCP2_PKTNS_ID_APPLICATION && conn->remote.transport_params) { max_ack_delay = conn->remote.transport_params->max_ack_delay; } else { @@ -876,7 +867,7 @@ static ngtcp2_duration conn_compute_pto(ngtcp2_conn *conn, ngtcp2_conn_stat *cstat = &conn->cstat; ngtcp2_duration max_ack_delay; - if (pktns->rtb.pktns_id == NGTCP2_PKTNS_ID_APPLICATION && + if (pktns->id == NGTCP2_PKTNS_ID_APPLICATION && conn->remote.transport_params) { max_ack_delay = conn->remote.transport_params->max_ack_delay; } else { @@ -898,7 +889,7 @@ static ngtcp2_duration conn_compute_pv_timeout_pto(ngtcp2_conn *conn, ngtcp2_duration pto) { ngtcp2_duration initial_pto = conn_compute_initial_pto(conn, &conn->pktns); - return 3 * ngtcp2_max(pto, initial_pto); + return 3 * ngtcp2_max_uint64(pto, initial_pto); } /* @@ -1007,28 +998,15 @@ static void conn_reset_ecn_validation_state(ngtcp2_conn *conn) { static uint8_t server_default_available_versions[] = {0, 0, 0, 1}; /* - * available_versions_new allocates new buffer, and writes |versions| - * of length |versionslen| in network byte order, suitable for sending - * in available_versions field of version_information QUIC transport - * parameter. The pointer to the allocated buffer is assigned to - * |*pbuf|. - * - * This function returns 0 if it succeeds, or one of the negative - * error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. 
+ * available_versions_init writes |versions| of length |versionslen| + * in network byte order to the buffer pointed by |buf|, suitable for + * sending in available_versions field of version_information QUIC + * transport parameter. This function returns the pointer to the one + * beyond the last byte written. */ -static int available_versions_new(uint8_t **pbuf, const uint32_t *versions, - size_t versionslen, const ngtcp2_mem *mem) { +static void *available_versions_init(void *buf, const uint32_t *versions, + size_t versionslen) { size_t i; - uint8_t *buf = ngtcp2_mem_malloc(mem, sizeof(uint32_t) * versionslen); - - if (buf == NULL) { - return NGTCP2_ERR_NOMEM; - } - - *pbuf = buf; for (i = 0; i < versionslen; ++i) { buf = ngtcp2_put_uint32be(buf, versions[i]); @@ -1055,6 +1033,16 @@ conn_set_local_transport_params(ngtcp2_conn *conn, p->version_info_present = 1; } +static size_t buflen_align(size_t buflen) { + return (buflen + 0x7) & (size_t)~0x7; +} + +static void *buf_align(void *buf) { + return (void *)((uintptr_t)((uint8_t *)buf + 0x7) & (uintptr_t)~0x7); +} + +static void *buf_advance(void *buf, size_t n) { return (uint8_t *)buf + n; } + static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, const ngtcp2_path *path, uint32_t client_chosen_version, int callbacks_version, @@ -1065,16 +1053,20 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_mem *mem, void *user_data, int server) { int rv; ngtcp2_scid *scident; - uint8_t *buf; + void *buf, *tokenbuf; + size_t buflen; uint8_t fixed_bit_byte; size_t i; uint32_t *preferred_versions; + ngtcp2_settings settingsbuf; ngtcp2_transport_params paramsbuf; (void)callbacks_version; (void)settings_version; + settings = + ngtcp2_settings_convert_to_latest(&settingsbuf, settings_version, settings); params = ngtcp2_transport_params_convert_to_latest( - ¶msbuf, transport_params_version, params); + ¶msbuf, transport_params_version, params); assert(settings->max_window <= NGTCP2_MAX_VARINT); assert(settings->max_stream_window <= NGTCP2_MAX_VARINT); @@ -1111,21 +1103,59 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, assert(callbacks->get_path_challenge_data); assert(!server || !ngtcp2_is_reserved_version(client_chosen_version)); + for (i = 0; i < settings->pmtud_probeslen; ++i) { + assert(settings->pmtud_probes[i] > NGTCP2_MAX_UDP_PAYLOAD_SIZE); + } + if (mem == NULL) { mem = ngtcp2_mem_default(); } - *pconn = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_conn)); - if (*pconn == NULL) { - rv = NGTCP2_ERR_NOMEM; - goto fail_conn; + buflen = sizeof(ngtcp2_conn); + if (settings->qlog_write) { + buflen = buflen_align(buflen); + buflen += NGTCP2_QLOG_BUFLEN; + } + + if (settings->pmtud_probeslen) { + buflen = buflen_align(buflen); + buflen += sizeof(settings->pmtud_probes[0]) * settings->pmtud_probeslen; } + if (settings->preferred_versionslen) { + buflen = buflen_align(buflen); + buflen += + sizeof(settings->preferred_versions[0]) * settings->preferred_versionslen; + } + + if (settings->available_versionslen) { + buflen = buflen_align(buflen); + buflen += + sizeof(settings->available_versions[0]) * settings->available_versionslen; + } else if (server) { + if (settings->preferred_versionslen) { + buflen = buflen_align(buflen); + buflen += sizeof(settings->preferred_versions[0]) * + settings->preferred_versionslen; + } + } else if (!ngtcp2_is_reserved_version(client_chosen_version)) { + buflen = buflen_align(buflen); + buflen += sizeof(client_chosen_version); + } + + buf = 
ngtcp2_mem_calloc(mem, 1, buflen); + if (buf == NULL) { + return NGTCP2_ERR_NOMEM; + } + + *pconn = buf; + buf = buf_advance(buf, sizeof(ngtcp2_conn)); + (*pconn)->server = server; - ngtcp2_objalloc_frame_chain_init(&(*pconn)->frc_objalloc, 64, mem); - ngtcp2_objalloc_rtb_entry_init(&(*pconn)->rtb_entry_objalloc, 64, mem); - ngtcp2_objalloc_strm_init(&(*pconn)->strm_objalloc, 64, mem); + ngtcp2_objalloc_frame_chain_init(&(*pconn)->frc_objalloc, 16, mem); + ngtcp2_objalloc_rtb_entry_init(&(*pconn)->rtb_entry_objalloc, 16, mem); + ngtcp2_objalloc_strm_init(&(*pconn)->strm_objalloc, 16, mem); ngtcp2_static_ringbuf_dcid_bound_init(&(*pconn)->dcid.bound); @@ -1135,7 +1165,8 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_gaptr_init(&(*pconn)->dcid.seqgap, mem); - ngtcp2_ksl_init(&(*pconn)->scid.set, cid_less, sizeof(ngtcp2_cid), mem); + ngtcp2_ksl_init(&(*pconn)->scid.set, cid_less, ksl_cid_less_search, + sizeof(ngtcp2_cid), mem); ngtcp2_pq_init(&(*pconn)->scid.used, retired_ts_less, mem); @@ -1143,9 +1174,9 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_pq_init(&(*pconn)->tx.strmq, cycle_less, mem); - ngtcp2_idtr_init(&(*pconn)->remote.bidi.idtr, !server, mem); + ngtcp2_idtr_init(&(*pconn)->remote.bidi.idtr, mem); - ngtcp2_idtr_init(&(*pconn)->remote.uni.idtr, !server, mem); + ngtcp2_idtr_init(&(*pconn)->remote.uni.idtr, mem); ngtcp2_static_ringbuf_path_challenge_init(&(*pconn)->rx.path_challenge); @@ -1154,36 +1185,46 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_qlog_init(&(*pconn)->qlog, settings->qlog_write, settings->initial_ts, user_data); if ((*pconn)->qlog.write) { - buf = ngtcp2_mem_malloc(mem, NGTCP2_QLOG_BUFLEN); - if (buf == NULL) { - rv = NGTCP2_ERR_NOMEM; - goto fail_qlog_buf; - } + buf = buf_align(buf); ngtcp2_buf_init(&(*pconn)->qlog.buf, buf, NGTCP2_QLOG_BUFLEN); + buf = buf_advance(buf, NGTCP2_QLOG_BUFLEN); } (*pconn)->local.settings = *settings; if (settings->tokenlen) { - buf = ngtcp2_mem_malloc(mem, settings->tokenlen); - if (buf == NULL) { + tokenbuf = ngtcp2_mem_malloc(mem, settings->tokenlen); + if (tokenbuf == NULL) { rv = NGTCP2_ERR_NOMEM; goto fail_token; } - memcpy(buf, settings->token, settings->tokenlen); - (*pconn)->local.settings.token = buf; + memcpy(tokenbuf, settings->token, settings->tokenlen); + (*pconn)->local.settings.token = tokenbuf; } else { (*pconn)->local.settings.token = NULL; } + if (settings->pmtud_probeslen) { + (*pconn)->local.settings.pmtud_probes = buf_align(buf); + buf = ngtcp2_cpymem( + (uint16_t *)(*pconn)->local.settings.pmtud_probes, settings->pmtud_probes, + sizeof(settings->pmtud_probes[0]) * settings->pmtud_probeslen); + } + if (!(*pconn)->local.settings.original_version) { (*pconn)->local.settings.original_version = client_chosen_version; } + ngtcp2_dcid_init(&(*pconn)->dcid.current, 0, dcid, NULL); + ngtcp2_dcid_set_path(&(*pconn)->dcid.current, path); + + rv = ngtcp2_gaptr_push(&(*pconn)->dcid.seqgap, 0, 1); + if (rv != 0) { + goto fail_seqgap_push; + } + conn_reset_conn_stat(*pconn, &(*pconn)->cstat); (*pconn)->cstat.initial_rtt = settings->initial_rtt; - (*pconn)->cstat.max_tx_udp_payload_size = - (*pconn)->local.settings.max_tx_udp_payload_size; ngtcp2_rst_init(&(*pconn)->rst); @@ -1195,7 +1236,7 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, break; case NGTCP2_CC_ALGO_CUBIC: - ngtcp2_cc_cubic_init(&(*pconn)->cubic, &(*pconn)->log); + ngtcp2_cc_cubic_init(&(*pconn)->cubic, &(*pconn)->log, &(*pconn)->rst); break; case 
NGTCP2_CC_ALGO_BBR: @@ -1224,13 +1265,10 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, goto fail_hs_pktns_init; } - rv = pktns_init(&(*pconn)->pktns, NGTCP2_PKTNS_ID_APPLICATION, &(*pconn)->rst, - &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, - &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, - &(*pconn)->frc_objalloc, mem); - if (rv != 0) { - goto fail_pktns_init; - } + pktns_init(&(*pconn)->pktns, NGTCP2_PKTNS_ID_APPLICATION, &(*pconn)->rst, + &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, + &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, + &(*pconn)->frc_objalloc, mem); scident = ngtcp2_mem_malloc(mem, sizeof(*scident)); if (scident == NULL) { @@ -1249,14 +1287,6 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, scident = NULL; - ngtcp2_dcid_init(&(*pconn)->dcid.current, 0, dcid, NULL); - ngtcp2_dcid_set_path(&(*pconn)->dcid.current, path); - - rv = ngtcp2_gaptr_push(&(*pconn)->dcid.seqgap, 0, 1); - if (rv != 0) { - goto fail_seqgap_push; - } - if (settings->preferred_versionslen) { if (!server && !ngtcp2_is_reserved_version(client_chosen_version)) { for (i = 0; i < settings->preferred_versionslen; ++i) { @@ -1268,12 +1298,9 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, assert(i < settings->preferred_versionslen); } - preferred_versions = ngtcp2_mem_malloc( - mem, sizeof(uint32_t) * settings->preferred_versionslen); - if (preferred_versions == NULL) { - rv = NGTCP2_ERR_NOMEM; - goto fail_preferred_versions; - } + preferred_versions = buf_align(buf); + buf = buf_advance(preferred_versions, sizeof(preferred_versions[0]) * + settings->preferred_versionslen); for (i = 0; i < settings->preferred_versionslen; ++i) { assert(ngtcp2_is_supported_version(settings->preferred_versions[i])); @@ -1304,39 +1331,33 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_is_supported_version(settings->available_versions[i])); } - rv = available_versions_new(&buf, settings->available_versions, - settings->available_versionslen, mem); - if (rv != 0) { - goto fail_available_versions; - } - - (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versions = buf_align(buf); (*pconn)->vneg.available_versionslen = - sizeof(uint32_t) * settings->available_versionslen; + sizeof(uint32_t) * settings->available_versionslen; + + buf = available_versions_init((*pconn)->vneg.available_versions, + settings->available_versions, + settings->available_versionslen); } else if (server) { if (settings->preferred_versionslen) { - rv = available_versions_new(&buf, settings->preferred_versions, - settings->preferred_versionslen, mem); - if (rv != 0) { - goto fail_available_versions; - } - - (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versions = buf_align(buf); (*pconn)->vneg.available_versionslen = - sizeof(uint32_t) * settings->preferred_versionslen; + sizeof(uint32_t) * settings->preferred_versionslen; + + buf = available_versions_init((*pconn)->vneg.available_versions, + settings->preferred_versions, + settings->preferred_versionslen); } else { (*pconn)->vneg.available_versions = server_default_available_versions; (*pconn)->vneg.available_versionslen = - sizeof(server_default_available_versions); + sizeof(server_default_available_versions); } - } else if (!server && !ngtcp2_is_reserved_version(client_chosen_version)) { - rv = available_versions_new(&buf, &client_chosen_version, 1, mem); - if (rv != 0) { - goto fail_available_versions; - } - - (*pconn)->vneg.available_versions = buf; + } 
else if (!ngtcp2_is_reserved_version(client_chosen_version)) { + (*pconn)->vneg.available_versions = buf_align(buf); (*pconn)->vneg.available_versionslen = sizeof(uint32_t); + + buf = available_versions_init((*pconn)->vneg.available_versions, + &client_chosen_version, 1); } (*pconn)->local.settings.available_versions = NULL; @@ -1367,54 +1388,39 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, conn_reset_ecn_validation_state(*pconn); - ngtcp2_qlog_start( - &(*pconn)->qlog, - server ? ((*pconn)->local.transport_params.retry_scid_present - ? &(*pconn)->local.transport_params.retry_scid - : &(*pconn)->local.transport_params.original_dcid) - : dcid, - server); + ngtcp2_qlog_start(&(*pconn)->qlog, + server + ? ((*pconn)->local.transport_params.retry_scid_present + ? &(*pconn)->local.transport_params.retry_scid + : &(*pconn)->local.transport_params.original_dcid) + : dcid, + server); return 0; -fail_available_versions: - ngtcp2_mem_free(mem, (*pconn)->vneg.preferred_versions); -fail_preferred_versions: -fail_seqgap_push: fail_scid_set_insert: ngtcp2_mem_free(mem, scident); fail_scident: - pktns_free(&(*pconn)->pktns, mem); -fail_pktns_init: pktns_del((*pconn)->hs_pktns, mem); fail_hs_pktns_init: pktns_del((*pconn)->in_pktns, mem); fail_in_pktns_init: + ngtcp2_gaptr_free(&(*pconn)->dcid.seqgap); +fail_seqgap_push: ngtcp2_mem_free(mem, (uint8_t *)(*pconn)->local.settings.token); fail_token: - ngtcp2_mem_free(mem, (*pconn)->qlog.buf.begin); -fail_qlog_buf: - ngtcp2_idtr_free(&(*pconn)->remote.uni.idtr); - ngtcp2_idtr_free(&(*pconn)->remote.bidi.idtr); - ngtcp2_map_free(&(*pconn)->strms); - delete_scid(&(*pconn)->scid.set, mem); - ngtcp2_ksl_free(&(*pconn)->scid.set); - ngtcp2_gaptr_free(&(*pconn)->dcid.seqgap); - ngtcp2_objalloc_free(&(*pconn)->strm_objalloc); - ngtcp2_objalloc_free(&(*pconn)->rtb_entry_objalloc); - ngtcp2_objalloc_free(&(*pconn)->frc_objalloc); ngtcp2_mem_free(mem, *pconn); -fail_conn: + return rv; } int ngtcp2_conn_client_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data) { + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data) { int rv; rv = conn_new(pconn, dcid, scid, path, client_chosen_version, @@ -1438,12 +1444,12 @@ int ngtcp2_conn_client_new_versioned( } int ngtcp2_conn_server_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data) { + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const 
ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data) { int rv; rv = conn_new(pconn, dcid, scid, path, client_chosen_version, @@ -1471,8 +1477,8 @@ int ngtcp2_conn_server_new_versioned( * credits are considered. */ static uint64_t conn_fc_credits(ngtcp2_conn *conn, ngtcp2_strm *strm) { - return ngtcp2_min(strm->tx.max_offset - strm->tx.offset, - conn->tx.max_offset - conn->tx.offset); + return ngtcp2_min_uint64(strm->tx.max_offset - strm->tx.offset, + conn->tx.max_offset - conn->tx.offset); } /* @@ -1483,7 +1489,7 @@ static uint64_t conn_fc_credits(ngtcp2_conn *conn, ngtcp2_strm *strm) { static uint64_t conn_enforce_flow_control(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t len) { uint64_t fc_credits = conn_fc_credits(conn, strm); - return ngtcp2_min(len, fc_credits); + return ngtcp2_min_uint64(len, fc_credits); } static int delete_strms_each(void *data, void *ptr) { @@ -1525,15 +1531,15 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { if (conn->crypto.key_update.old_rx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); + conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); } if (conn->crypto.key_update.new_rx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.new_rx_ckm->aead_ctx); + conn, &conn->crypto.key_update.new_rx_ckm->aead_ctx); } if (conn->crypto.key_update.new_tx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.new_tx_ckm->aead_ctx); + conn, &conn->crypto.key_update.new_tx_ckm->aead_ctx); } if (conn->pktns.crypto.rx.ckm) { @@ -1551,26 +1557,26 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { if (conn->hs_pktns) { if (conn->hs_pktns->crypto.rx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->hs_pktns->crypto.rx.ckm->aead_ctx); + conn, &conn->hs_pktns->crypto.rx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->hs_pktns->crypto.rx.hp_ctx); if (conn->hs_pktns->crypto.tx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->hs_pktns->crypto.tx.ckm->aead_ctx); + conn, &conn->hs_pktns->crypto.tx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->hs_pktns->crypto.tx.hp_ctx); } if (conn->in_pktns) { if (conn->in_pktns->crypto.rx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->in_pktns->crypto.rx.ckm->aead_ctx); + conn, &conn->in_pktns->crypto.rx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->in_pktns->crypto.rx.hp_ctx); if (conn->in_pktns->crypto.tx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->in_pktns->crypto.tx.ckm->aead_ctx); + conn, &conn->in_pktns->crypto.tx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->in_pktns->crypto.tx.hp_ctx); } @@ -1582,11 +1588,6 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { conn_vneg_crypto_free(conn); - ngtcp2_mem_free(conn->mem, conn->vneg.preferred_versions); - if (conn->vneg.available_versions != server_default_available_versions) { - ngtcp2_mem_free(conn->mem, conn->vneg.available_versions); - } - ngtcp2_mem_free(conn->mem, conn->crypto.decrypt_buf.base); ngtcp2_mem_free(conn->mem, conn->crypto.decrypt_hp_buf.base); ngtcp2_mem_free(conn->mem, (uint8_t *)conn->local.settings.token); @@ -1600,8 +1601,6 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { pktns_del(conn->hs_pktns, conn->mem); pktns_del(conn->in_pktns, conn->mem); - ngtcp2_mem_free(conn->mem, conn->qlog.buf.begin); - ngtcp2_pmtud_del(conn->pmtud); ngtcp2_pv_del(conn->pv); @@ -1611,7 +1610,7 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { ngtcp2_idtr_free(&conn->remote.bidi.idtr); 
ngtcp2_mem_free(conn->mem, conn->tx.ack); ngtcp2_pq_free(&conn->tx.strmq); - ngtcp2_map_each_free(&conn->strms, delete_strms_each, (void *)conn); + ngtcp2_map_each(&conn->strms, delete_strms_each, (void *)conn); ngtcp2_map_free(&conn->strms); ngtcp2_pq_free(&conn->scid.used); @@ -1665,8 +1664,9 @@ static int conn_ensure_ack_ranges(ngtcp2_conn *conn, size_t n) { * ACK. */ static ngtcp2_duration conn_compute_ack_delay(ngtcp2_conn *conn) { - return ngtcp2_min(conn->local.transport_params.max_ack_delay, - conn->cstat.smoothed_rtt / 8); + return ngtcp2_min_uint64( + conn->local.transport_params.max_ack_delay, + ngtcp2_max_uint64(conn->cstat.smoothed_rtt / 8, NGTCP2_NANOSECONDS)); } int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, @@ -1702,8 +1702,8 @@ int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, if (conn->tx.ack == NULL) { conn->tx.ack = ngtcp2_mem_malloc( - conn->mem, - sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_range) * initial_max_ack_ranges); + conn->mem, + sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_range) * initial_max_ack_ranges); if (conn->tx.ack == NULL) { return NGTCP2_ERR_NOMEM; } @@ -1844,8 +1844,9 @@ static int conn_ppe_write_frame(ngtcp2_conn *conn, ngtcp2_ppe *ppe, * NGTCP2_ERR_NOMEM * Out of memory */ -static int conn_on_pkt_sent(ngtcp2_conn *conn, ngtcp2_rtb *rtb, +static int conn_on_pkt_sent(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_rtb_entry *ent) { + ngtcp2_rtb *rtb = &pktns->rtb; int rv; /* This function implements OnPacketSent, but it handles only @@ -1856,7 +1857,7 @@ static int conn_on_pkt_sent(ngtcp2_conn *conn, ngtcp2_rtb *rtb, } if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { - conn->cstat.last_tx_pkt_ts[rtb->pktns_id] = ent->ts; + conn->cstat.last_tx_pkt_ts[pktns->id] = ent->ts; } ngtcp2_conn_set_loss_detection_timer(conn, ent->ts); @@ -1898,8 +1899,8 @@ static size_t pktns_select_pkt_numlen(ngtcp2_pktns *pktns) { */ static uint64_t conn_get_cwnd(ngtcp2_conn *conn) { return conn->pv && (conn->pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE) - ? ngtcp2_cc_compute_initcwnd(conn->cstat.max_tx_udp_payload_size) - : conn->cstat.cwnd; + ? ngtcp2_cc_compute_initcwnd(conn->cstat.max_tx_udp_payload_size) + : conn->cstat.cwnd; } /* @@ -1947,7 +1948,7 @@ static uint64_t conn_retry_early_payloadlen(ngtcp2_conn *conn) { /* Take the min because in conn_should_pad_pkt we take max in order to deal with unbreakable DATAGRAM. */ - return ngtcp2_min(len, NGTCP2_MIN_COALESCED_PAYLOADLEN); + return ngtcp2_min_uint64(len, NGTCP2_MIN_COALESCED_PAYLOADLEN); } return 0; @@ -2021,12 +2022,13 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, return 0; } - if (conn->hs_pktns->crypto.tx.ckm && - (conn->hs_pktns->rtb.probe_pkt_left || - !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm) || - !ngtcp2_acktr_empty(&conn->hs_pktns->acktr))) { - /* If we have something to send in Handshake packet, then add - PADDING in Handshake packet. */ + if ((conn->hs_pktns->crypto.tx.ckm && + (conn->hs_pktns->rtb.probe_pkt_left || + !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm) || + !ngtcp2_acktr_empty(&conn->hs_pktns->acktr))) || + conn->pktns.crypto.tx.ckm) { + /* If we have something to send in Handshake or 1RTT packet, + then add PADDING in that packet. */ min_payloadlen = NGTCP2_MIN_COALESCED_PAYLOADLEN; } else { return 1; @@ -2044,7 +2046,7 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, PADDING in that packet. 
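A minimal standalone sketch of the delayed-ACK timeout computed in the conn_compute_ack_delay hunk above: the delay is smoothed RTT / 8, floored at one time unit so it never collapses to zero, and capped by the locally advertised max_ack_delay. The names below (compute_ack_delay, duration_ns) are illustrative stand-ins, not ngtcp2 API.

    #include <stdint.h>

    typedef uint64_t duration_ns; /* stand-in for ngtcp2_duration (nanoseconds) */

    /* min(max_ack_delay, max(smoothed_rtt / 8, 1 unit)), as in the hunk above. */
    static duration_ns compute_ack_delay(duration_ns max_ack_delay,
                                         duration_ns smoothed_rtt) {
      duration_ns d = smoothed_rtt / 8;
      if (d < 1) {
        d = 1;
      }
      return max_ack_delay < d ? max_ack_delay : d;
    }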
Take maximum in case that write_datalen includes DATAGRAM which cannot be split. */ min_payloadlen = - ngtcp2_max(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); + ngtcp2_max_uint64(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); } else { return 1; } @@ -2067,8 +2069,8 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, return left < /* TODO Assuming that pkt_num is encoded in 1 byte. */ NGTCP2_MIN_LONG_HEADERLEN + conn->dcid.current.cid.datalen + - conn->oscid.datalen + NGTCP2_PKT_LENGTHLEN - 1 + min_payloadlen + - NGTCP2_MAX_AEAD_OVERHEAD; + conn->oscid.datalen + NGTCP2_PKT_LENGTHLEN - 1 + min_payloadlen + + NGTCP2_MAX_AEAD_OVERHEAD; } static void conn_restart_timer_on_write(ngtcp2_conn *conn, ngtcp2_tstamp ts) { @@ -2189,15 +2191,18 @@ static uint8_t conn_pkt_flags_long(ngtcp2_conn *conn) { static uint8_t conn_pkt_flags_short(ngtcp2_conn *conn) { return (uint8_t)(conn_pkt_flags(conn) | ((conn->pktns.crypto.tx.ckm->flags & NGTCP2_CRYPTO_KM_FLAG_KEY_PHASE_ONE) - ? NGTCP2_PKT_FLAG_KEY_PHASE - : NGTCP2_PKT_FLAG_NONE)); + ? NGTCP2_PKT_FLAG_KEY_PHASE + : NGTCP2_PKT_FLAG_NONE)); } +static size_t conn_min_pktlen(ngtcp2_conn *conn); + /* * conn_write_handshake_pkt writes handshake packet in the buffer - * pointed by |dest| whose length is |destlen|. |type| specifies long - * packet type. It should be either NGTCP2_PKT_INITIAL or - * NGTCP2_PKT_HANDSHAKE_PKT. + * pointed by |dest| whose length is |destlen|. |dgram_offset| is the + * offset in UDP datagram payload where this QUIC packet is positioned + * at. |type| specifies long packet type. It should be either + * NGTCP2_PKT_INITIAL or NGTCP2_PKT_HANDSHAKE_PKT. * * |write_datalen| is the minimum length of application data ready to * send in subsequent 0RTT packet. @@ -2212,8 +2217,9 @@ static uint8_t conn_pkt_flags_short(ngtcp2_conn *conn) { */ static ngtcp2_ssize conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint8_t type, uint8_t flags, - uint64_t write_datalen, ngtcp2_tstamp ts) { + size_t destlen, size_t dgram_offset, uint8_t type, + uint8_t flags, uint64_t write_datalen, + ngtcp2_tstamp ts) { int rv; ngtcp2_ppe ppe; ngtcp2_pkt_hd hd; @@ -2228,6 +2234,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, uint16_t rtb_entry_flags = NGTCP2_RTB_ENTRY_FLAG_NONE; int require_padding = (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) != 0; int pkt_empty = 1; + int min_padded = 0; int padded = 0; int hd_logged = 0; uint64_t crypto_offset; @@ -2271,10 +2278,9 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, cc.encrypt = conn->callbacks.encrypt; cc.hp_mask = conn->callbacks.hp_mask; - ngtcp2_pkt_hd_init(&hd, conn_pkt_flags_long(conn), type, - &conn->dcid.current.cid, &conn->oscid, - pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), - version, 0); + ngtcp2_pkt_hd_init( + &hd, conn_pkt_flags_long(conn), type, &conn->dcid.current.cid, &conn->oscid, + pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); if (!conn->server && type == NGTCP2_PKT_INITIAL && conn->local.settings.tokenlen) { @@ -2282,7 +2288,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, hd.tokenlen = conn->local.settings.tokenlen; } - ngtcp2_ppe_init(&ppe, dest, destlen, &cc); + ngtcp2_ppe_init(&ppe, dest, destlen, dgram_offset, &cc); rv = ngtcp2_ppe_encode_hd(&ppe, &hd); if (rv != 0) { @@ -2395,7 +2401,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t 
*dest, assert(rv == NGTCP2_ERR_NOBUF); } else { rtb_entry_flags |= - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PROBE; + NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PROBE; pkt_empty = 0; } } @@ -2412,6 +2418,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, } else { rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING; pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; + pkt_empty = 0; } } else if (pktns->tx.non_ack_pkt_start_ts == UINT64_MAX) { pktns->tx.non_ack_pkt_start_ts = ts; @@ -2429,20 +2436,24 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, /* If we cannot write another packet, then we need to add padding to Initial here. */ if (conn_should_pad_pkt( - conn, type, ngtcp2_ppe_left(&ppe), write_datalen, - (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) != 0, - require_padding)) { + conn, type, ngtcp2_ppe_left(&ppe), write_datalen, + (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) != 0, + require_padding)) { lfr.type = NGTCP2_FRAME_PADDING; - lfr.padding.len = ngtcp2_ppe_padding(&ppe); + lfr.padding.len = ngtcp2_ppe_dgram_padding(&ppe); } else if (pkt_empty) { return 0; } else { lfr.type = NGTCP2_FRAME_PADDING; - lfr.padding.len = ngtcp2_ppe_padding_hp_sample(&ppe); + lfr.padding.len = ngtcp2_ppe_padding_size(&ppe, conn_min_pktlen(conn)); + min_padded = 1; } if (lfr.padding.len) { - padded = 1; + if (!min_padded || + (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { + padded = 1; + } ngtcp2_log_tx_fr(&conn->log, &hd, &lfr); ngtcp2_qlog_write_frame(&conn->qlog, &lfr); } @@ -2461,16 +2472,16 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, &hd, ts); } - rv = ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, frq, ts, (size_t)spktlen, - rtb_entry_flags, - &conn->rtb_entry_objalloc); + rv = + ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, frq, ts, (size_t)spktlen, + rtb_entry_flags, &conn->rtb_entry_objalloc); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_list_objalloc_del(frq, &conn->frc_objalloc, conn->mem); return rv; } - rv = conn_on_pkt_sent(conn, &pktns->rtb, rtbent); + rv = conn_on_pkt_sent(conn, pktns, rtbent); if (rv != 0) { ngtcp2_rtb_entry_objalloc_del(rtbent, &conn->rtb_entry_objalloc, &conn->frc_objalloc, conn->mem); @@ -2565,8 +2576,8 @@ static ngtcp2_ssize conn_write_ack_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } spktlen = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, type, NGTCP2_WRITE_PKT_FLAG_NONE, - &conn->dcid.current.cid, ackfr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, type, NGTCP2_WRITE_PKT_FLAG_NONE, + &conn->dcid.current.cid, ackfr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (spktlen <= 0) { return spktlen; @@ -2588,8 +2599,8 @@ static void conn_discard_pktns(ngtcp2_conn *conn, ngtcp2_pktns **ppktns, conn->cstat.bytes_in_flight -= bytes_in_flight; conn->cstat.pto_count = 0; - conn->cstat.last_tx_pkt_ts[pktns->rtb.pktns_id] = UINT64_MAX; - conn->cstat.loss_time[pktns->rtb.pktns_id] = UINT64_MAX; + conn->cstat.last_tx_pkt_ts[pktns->id] = UINT64_MAX; + conn->cstat.loss_time[pktns->id] = UINT64_MAX; conn_call_delete_crypto_aead_ctx(conn, &pktns->crypto.rx.ckm->aead_ctx); conn_call_delete_crypto_cipher_ctx(conn, &pktns->crypto.rx.hp_ctx); @@ -2602,11 +2613,7 @@ static void conn_discard_pktns(ngtcp2_conn *conn, ngtcp2_pktns **ppktns, ngtcp2_conn_set_loss_detection_timer(conn, ts); } -/* - * 
conn_discard_initial_state discards state for Initial packet number - * space. - */ -static void conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { +void ngtcp2_conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (!conn->in_pktns) { return; } @@ -2622,11 +2629,7 @@ static void conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { memset(&conn->vneg.tx, 0, sizeof(conn->vneg.tx)); } -/* - * conn_discard_handshake_state discards state for Handshake packet - * number space. - */ -static void conn_discard_handshake_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { +void ngtcp2_conn_discard_handshake_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (!conn->hs_pktns) { return; } @@ -2672,7 +2675,7 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, to send ACK is give server RTT measurement early. */ if (conn->server && conn->in_pktns) { nwrite = - conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, ts); + conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2685,7 +2688,7 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, if (conn->hs_pktns->crypto.tx.ckm) { nwrite = - conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, ts); + conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2694,7 +2697,7 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, res += nwrite; if (!conn->server && nwrite) { - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } } @@ -2725,9 +2728,9 @@ static ngtcp2_ssize conn_write_client_initial(ngtcp2_conn *conn, return rv; } - return conn_write_handshake_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, early_datalen, - ts); + return conn_write_handshake_pkt( + conn, pi, dest, destlen, 0, NGTCP2_PKT_INITIAL, NGTCP2_WRITE_PKT_FLAG_NONE, + early_datalen, ts); } /* @@ -2807,11 +2810,11 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, conn->hs_pktns->rtb.probe_pkt_left)) { /* Discard Initial state here so that Handshake packet is not padded. 
*/ - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } else if (conn->in_pktns) { nwrite = - conn_write_handshake_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); + conn_write_handshake_pkt(conn, pi, dest, destlen, 0, NGTCP2_PKT_INITIAL, + NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2823,11 +2826,11 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, !ngtcp2_strm_streamfrq_empty(&conn->in_pktns->crypto.strm))) { if (cstat->loss_detection_timer != UINT64_MAX && conn_server_tx_left(conn, &conn->dcid.current) < - NGTCP2_MAX_UDP_PAYLOAD_SIZE) { + NGTCP2_MAX_UDP_PAYLOAD_SIZE) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "loss detection timer canceled due to amplification limit"); - cstat->loss_detection_timer = UINT64_MAX; + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + ngtcp2_conn_cancel_loss_detection_timer(conn); } return 0; @@ -2837,10 +2840,10 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, dest += nwrite; destlen -= (size_t)nwrite; - if (destlen) { - /* We might have already added padding to Initial, but in that - case, we should have destlen == 0 and no Handshake packet - will be written. */ + /* If initial packet size is at least + NGTCP2_MAX_UDP_PAYLOAD_SIZE, no extra padding is needed in a + subsequent packet. */ + if (nwrite < NGTCP2_MAX_UDP_PAYLOAD_SIZE) { if (conn->server) { it = ngtcp2_rtb_head(&conn->in_pktns->rtb); if (!ngtcp2_ksl_it_end(&it)) { @@ -2856,8 +2859,9 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, } } - nwrite = conn_write_handshake_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, wflags, write_datalen, ts); + nwrite = + conn_write_handshake_pkt(conn, pi, dest, destlen, (size_t)res, + NGTCP2_PKT_HANDSHAKE, wflags, write_datalen, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2869,7 +2873,7 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, /* We don't need to send further Initial packet if we have Handshake key and sent something with it. So discard initial state here. */ - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } return res; @@ -2905,7 +2909,7 @@ static int conn_should_send_max_stream_data(ngtcp2_conn *conn, uint64_t inc = strm->rx.unsent_max_offset - strm->rx.max_offset; (void)conn; - return strm->rx.window < 2 * inc; + return strm->rx.window < 4 * inc; } /* @@ -2915,7 +2919,7 @@ static int conn_should_send_max_stream_data(ngtcp2_conn *conn, static int conn_should_send_max_data(ngtcp2_conn *conn) { uint64_t inc = conn->rx.unsent_max_offset - conn->rx.max_offset; - return conn->rx.window < 2 * inc; + return conn->rx.window < 4 * inc; } /* @@ -2948,9 +2952,9 @@ static size_t conn_required_num_new_connection_id(ngtcp2_conn *conn) { n = conn->remote.transport_params->active_connection_id_limit + conn->scid.num_retired; - n = ngtcp2_min(NGTCP2_MAX_SCID_POOL_SIZE, n) - len; + n = ngtcp2_min_uint64(NGTCP2_MAX_SCID_POOL_SIZE, n) - len; - return (size_t)ngtcp2_min(lim, n); + return (size_t)ngtcp2_min_uint64(lim, n); } /* @@ -3093,11 +3097,12 @@ static int conn_remove_retired_connection_id(ngtcp2_conn *conn, } /* - * conn_min_short_pktlen returns the minimum length of Short packet - * this endpoint sends. + * conn_min_pktlen returns the minimum length of packet this endpoint + * sends. 
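The two hunks just above loosen the flow-control autotuning trigger: a MAX_STREAM_DATA / MAX_DATA update is now queued once the consumed-but-unannounced credit exceeds a quarter of the receive window (window < 4 * inc) rather than half. A simplified sketch of that predicate; the parameter names are illustrative:

    #include <stdint.h>

    /* Send a window update once the unannounced credit (unsent_max_offset -
       max_offset) exceeds 1/4 of the receive window, mirroring the
       "window < 4 * inc" checks above. */
    static int should_send_window_update(uint64_t unsent_max_offset,
                                         uint64_t max_offset, uint64_t window) {
      uint64_t inc = unsent_max_offset - max_offset;
      return window < 4 * inc;
    }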
It may underestimate the length because this does not take + * into account header protection sample. */ -static size_t conn_min_short_pktlen(ngtcp2_conn *conn) { - return conn->dcid.current.cid.datalen + NGTCP2_MIN_PKT_EXPANDLEN; +static size_t conn_min_pktlen(ngtcp2_conn *conn) { + return conn->oscid.datalen + NGTCP2_MIN_PKT_EXPANDLEN; } /* @@ -3155,8 +3160,10 @@ static void conn_reset_ppe_pending(ngtcp2_conn *conn) { /* * conn_write_pkt writes a protected packet in the buffer pointed by - * |dest| whose length if |destlen|. |type| specifies the type of - * packet. It can be NGTCP2_PKT_1RTT or NGTCP2_PKT_0RTT. + * |dest| whose length if |destlen|. |dgram_offset| is the offset in + * UDP datagram payload where this QUIC packet is positioned at. + * |type| specifies the type of packet. It can be NGTCP2_PKT_1RTT or + * NGTCP2_PKT_0RTT. * * This function can send new stream data. In order to send stream * data, specify the underlying stream and parameters to @@ -3183,8 +3190,9 @@ static void conn_reset_ppe_pending(ngtcp2_conn *conn) { */ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - ngtcp2_vmsg *vmsg, uint8_t type, - uint8_t flags, ngtcp2_tstamp ts) { + size_t dgram_offset, ngtcp2_vmsg *vmsg, + uint8_t type, uint8_t flags, + ngtcp2_tstamp ts) { int rv = 0; ngtcp2_crypto_cc *cc = &conn->pkt.cc; ngtcp2_ppe *ppe = &conn->pkt.ppe; @@ -3211,7 +3219,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, int require_padding = (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) != 0; int write_more = (flags & NGTCP2_WRITE_PKT_FLAG_MORE) != 0; int ppe_pending = (conn->flags & NGTCP2_CONN_FLAG_PPE_PENDING) != 0; - size_t min_pktlen = conn_min_short_pktlen(conn); + size_t min_pktlen = conn_min_pktlen(conn); + int min_padded = 0; int padded = 0; ngtcp2_cc_pkt cc_pkt; uint64_t crypto_offset; @@ -3311,7 +3320,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (conn->local.settings.max_window && conn->tx.last_max_data_ts != UINT64_MAX && ts - conn->tx.last_max_data_ts < - NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && + NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && conn->local.settings.max_window > conn->rx.window) { target_max_data = NGTCP2_FLOW_WINDOW_SCALING_FACTOR * conn->rx.window; if (target_max_data > conn->local.settings.max_window) { @@ -3336,7 +3345,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pktns->tx.frq = nfrc; conn->rx.max_offset = conn->rx.unsent_max_offset = - nfrc->fr.max_data.max_data; + nfrc->fr.max_data.max_data; } if (stream_blocked && conn_should_send_max_data(conn)) { @@ -3369,7 +3378,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); - ngtcp2_ppe_init(ppe, dest, destlen, cc); + ngtcp2_ppe_init(ppe, dest, destlen, dgram_offset, cc); rv = ngtcp2_ppe_encode_hd(ppe, hd); if (rv != 0) { @@ -3407,8 +3416,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } rv = ngtcp2_conn_create_ack_frame( - conn, &ackfr, pktns, type, ts, conn_compute_ack_delay(conn), - conn->local.transport_params.ack_delay_exponent); + conn, &ackfr, pktns, type, ts, conn_compute_ack_delay(conn), + conn->local.transport_params.ack_delay_exponent); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3422,10 +3431,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, 
ngtcp2_acktr_commit_ack(&pktns->acktr); ngtcp2_acktr_add_ack(&pktns->acktr, hd->pkt_num, ackfr->ack.largest_ack); - if (type == NGTCP2_PKT_1RTT) { - conn_handle_unconfirmed_key_update_from_remote( - conn, ackfr->ack.largest_ack, ts); - } + assert(NGTCP2_PKT_1RTT == type); + conn_handle_unconfirmed_key_update_from_remote( + conn, ackfr->ack.largest_ack, ts); pkt_empty = 0; } } @@ -3443,7 +3451,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, switch ((*pfrc)->fr.type) { case NGTCP2_FRAME_RESET_STREAM: strm = - ngtcp2_conn_find_stream(conn, (*pfrc)->fr.reset_stream.stream_id); + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.reset_stream.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_reset_stream(strm)) { frc = *pfrc; @@ -3454,7 +3462,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, break; case NGTCP2_FRAME_STOP_SENDING: strm = - ngtcp2_conn_find_stream(conn, (*pfrc)->fr.stop_sending.stream_id); + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.stop_sending.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_stop_sending(strm)) { frc = *pfrc; @@ -3484,10 +3492,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } break; case NGTCP2_FRAME_MAX_STREAM_DATA: - strm = ngtcp2_conn_find_stream(conn, - (*pfrc)->fr.max_stream_data.stream_id); + strm = + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.max_stream_data.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_max_stream_data( - strm, &(*pfrc)->fr.max_stream_data)) { + strm, &(*pfrc)->fr.max_stream_data)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -3504,9 +3512,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, break; case NGTCP2_FRAME_STREAM_DATA_BLOCKED: strm = ngtcp2_conn_find_stream( - conn, (*pfrc)->fr.stream_data_blocked.stream_id); + conn, (*pfrc)->fr.stream_data_blocked.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_stream_data_blocked( - strm, &(*pfrc)->fr.stream_data_blocked)) { + strm, &(*pfrc)->fr.stream_data_blocked)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -3543,7 +3551,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, left = ngtcp2_ppe_left(ppe); crypto_offset = - ngtcp2_strm_streamfrq_unacked_offset(&pktns->crypto.strm); + ngtcp2_strm_streamfrq_unacked_offset(&pktns->crypto.strm); if (crypto_offset == (uint64_t)-1) { ngtcp2_strm_streamfrq_clear(&pktns->crypto.strm); break; @@ -3593,14 +3601,14 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.type = NGTCP2_FRAME_RESET_STREAM; nfrc->fr.reset_stream.stream_id = strm->stream_id; nfrc->fr.reset_stream.app_error_code = - strm->tx.reset_stream_app_error_code; + strm->tx.reset_stream_app_error_code; nfrc->fr.reset_stream.final_size = strm->tx.offset; *pfrc = nfrc; strm->flags &= ~NGTCP2_STRM_FLAG_SEND_RESET_STREAM; rv = - conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); @@ -3620,8 +3628,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, strm->flags &= ~NGTCP2_STRM_FLAG_SEND_STOP_SENDING; } else { rv = conn_call_stream_stop_sending( - conn, strm->stream_id, strm->tx.stop_sending_app_error_code, - strm->stream_user_data); + conn, strm->stream_id, 
strm->tx.stop_sending_app_error_code, + strm->stream_user_data); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); @@ -3636,13 +3644,13 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.type = NGTCP2_FRAME_STOP_SENDING; nfrc->fr.stop_sending.stream_id = strm->stream_id; nfrc->fr.stop_sending.app_error_code = - strm->tx.stop_sending_app_error_code; + strm->tx.stop_sending_app_error_code; *pfrc = nfrc; strm->flags &= ~NGTCP2_STRM_FLAG_SEND_STOP_SENDING; - rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, - &nfrc->fr); + rv = + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); @@ -3672,7 +3680,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, strm->tx.last_blocked_offset = strm->tx.max_offset; rv = - conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); @@ -3698,10 +3706,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (conn->local.settings.max_stream_window && strm->tx.last_max_stream_data_ts != UINT64_MAX && ts - strm->tx.last_max_stream_data_ts < - NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && + NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && conn->local.settings.max_stream_window > strm->rx.window) { target_max_data = - NGTCP2_FLOW_WINDOW_SCALING_FACTOR * strm->rx.window; + NGTCP2_FLOW_WINDOW_SCALING_FACTOR * strm->rx.window; if (target_max_data > conn->local.settings.max_stream_window) { target_max_data = conn->local.settings.max_stream_window; } @@ -3721,14 +3729,14 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.type = NGTCP2_FRAME_MAX_STREAM_DATA; nfrc->fr.max_stream_data.stream_id = strm->stream_id; nfrc->fr.max_stream_data.max_stream_data = - strm->rx.unsent_max_offset + delta; + strm->rx.unsent_max_offset + delta; *pfrc = nfrc; strm->rx.max_offset = strm->rx.unsent_max_offset = - nfrc->fr.max_stream_data.max_stream_data; + nfrc->fr.max_stream_data.max_stream_data; rv = - conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); break; @@ -3806,7 +3814,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (*pfrc == NULL && conn->remote.bidi.unsent_max_streams > conn->remote.bidi.max_streams) { rv = conn_call_extend_max_remote_streams_bidi( - conn, conn->remote.bidi.unsent_max_streams); + conn, conn->remote.bidi.unsent_max_streams); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3838,7 +3846,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (*pfrc == NULL && conn->remote.uni.unsent_max_streams > conn->remote.uni.max_streams) { rv = conn_call_extend_max_remote_streams_uni( - conn, conn->remote.uni.unsent_max_streams); + conn, conn->remote.uni.unsent_max_streams); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3901,8 +3909,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (*pfrc == NULL && send_stream && (ndatalen = ngtcp2_pkt_stream_max_datalen( - vmsg->stream.strm->stream_id, vmsg->stream.strm->tx.offset, ndatalen, - left)) != (size_t)-1 && + vmsg->stream.strm->stream_id, vmsg->stream.strm->tx.offset, ndatalen, + left)) != (size_t)-1 && (ndatalen || datalen == 0)) { datacnt = 
ngtcp2_vec_copy_at_most(data, NGTCP2_MAX_STREAM_DATACNT, vmsg->stream.data, vmsg->stream.datacnt, @@ -3912,7 +3920,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, assert((datacnt == 0 && datalen == 0) || (datacnt && datalen)); rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, datacnt, &conn->frc_objalloc, conn->mem); + &nfrc, datacnt, &conn->frc_objalloc, conn->mem); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3944,6 +3952,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, vmsg->stream.strm->tx.offset += ndatalen; conn->tx.offset += ndatalen; + vmsg->stream.strm->flags |= NGTCP2_STRM_FLAG_ANY_SENT; if (fin) { ngtcp2_strm_shutdown(vmsg->stream.strm, NGTCP2_STRM_FLAG_SHUT_WR); @@ -4095,7 +4104,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } keep_alive_expired = - type == NGTCP2_PKT_1RTT && conn_keep_alive_expired(conn, ts); + type == NGTCP2_PKT_1RTT && conn_keep_alive_expired(conn, ts); if (conn->pktns.rtb.probe_pkt_left == 0 && !keep_alive_expired && !require_padding) { @@ -4154,8 +4163,11 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING; if (conn->pktns.rtb.probe_pkt_left) { rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_PROBE; + } else { + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING; } pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; + pkt_empty = 0; } } else if (pktns->tx.non_ack_pkt_start_ts == UINT64_MAX) { pktns->tx.non_ack_pkt_start_ts = ts; @@ -4166,19 +4178,19 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, /* TODO Push STREAM frame back to ngtcp2_strm if there is an error before ngtcp2_rtb_entry is safely created and added. 
*/ - if (require_padding || - /* Making full sized packet will help GSO a bit */ - ngtcp2_ppe_left(ppe) < 10) { - lfr.padding.len = ngtcp2_ppe_padding(ppe); - } else if (type == NGTCP2_PKT_1RTT) { - lfr.padding.len = ngtcp2_ppe_padding_size(ppe, min_pktlen); + if (require_padding) { + lfr.padding.len = ngtcp2_ppe_dgram_padding(ppe); } else { - lfr.padding.len = ngtcp2_ppe_padding_hp_sample(ppe); + lfr.padding.len = ngtcp2_ppe_padding_size(ppe, min_pktlen); + min_padded = 1; } if (lfr.padding.len) { + if (!min_padded || + (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { + padded = 1; + } lfr.type = NGTCP2_FRAME_PADDING; - padded = 1; ngtcp2_log_tx_fr(&conn->log, hd, &lfr); ngtcp2_qlog_write_frame(&conn->qlog, &lfr); } @@ -4200,11 +4212,11 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, hd, ts); } - rv = ngtcp2_rtb_entry_objalloc_new(&ent, hd, NULL, ts, (size_t)nwrite, - rtb_entry_flags, - &conn->rtb_entry_objalloc); + rv = + ngtcp2_rtb_entry_objalloc_new(&ent, hd, NULL, ts, (size_t)nwrite, + rtb_entry_flags, &conn->rtb_entry_objalloc); if (rv != 0) { - assert(ngtcp2_err_is_fatal((int)nwrite)); + assert(ngtcp2_err_is_fatal(rv)); return rv; } @@ -4220,7 +4232,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, ts); } - rv = conn_on_pkt_sent(conn, &pktns->rtb, ent); + rv = conn_on_pkt_sent(conn, pktns, ent); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_rtb_entry_objalloc_del(ent, &conn->rtb_entry_objalloc, @@ -4231,10 +4243,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { if (conn->cc.on_pkt_sent) { conn->cc.on_pkt_sent( - &conn->cc, &conn->cstat, - ngtcp2_cc_pkt_init(&cc_pkt, hd->pkt_num, (size_t)nwrite, - NGTCP2_PKTNS_ID_APPLICATION, ts, ent->rst.lost, - ent->rst.tx_in_flight, ent->rst.is_app_limited)); + &conn->cc, &conn->cstat, + ngtcp2_cc_pkt_init(&cc_pkt, hd->pkt_num, (size_t)nwrite, + NGTCP2_PKTNS_ID_APPLICATION, ts, ent->rst.lost, + ent->rst.tx_in_flight, ent->rst.is_app_limited)); } if (conn->flags & NGTCP2_CONN_FLAG_RESTART_IDLE_TIMER_ON_WRITE) { @@ -4269,9 +4281,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( - ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, - uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, + uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, + uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts) { int rv; ngtcp2_ppe ppe; ngtcp2_pkt_hd hd; @@ -4332,7 +4344,7 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); - ngtcp2_ppe_init(&ppe, dest, destlen, &cc); + ngtcp2_ppe_init(&ppe, dest, destlen, 0, &cc); rv = ngtcp2_ppe_encode_hd(&ppe, &hd); if (rv != 0) { @@ -4354,25 +4366,22 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( } lfr.type = NGTCP2_FRAME_PADDING; - if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) { - lfr.padding.len = ngtcp2_ppe_padding(&ppe); + if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL) { + lfr.padding.len = ngtcp2_ppe_dgram_padding_size(&ppe, destlen); + } else if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) { + lfr.padding.len = 
ngtcp2_ppe_dgram_padding(&ppe); } else { switch (fr->type) { case NGTCP2_FRAME_PATH_CHALLENGE: case NGTCP2_FRAME_PATH_RESPONSE: if (!conn->server || destlen >= NGTCP2_MAX_UDP_PAYLOAD_SIZE) { - lfr.padding.len = ngtcp2_ppe_padding(&ppe); + lfr.padding.len = ngtcp2_ppe_dgram_padding(&ppe); } else { lfr.padding.len = 0; } break; default: - if (type == NGTCP2_PKT_1RTT) { - lfr.padding.len = - ngtcp2_ppe_padding_size(&ppe, conn_min_short_pktlen(conn)); - } else { - lfr.padding.len = ngtcp2_ppe_padding_hp_sample(&ppe); - } + lfr.padding.len = ngtcp2_ppe_padding_size(&ppe, conn_min_pktlen(conn)); } } if (lfr.padding.len) { @@ -4402,6 +4411,12 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( conn_handle_unconfirmed_key_update_from_remote(conn, fr->ack.largest_ack, ts); } + + if (!(flags & (NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL | + NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING))) { + padded = 0; + } + break; } @@ -4412,14 +4427,14 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, &hd, ts); } - rv = ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, NULL, ts, (size_t)nwrite, - rtb_entry_flags, - &conn->rtb_entry_objalloc); + rv = + ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, NULL, ts, (size_t)nwrite, + rtb_entry_flags, &conn->rtb_entry_objalloc); if (rv != 0) { return rv; } - rv = conn_on_pkt_sent(conn, &pktns->rtb, rtbent); + rv = conn_on_pkt_sent(conn, pktns, rtbent); if (rv != 0) { ngtcp2_rtb_entry_objalloc_del(rtbent, &conn->rtb_entry_objalloc, &conn->frc_objalloc, conn->mem); @@ -4522,6 +4537,10 @@ static int conn_retire_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { ngtcp2_frame_chain *nfrc; int rv; + if (ngtcp2_conn_check_retired_dcid_tracked(conn, seq)) { + return 0; + } + rv = ngtcp2_conn_track_retired_dcid_seq(conn, seq); if (rv != 0) { return rv; @@ -4658,13 +4677,15 @@ static int conn_start_pmtud(ngtcp2_conn *conn) { assert(conn->remote.transport_params->max_udp_payload_size >= NGTCP2_MAX_UDP_PAYLOAD_SIZE); - hard_max_udp_payload_size = (size_t)ngtcp2_min( - conn->remote.transport_params->max_udp_payload_size, - (uint64_t)conn->local.settings.max_tx_udp_payload_size); + hard_max_udp_payload_size = (size_t)ngtcp2_min_uint64( + conn->remote.transport_params->max_udp_payload_size, + (uint64_t)conn->local.settings.max_tx_udp_payload_size); - rv = ngtcp2_pmtud_new(&conn->pmtud, conn->dcid.current.max_udp_payload_size, - hard_max_udp_payload_size, - conn->pktns.tx.last_pkt_num + 1, conn->mem); + rv = + ngtcp2_pmtud_new(&conn->pmtud, conn->dcid.current.max_udp_payload_size, + hard_max_udp_payload_size, conn->pktns.tx.last_pkt_num + 1, + conn->local.settings.pmtud_probes, + conn->local.settings.pmtud_probeslen, conn->mem); if (rv != 0) { return rv; } @@ -4715,12 +4736,11 @@ static ngtcp2_ssize conn_write_pmtud_probe(ngtcp2_conn *conn, lfr.type = NGTCP2_FRAME_PING; nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, probelen, NGTCP2_PKT_1RTT, - NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING, &conn->dcid.current.cid, &lfr, - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | - NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | - NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE, - NULL, ts); + conn, pi, dest, probelen, NGTCP2_PKT_1RTT, + NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL, &conn->dcid.current.cid, &lfr, + NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE, + NULL, ts); if (nwrite < 0) { return nwrite; } @@ -4813,19 +4833,19 @@ static size_t conn_shape_udp_payload(ngtcp2_conn *conn, const ngtcp2_dcid *dcid, 
assert(conn->remote.transport_params->max_udp_payload_size >= NGTCP2_MAX_UDP_PAYLOAD_SIZE); - payloadlen = - (size_t)ngtcp2_min((uint64_t)payloadlen, - conn->remote.transport_params->max_udp_payload_size); + payloadlen = (size_t)ngtcp2_min_uint64( + (uint64_t)payloadlen, + conn->remote.transport_params->max_udp_payload_size); } payloadlen = - ngtcp2_min(payloadlen, conn->local.settings.max_tx_udp_payload_size); + ngtcp2_min_size(payloadlen, conn->local.settings.max_tx_udp_payload_size); if (conn->local.settings.no_tx_udp_payload_size_shaping) { return payloadlen; } - return ngtcp2_min(payloadlen, dcid->max_udp_payload_size); + return ngtcp2_min_size(payloadlen, dcid->max_udp_payload_size); } static void conn_reset_congestion_state(ngtcp2_conn *conn, ngtcp2_tstamp ts); @@ -4920,15 +4940,15 @@ static ngtcp2_ssize conn_write_path_challenge(ngtcp2_conn *conn, initial_pto = conn_compute_initial_pto(conn, &conn->pktns); timeout = conn_compute_pto(conn, &conn->pktns); - timeout = ngtcp2_max(timeout, initial_pto); + timeout = ngtcp2_max_uint64(timeout, initial_pto); expiry = ts + timeout * (1ULL << pv->round); - destlen = ngtcp2_min(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); + destlen = ngtcp2_min_size(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); if (conn->server) { if (!(pv->dcid.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { tx_left = conn_server_tx_left(conn, &pv->dcid); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, tx_left); if (destlen == 0) { return 0; } @@ -4946,10 +4966,10 @@ static ngtcp2_ssize conn_write_path_challenge(ngtcp2_conn *conn, ngtcp2_pv_add_entry(pv, lfr.path_challenge.data, expiry, flags, ts); nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, - &pv->dcid.cid, &lfr, - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING, - &pv->dcid.ps.path, ts); + conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, + &pv->dcid.cid, &lfr, + NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING, + &pv->dcid.ps.path, ts); if (nwrite <= 0) { return nwrite; } @@ -5039,11 +5059,11 @@ static ngtcp2_ssize conn_write_path_response(ngtcp2_conn *conn, } } - destlen = ngtcp2_min(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); + destlen = ngtcp2_min_size(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); if (conn->server && !(dcid->flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { tx_left = conn_server_tx_left(conn, dcid); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, tx_left); if (destlen == 0) { return 0; } @@ -5053,9 +5073,8 @@ static ngtcp2_ssize conn_write_path_response(ngtcp2_conn *conn, memcpy(lfr.path_response.data, pcent->data, sizeof(lfr.path_response.data)); nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, - &dcid->cid, &lfr, NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING, &pcent->ps.path, - ts); + conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, + &dcid->cid, &lfr, NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING, &pcent->ps.path, ts); if (nwrite <= 0) { return nwrite; } @@ -5077,10 +5096,10 @@ ngtcp2_ssize ngtcp2_conn_write_pkt_versioned(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_tstamp ts) { return ngtcp2_conn_writev_stream_versioned( - conn, path, pkt_info_version, pi, dest, destlen, - /* pdatalen = */ NULL, NGTCP2_WRITE_STREAM_FLAG_NONE, - /* stream_id = */ -1, - /* 
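The conn_shape_udp_payload hunk above keeps the same clamping logic while switching to the width-specific min helpers. A simplified standalone sketch of that shaping, under the assumption that the remote transport parameters are present (the real code also asserts a minimum of NGTCP2_MAX_UDP_PAYLOAD_SIZE); all names here are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

    /* Clamp the candidate UDP payload size by: the peer's
       max_udp_payload_size transport parameter, the locally configured
       max_tx_udp_payload_size, and (unless shaping is disabled) the
       per-path limit learned by PMTUD. */
    static size_t shape_udp_payload(size_t payloadlen, uint64_t remote_max,
                                    size_t local_max, int no_shaping,
                                    size_t path_max) {
      if ((uint64_t)payloadlen > remote_max) {
        payloadlen = (size_t)remote_max;
      }
      payloadlen = min_size(payloadlen, local_max);
      if (no_shaping) {
        return payloadlen;
      }
      return min_size(payloadlen, path_max);
    }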
datav = */ NULL, /* datavcnt = */ 0, ts); + conn, path, pkt_info_version, pi, dest, destlen, + /* pdatalen = */ NULL, NGTCP2_WRITE_STREAM_FLAG_NONE, + /* stream_id = */ -1, + /* datav = */ NULL, /* datavcnt = */ 0, ts); } /* @@ -5225,8 +5244,8 @@ static int conn_on_retry(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, retry.odcid = conn->dcid.current.cid; rv = ngtcp2_pkt_verify_retry_tag( - conn->client_chosen_version, &retry, pkt, pktlen, conn->callbacks.encrypt, - &conn->crypto.retry_aead, &conn->crypto.retry_aead_ctx); + conn->client_chosen_version, &retry, pkt, pktlen, conn->callbacks.encrypt, + &conn->crypto.retry_aead, &conn->crypto.retry_aead_ctx); if (rv != 0) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "unable to verify Retry packet integrity"); @@ -5334,8 +5353,8 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, ngtcp2_acktr_recv_ack(&pktns->acktr, fr); - num_acked = ngtcp2_rtb_recv_ack(&pktns->rtb, fr, &conn->cstat, conn, pktns, - pkt_ts, ts); + num_acked = + ngtcp2_rtb_recv_ack(&pktns->rtb, fr, &conn->cstat, conn, pktns, pkt_ts, ts); if (num_acked < 0) { assert(ngtcp2_err_is_fatal((int)num_acked)); return (int)num_acked; @@ -5351,7 +5370,7 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, (conn->server || (conn->flags & NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED))) { /* Reset PTO count but no less than 2 to avoid frequent probe packet transmission. */ - cstat->pto_count = ngtcp2_min(cstat->pto_count, 2); + cstat->pto_count = ngtcp2_min_size(cstat->pto_count, 2); } ngtcp2_conn_set_loss_detection_timer(conn, ts); @@ -5366,7 +5385,7 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, static void assign_recved_ack_delay_unscaled(ngtcp2_ack *fr, uint64_t ack_delay_exponent) { fr->ack_delay_unscaled = - fr->ack_delay * (1ULL << ack_delay_exponent) * NGTCP2_MICROSECONDS; + fr->ack_delay * (1ULL << ack_delay_exponent) * NGTCP2_MICROSECONDS; } /* @@ -5467,7 +5486,7 @@ static int conn_recv_max_stream_data(ngtcp2_conn *conn, * conn_recv_max_data processes received MAX_DATA frame |fr|. 
*/ static void conn_recv_max_data(ngtcp2_conn *conn, const ngtcp2_max_data *fr) { - conn->tx.max_offset = ngtcp2_max(conn->tx.max_offset, fr->max_data); + conn->tx.max_offset = ngtcp2_max_uint64(conn->tx.max_offset, fr->max_data); } /* @@ -5496,7 +5515,7 @@ static int conn_buffer_pkt(ngtcp2_conn *conn, ngtcp2_pktns *pktns, } rv = - ngtcp2_pkt_chain_new(&pc, path, pi, pkt, pktlen, dgramlen, ts, conn->mem); + ngtcp2_pkt_chain_new(&pc, path, pi, pkt, pktlen, dgramlen, ts, conn->mem); if (rv != 0) { return rv; } @@ -5695,8 +5714,8 @@ conn_emit_pending_crypto_data(ngtcp2_conn *conn, offset = rx_offset; rx_offset += datalen; - rv = conn_call_recv_crypto_data(conn, encryption_level, offset, data, - datalen); + rv = + conn_call_recv_crypto_data(conn, encryption_level, offset, data, datalen); if (rv != 0) { return rv; } @@ -5735,7 +5754,7 @@ static int conn_recv_connection_close(ngtcp2_conn *conn, } } - ccerr->reasonlen = ngtcp2_min(fr->reasonlen, NGTCP2_CCERR_MAX_REASONLEN); + ccerr->reasonlen = ngtcp2_min_size(fr->reasonlen, NGTCP2_CCERR_MAX_REASONLEN); ngtcp2_cpymem((uint8_t *)ccerr->reason, fr->reason, ccerr->reasonlen); return 0; @@ -5907,9 +5926,9 @@ static int pktns_commit_recv_pkt_num(ngtcp2_pktns *pktns, int64_t pkt_num, if (pktns->rx.max_ack_eliciting_pkt_num != -1) { if (pkt_num < pktns->rx.max_ack_eliciting_pkt_num) { ngtcp2_acktr_immediate_ack(&pktns->acktr); - } else if (pkt_num > pktns->rx.max_ack_eliciting_pkt_num) { + } else if (pkt_num != pktns->rx.max_ack_eliciting_pkt_num + 1) { r = ngtcp2_gaptr_get_first_gap_after( - &pktns->rx.pngap, (uint64_t)pktns->rx.max_ack_eliciting_pkt_num); + &pktns->rx.pngap, (uint64_t)pktns->rx.max_ack_eliciting_pkt_num); if (r.begin < (uint64_t)pkt_num) { ngtcp2_acktr_immediate_ack(&pktns->acktr); @@ -5977,7 +5996,7 @@ static int vneg_available_versions_includes(const uint8_t *available_versions, } for (i = 0; i < available_versionslen; i += sizeof(uint32_t)) { - available_versions = ngtcp2_get_uint32(&v, available_versions); + available_versions = ngtcp2_get_uint32be(&v, available_versions); if (version == v) { return 1; @@ -6097,6 +6116,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_strm *crypto; ngtcp2_encryption_level encryption_level; int invalid_reserved_bits = 0; + size_t num_ack_processed = 0; if (pktlen == 0) { return 0; @@ -6116,8 +6136,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "buffering 1RTT packet len=%zu", pktlen); - rv = conn_buffer_pkt(conn, &conn->pktns, path, pi, pkt, pktlen, dgramlen, - ts); + rv = + conn_buffer_pkt(conn, &conn->pktns, path, pi, pkt, pktlen, dgramlen, ts); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -6158,8 +6178,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, "packet was ignored because of mismatched SCID"); return NGTCP2_ERR_DISCARD_PKT; } - rv = conn_on_version_negotiation(conn, &hd, pkt + hdpktlen, - pktlen - hdpktlen); + rv = + conn_on_version_negotiation(conn, &hd, pkt + hdpktlen, pktlen - hdpktlen); if (rv != 0) { if (ngtcp2_err_is_fatal(rv)) { return rv; @@ -6250,7 +6270,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_ssize nread2; /* TODO Avoid to parse header twice. 
*/ nread2 = - conn_recv_pkt(conn, path, pi, pkt, pktlen, dgramlen, pkt_ts, ts); + conn_recv_pkt(conn, path, pi, pkt, pktlen, dgramlen, pkt_ts, ts); if (nread2 < 0) { return nread2; } @@ -6275,8 +6295,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, case NGTCP2_PKT_INITIAL: if (!conn->in_pktns) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PKT, - "Initial packet is discarded because keys have been discarded"); + &conn->log, NGTCP2_LOG_EVENT_PKT, + "Initial packet is discarded because keys have been discarded"); return (ngtcp2_ssize)pktlen; } @@ -6285,10 +6305,10 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (conn->server) { if (dgramlen < NGTCP2_MAX_UDP_PAYLOAD_SIZE) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PKT, - "Initial packet was ignored because it is included in UDP datagram " - "less than %zu bytes: %zu bytes", - NGTCP2_MAX_UDP_PAYLOAD_SIZE, dgramlen); + &conn->log, NGTCP2_LOG_EVENT_PKT, + "Initial packet was ignored because it is included in UDP datagram " + "less than %zu bytes: %zu bytes", + NGTCP2_MAX_UDP_PAYLOAD_SIZE, dgramlen); return NGTCP2_ERR_DISCARD_PKT; } if (conn->local.settings.tokenlen) { @@ -6329,10 +6349,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, /* Install new Initial keys using QUIC version = hd.version */ rv = conn_call_version_negotiation( - conn, hd.version, - (conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY) - ? &conn->dcid.current.cid - : &conn->rcid); + conn, hd.version, + (conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY) ? &conn->dcid.current.cid + : &conn->rcid); if (rv != 0) { return rv; } @@ -6364,8 +6383,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (!conn->hs_pktns->crypto.rx.ckm) { if (conn->server) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PKT, - "Handshake packet at this point is unexpected and discarded"); + &conn->log, NGTCP2_LOG_EVENT_PKT, + "Handshake packet at this point is unexpected and discarded"); return (ngtcp2_ssize)pktlen; } ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, @@ -6420,8 +6439,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, payload = pkt + hdpktlen; payloadlen = hd.len - hd.pkt_numlen; - hd.pkt_num = ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, - hd.pkt_numlen); + hd.pkt_num = + ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, hd.pkt_numlen); if (hd.pkt_num > NGTCP2_MAX_PKT_NUM) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "pkn=%" PRId64 " is greater than maximum pkn", hd.pkt_num); @@ -6550,6 +6569,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, switch (fr->type) { case NGTCP2_FRAME_ACK: case NGTCP2_FRAME_ACK_ECN: + if (num_ack_processed >= NGTCP2_MAX_ACK_PER_PKT) { + break; + } if (!conn->server && hd.type == NGTCP2_PKT_HANDSHAKE) { conn->flags |= NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED; } @@ -6557,6 +6579,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (rv != 0) { return rv; } + ++num_ack_processed; break; case NGTCP2_FRAME_PADDING: break; @@ -6609,8 +6632,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, /* Initial and Handshake are always acknowledged without delay. No need to call ngtcp2_acktr_immediate_ack(). 
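The num_ack_processed additions above put a hard cap on how many ACK/ACK_ECN frames in a single packet receive full processing; further ACK frames are simply skipped. A sketch of that guard pattern; MAX_ACK_PER_PKT below is a placeholder value, not the library's actual NGTCP2_MAX_ACK_PER_PKT constant:

    #include <stddef.h>

    enum { MAX_ACK_PER_PKT = 8 }; /* placeholder cap */

    /* Only the first MAX_ACK_PER_PKT ACK frames in one packet do ACK
       processing; later ones are ignored, bounding per-packet work. */
    static size_t count_processed_acks(const int *frame_is_ack, size_t nfr) {
      size_t num_ack_processed = 0;
      for (size_t i = 0; i < nfr; ++i) {
        if (!frame_is_ack[i]) {
          continue;
        }
        if (num_ack_processed >= MAX_ACK_PER_PKT) {
          continue; /* skip further ACK frames in this packet */
        }
        /* full ACK processing would run here */
        ++num_ack_processed;
      }
      return num_ack_processed;
    }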
*/ - rv = ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, - pkt_ts); + rv = + ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, pkt_ts); if (rv != 0) { return rv; } @@ -6661,7 +6684,7 @@ static ngtcp2_ssize conn_recv_handshake_cpkt(ngtcp2_conn *conn, while (pktlen) { nread = - conn_recv_handshake_pkt(conn, path, pi, pkt, pktlen, dgramlen, ts, ts); + conn_recv_handshake_pkt(conn, path, pi, pkt, pktlen, dgramlen, ts, ts); if (nread < 0) { if (ngtcp2_err_is_fatal((int)nread)) { return nread; @@ -6673,7 +6696,7 @@ static ngtcp2_ssize conn_recv_handshake_cpkt(ngtcp2_conn *conn, if ((pkt[0] & NGTCP2_HEADER_FORM_BIT) && pktlen > 4) { /* Not a Version Negotiation packet */ - ngtcp2_get_uint32(&version, &pkt[1]); + ngtcp2_get_uint32be(&version, &pkt[1]); if (ngtcp2_pkt_get_type_long(version, pkt[0]) == NGTCP2_PKT_INITIAL) { if (conn->server) { if (is_unrecoverable_error((int)nread)) { @@ -6738,14 +6761,14 @@ int ngtcp2_conn_init_stream(ngtcp2_conn *conn, ngtcp2_strm *strm, if (bidi_stream(stream_id)) { if (local_stream) { max_rx_offset = - conn->local.transport_params.initial_max_stream_data_bidi_local; + conn->local.transport_params.initial_max_stream_data_bidi_local; max_tx_offset = - conn->remote.transport_params->initial_max_stream_data_bidi_remote; + conn->remote.transport_params->initial_max_stream_data_bidi_remote; } else { max_rx_offset = - conn->local.transport_params.initial_max_stream_data_bidi_remote; + conn->local.transport_params.initial_max_stream_data_bidi_remote; max_tx_offset = - conn->remote.transport_params->initial_max_stream_data_bidi_local; + conn->remote.transport_params->initial_max_stream_data_bidi_local; } } else if (local_stream) { max_rx_offset = 0; @@ -6759,8 +6782,8 @@ int ngtcp2_conn_init_stream(ngtcp2_conn *conn, ngtcp2_strm *strm, max_tx_offset, stream_user_data, &conn->frc_objalloc, conn->mem); - rv = ngtcp2_map_insert(&conn->strms, (ngtcp2_map_key_type)strm->stream_id, - strm); + rv = + ngtcp2_map_insert(&conn->strms, (ngtcp2_map_key_type)strm->stream_id, strm); if (rv != 0) { assert(rv != NGTCP2_ERR_INVALID_ARGUMENT); goto fail; @@ -6833,6 +6856,12 @@ static int conn_emit_pending_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, return rv; } + /* ngtcp2_conn_shutdown_stream_read from a callback will free + strm->rx.rob. 
*/ + if (!strm->rx.rob) { + return 0; + } + ngtcp2_rob_pop(strm->rx.rob, rx_offset - datalen, datalen); } } @@ -6901,7 +6930,8 @@ static int conn_recv_crypto(ngtcp2_conn *conn, return 0; } - crypto->rx.last_offset = ngtcp2_max(crypto->rx.last_offset, fr_end_offset); + crypto->rx.last_offset = + ngtcp2_max_uint64(crypto->rx.last_offset, fr_end_offset); /* TODO Before dispatching incoming data to TLS stack, make sure that previous data in previous encryption level has been @@ -6917,14 +6947,14 @@ static int conn_recv_crypto(ngtcp2_conn *conn, rx_offset += datalen; ngtcp2_strm_update_rx_offset(crypto, rx_offset); - rv = conn_call_recv_crypto_data(conn, encryption_level, offset, data, - datalen); + rv = + conn_call_recv_crypto_data(conn, encryption_level, offset, data, datalen); if (rv != 0) { return rv; } - rv = conn_emit_pending_crypto_data(conn, encryption_level, crypto, - rx_offset); + rv = + conn_emit_pending_crypto_data(conn, encryption_level, crypto, rx_offset); if (rv != 0) { return rv; } @@ -7100,7 +7130,8 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { return NGTCP2_ERR_FINAL_SIZE; } - strm->rx.last_offset = ngtcp2_max(strm->rx.last_offset, fr_end_offset); + strm->rx.last_offset = + ngtcp2_max_uint64(strm->rx.last_offset, fr_end_offset); if (fr_end_offset <= rx_offset) { return 0; @@ -7220,9 +7251,9 @@ handle_max_remote_streams_extension(uint64_t *punsent_max_remote_streams, size_t n) { if ( #if SIZE_MAX > UINT32_MAX - NGTCP2_MAX_STREAMS < n || + NGTCP2_MAX_STREAMS < n || #endif /* SIZE_MAX > UINT32_MAX */ - *punsent_max_remote_streams > (uint64_t)(NGTCP2_MAX_STREAMS - n)) { + *punsent_max_remote_streams > (uint64_t)(NGTCP2_MAX_STREAMS - n)) { *punsent_max_remote_streams = NGTCP2_MAX_STREAMS; } else { *punsent_max_remote_streams += n; @@ -7360,7 +7391,7 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, which are not passed to application. 
*/ if (!(strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING)) { ngtcp2_conn_extend_max_offset(conn, strm->rx.last_offset - - ngtcp2_strm_rx_offset(strm)); + ngtcp2_strm_rx_offset(strm)); } conn->rx.offset += datalen; @@ -7368,7 +7399,7 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, strm->rx.last_offset = fr->final_size; strm->flags |= - NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_RESET_STREAM_RECVED; + NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_RESET_STREAM_RECVED; ngtcp2_strm_set_app_error_code(strm, fr->app_error_code); @@ -7471,7 +7502,7 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, } strm->flags |= - NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_STOP_SENDING_RECVED; + NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_STOP_SENDING_RECVED; ngtcp2_strm_streamfrq_clear(strm); @@ -7487,7 +7518,7 @@ static int check_stateless_reset(const ngtcp2_dcid *dcid, const ngtcp2_pkt_stateless_reset *sr) { return ngtcp2_path_eq(&dcid->ps.path, path) && ngtcp2_dcid_verify_stateless_reset_token( - dcid, sr->stateless_reset_token) == 0; + dcid, sr->stateless_reset_token) == 0; } /* @@ -7576,7 +7607,7 @@ static int conn_recv_max_streams(ngtcp2_conn *conn, return NGTCP2_ERR_FRAME_ENCODING; } - n = ngtcp2_min(fr->max_streams, NGTCP2_MAX_STREAMS); + n = ngtcp2_min_uint64(fr->max_streams, NGTCP2_MAX_STREAMS); if (fr->type == NGTCP2_FRAME_MAX_STREAMS_BIDI) { if (conn->local.bidi.max_streams < n) { @@ -8221,7 +8252,7 @@ static int conn_recv_handshake_done(ngtcp2_conn *conn, ngtcp2_tstamp ts) { conn->pktns.rtb.persistent_congestion_start_ts = ts; - conn_discard_handshake_state(conn, ts); + ngtcp2_conn_discard_handshake_state(conn, ts); assert(conn->remote.transport_params); @@ -8323,9 +8354,9 @@ static int conn_prepare_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { new_tx_ckm = conn->crypto.key_update.new_tx_ckm; rv = conn_call_update_key( - conn, new_rx_ckm->secret.base, new_tx_ckm->secret.base, &rx_aead_ctx, - new_rx_ckm->iv.base, &tx_aead_ctx, new_tx_ckm->iv.base, - rx_ckm->secret.base, tx_ckm->secret.base, secretlen); + conn, new_rx_ckm->secret.base, new_tx_ckm->secret.base, &rx_aead_ctx, + new_rx_ckm->iv.base, &tx_aead_ctx, new_tx_ckm->iv.base, rx_ckm->secret.base, + tx_ckm->secret.base, secretlen); if (rv != 0) { return rv; } @@ -8340,7 +8371,7 @@ static int conn_prepare_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (conn->crypto.key_update.old_rx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); + conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); ngtcp2_crypto_km_del(conn->crypto.key_update.old_rx_ckm, conn->mem); conn->crypto.key_update.old_rx_ckm = NULL; } @@ -8412,7 +8443,6 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, size_t dgramlen, int new_cid_used, ngtcp2_tstamp ts) { - ngtcp2_dcid dcid, *bound_dcid, *last; ngtcp2_pv *pv; int rv; @@ -8448,9 +8478,9 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, } remote_addr_cmp = - ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); + ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); local_addr_eq = - ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local); + ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local); /* * When to change DCID? 
RFC 9002 section 9.5 says: @@ -8480,8 +8510,8 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, bound_dcid = ngtcp2_ringbuf_get(&conn->dcid.bound.rb, i); if (ngtcp2_path_eq(&bound_dcid->ps.path, path)) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_CON, - "Found DCID which has already been bound to the new path"); + &conn->log, NGTCP2_LOG_EVENT_CON, + "Found DCID which has already been bound to the new path"); ngtcp2_dcid_copy(&dcid, bound_dcid); if (i == 0) { @@ -8550,21 +8580,21 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, pv->fallback_pto = pto; } + ngtcp2_dcid_copy(&conn->dcid.current, &dcid); + if (!local_addr_eq || (remote_addr_cmp & (NGTCP2_ADDR_COMPARE_FLAG_ADDR | NGTCP2_ADDR_COMPARE_FLAG_FAMILY))) { conn_reset_congestion_state(conn, ts); } - ngtcp2_dcid_copy(&conn->dcid.current, &dcid); - conn_reset_ecn_validation_state(conn); ngtcp2_conn_stop_pmtud(conn); if (conn->pv) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PTV, - "path migration is aborted because new migration has started"); + &conn->log, NGTCP2_LOG_EVENT_PTV, + "path migration is aborted because new migration has started"); rv = conn_abort_pv(conn, ts); if (rv != 0) { return rv; @@ -8658,6 +8688,7 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, int rv; int require_ack = 0; ngtcp2_pktns *pktns; + size_t num_ack_processed = 0; assert(hd->type == NGTCP2_PKT_HANDSHAKE); @@ -8692,6 +8723,9 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, switch (fr->type) { case NGTCP2_FRAME_ACK: case NGTCP2_FRAME_ACK_ECN: + if (num_ack_processed >= NGTCP2_MAX_ACK_PER_PKT) { + break; + } if (!conn->server) { conn->flags |= NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED; } @@ -8699,6 +8733,7 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, if (rv != 0) { return rv; } + ++num_ack_processed; break; case NGTCP2_FRAME_PADDING: break; @@ -8762,7 +8797,7 @@ conn_allow_path_change_under_disable_active_migration(ngtcp2_conn *conn, (NAT rebinding). 
*/ if (ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local)) { remote_addr_cmp = - ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); + ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); return (remote_addr_cmp | NGTCP2_ADDR_COMPARE_FLAG_PORT) == NGTCP2_ADDR_COMPARE_FLAG_PORT; @@ -8860,6 +8895,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, int recv_ncid = 0; int new_cid_used = 0; int path_challenge_recved = 0; + size_t num_ack_processed = 0; if (conn->server && conn->local.transport_params.disable_active_migration && !ngtcp2_path_eq(&conn->dcid.current.ps.path, path) && @@ -8992,8 +9028,8 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, payload = pkt + hdpktlen; payloadlen = pktlen - hdpktlen; - hd.pkt_num = ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, - hd.pkt_numlen); + hd.pkt_num = + ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, hd.pkt_numlen); if (hd.pkt_num > NGTCP2_MAX_PKT_NUM) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "pkn=%" PRId64 " is greater than maximum pkn", hd.pkt_num); @@ -9057,7 +9093,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (hd.type == NGTCP2_PKT_1RTT && ++conn->crypto.decryption_failure_count >= - pktns->crypto.ctx.max_decryption_failure) { + pktns->crypto.ctx.max_decryption_failure) { return NGTCP2_ERR_AEAD_LIMIT_REACHED; } @@ -9163,7 +9199,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, } assert(conn->remote.transport_params); assign_recved_ack_delay_unscaled( - &fr->ack, conn->remote.transport_params->ack_delay_exponent); + &fr->ack, conn->remote.transport_params->ack_delay_exponent); break; } @@ -9210,6 +9246,9 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, switch (fr->type) { case NGTCP2_FRAME_ACK: case NGTCP2_FRAME_ACK_ECN: + if (num_ack_processed >= NGTCP2_MAX_ACK_PER_PKT) { + break; + } if (!conn->server) { conn->flags |= NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED; } @@ -9218,6 +9257,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return rv; } non_probing_pkt = 1; + ++num_ack_processed; break; case NGTCP2_FRAME_STREAM: rv = conn_recv_stream(conn, &fr->stream); @@ -9429,8 +9469,8 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_acktr_immediate_ack(&pktns->acktr); } - rv = ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, - pkt_ts); + rv = + ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, pkt_ts); if (rv != 0) { return rv; } @@ -9602,8 +9642,8 @@ static int conn_handshake_completed(ngtcp2_conn *conn) { } } if (conn->local.uni.max_streams > 0) { - rv = conn_call_extend_max_local_streams_uni(conn, - conn->local.uni.max_streams); + rv = + conn_call_extend_max_local_streams_uni(conn, conn->local.uni.max_streams); if (rv != 0) { return rv; } @@ -9822,7 +9862,7 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, } if (conn->hs_pktns->rx.max_pkt_num != -1) { - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } if (!conn_is_tls_handshake_completed(conn)) { @@ -9888,7 +9928,7 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, return rv; } - conn_discard_handshake_state(conn, ts); + ngtcp2_conn_discard_handshake_state(conn, ts); rv = conn_enqueue_handshake_done(conn); if (rv != 0) { @@ -9936,7 +9976,7 @@ int 
ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, pktlen); if (pktlen == 0) { - return NGTCP2_ERR_INVALID_ARGUMENT; + return 0; } /* client does not expect a packet from unknown path. */ @@ -10035,13 +10075,12 @@ static int conn_check_pkt_num_exhausted(ngtcp2_conn *conn) { * conn_retransmit_retry_early retransmits 0RTT packet after Retry is * received from server. */ -static ngtcp2_ssize conn_retransmit_retry_early(ngtcp2_conn *conn, - ngtcp2_pkt_info *pi, - uint8_t *dest, size_t destlen, - uint8_t flags, - ngtcp2_tstamp ts) { - return conn_write_pkt(conn, pi, dest, destlen, NULL, NGTCP2_PKT_0RTT, flags, - ts); +static ngtcp2_ssize +conn_retransmit_retry_early(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, + uint8_t *dest, size_t destlen, size_t dgram_offset, + uint8_t flags, ngtcp2_tstamp ts) { + return conn_write_pkt(conn, pi, dest, destlen, dgram_offset, NULL, + NGTCP2_PKT_0RTT, flags, ts); } /* @@ -10067,21 +10106,21 @@ static int conn_validate_early_transport_params_limits(ngtcp2_conn *conn) { assert(params); if (conn->early.transport_params.active_connection_id_limit > - params->active_connection_id_limit || + params->active_connection_id_limit || conn->early.transport_params.initial_max_data > - params->initial_max_data || + params->initial_max_data || conn->early.transport_params.initial_max_stream_data_bidi_local > - params->initial_max_stream_data_bidi_local || + params->initial_max_stream_data_bidi_local || conn->early.transport_params.initial_max_stream_data_bidi_remote > - params->initial_max_stream_data_bidi_remote || + params->initial_max_stream_data_bidi_remote || conn->early.transport_params.initial_max_stream_data_uni > - params->initial_max_stream_data_uni || + params->initial_max_stream_data_uni || conn->early.transport_params.initial_max_streams_bidi > - params->initial_max_streams_bidi || + params->initial_max_streams_bidi || conn->early.transport_params.initial_max_streams_uni > - params->initial_max_streams_uni || + params->initial_max_streams_uni || conn->early.transport_params.max_datagram_frame_size > - params->max_datagram_frame_size) { + params->max_datagram_frame_size) { return NGTCP2_ERR_PROTO; } @@ -10131,14 +10170,14 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (!(conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY)) { nwrite = - conn_write_client_initial(conn, pi, dest, destlen, write_datalen, ts); + conn_write_client_initial(conn, pi, dest, destlen, write_datalen, ts); if (nwrite <= 0) { return nwrite; } } else { - nwrite = conn_write_handshake_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); + nwrite = + conn_write_handshake_pkt(conn, pi, dest, destlen, 0, NGTCP2_PKT_INITIAL, + NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10146,10 +10185,10 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (pending_early_datalen) { early_spktlen = conn_retransmit_retry_early( - conn, pi, dest + nwrite, destlen - (size_t)nwrite, - nwrite ? NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING - : NGTCP2_WRITE_PKT_FLAG_NONE, - ts); + conn, pi, dest + nwrite, destlen - (size_t)nwrite, (size_t)nwrite, + nwrite ? 
NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING + : NGTCP2_WRITE_PKT_FLAG_NONE, + ts); if (early_spktlen < 0) { assert(ngtcp2_err_is_fatal((int)early_spktlen)); @@ -10163,6 +10202,8 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, return res; case NGTCP2_CS_CLIENT_WAIT_HANDSHAKE: + pending_early_datalen = 0; + if (!conn_handshake_probe_left(conn) && conn_cwnd_is_zero(conn)) { destlen = 0; } else { @@ -10174,7 +10215,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } nwrite = - conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); + conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10185,9 +10226,10 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } if (!conn_is_tls_handshake_completed(conn)) { - if (!(conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED)) { - nwrite = conn_retransmit_retry_early(conn, pi, dest, destlen, - NGTCP2_WRITE_PKT_FLAG_NONE, ts); + if (pending_early_datalen && + !(conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED)) { + nwrite = conn_retransmit_retry_early( + conn, pi, dest, destlen, (size_t)res, NGTCP2_WRITE_PKT_FLAG_NONE, ts); if (nwrite < 0) { return nwrite; } @@ -10250,8 +10292,8 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, assert(conn->dcid.current.seq == 0); assert(!(conn->dcid.current.flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT)); ngtcp2_dcid_set_token( - &conn->dcid.current, - conn->remote.transport_params->stateless_reset_token); + &conn->dcid.current, + conn->remote.transport_params->stateless_reset_token); } rv = conn_call_activate_dcid(conn, &conn->dcid.current); @@ -10271,7 +10313,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, return res; case NGTCP2_CS_SERVER_INITIAL: nwrite = - conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); + conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10284,7 +10326,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_CS_SERVER_WAIT_HANDSHAKE: if (conn_handshake_probe_left(conn) || !conn_cwnd_is_zero(conn)) { nwrite = - conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); + conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10363,8 +10405,8 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, datalen = ngtcp2_vec_len(vmsg->stream.data, vmsg->stream.datacnt); send_stream = conn_retry_early_payloadlen(conn) == 0; if (send_stream) { - write_datalen = ngtcp2_min(datalen + NGTCP2_STREAM_OVERHEAD, - NGTCP2_MIN_COALESCED_PAYLOADLEN); + write_datalen = ngtcp2_min_uint64(datalen + NGTCP2_STREAM_OVERHEAD, + NGTCP2_MIN_COALESCED_PAYLOADLEN); if (vmsg->stream.flags & NGTCP2_WRITE_STREAM_FLAG_MORE) { wflags |= NGTCP2_WRITE_PKT_FLAG_MORE; @@ -10428,8 +10470,8 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, return spktlen; } - early_spktlen = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_0RTT, - wflags, ts); + early_spktlen = conn_write_pkt(conn, pi, dest, destlen, (size_t)spktlen, vmsg, + NGTCP2_PKT_0RTT, wflags, ts); if (early_spktlen < 0) { switch (early_spktlen) { case NGTCP2_ERR_STREAM_DATA_BLOCKED: @@ -10520,10 +10562,10 @@ int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, size_t pktlen) { } int ngtcp2_conn_install_initial_key( - 
ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, - const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, + const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { ngtcp2_pktns *pktns = conn->in_pktns; int rv; @@ -10571,11 +10613,11 @@ int ngtcp2_conn_install_initial_key( } int ngtcp2_conn_install_vneg_initial_key( - ngtcp2_conn *conn, uint32_t version, - const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, - const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { + ngtcp2_conn *conn, uint32_t version, + const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, + const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { int rv; assert(ivlen >= 8); @@ -10622,8 +10664,8 @@ int ngtcp2_conn_install_vneg_initial_key( } int ngtcp2_conn_install_rx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { ngtcp2_pktns *pktns = conn->hs_pktns; int rv; @@ -10654,8 +10696,8 @@ int ngtcp2_conn_install_rx_handshake_key( } int ngtcp2_conn_install_tx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { ngtcp2_pktns *pktns = conn->hs_pktns; int rv; @@ -10905,7 +10947,7 @@ ngtcp2_tstamp ngtcp2_conn_loss_detection_expiry(ngtcp2_conn *conn) { } ngtcp2_tstamp ngtcp2_conn_internal_expiry(ngtcp2_conn *conn) { - ngtcp2_tstamp res = UINT64_MAX, t; + ngtcp2_tstamp res = UINT64_MAX; ngtcp2_duration pto = conn_compute_pto(conn, &conn->pktns); ngtcp2_scid *scid; ngtcp2_dcid *dcid; @@ -10916,21 +10958,19 @@ ngtcp2_tstamp ngtcp2_conn_internal_expiry(ngtcp2_conn *conn) { } if (conn->pmtud) { - res = ngtcp2_min(res, conn->pmtud->expiry); + res = ngtcp2_min_uint64(res, conn->pmtud->expiry); } if (!ngtcp2_pq_empty(&conn->scid.used)) { scid = ngtcp2_struct_of(ngtcp2_pq_top(&conn->scid.used), ngtcp2_scid, pe); if (scid->retired_ts != UINT64_MAX) { - t = scid->retired_ts + pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, scid->retired_ts + pto); } } if (ngtcp2_ringbuf_len(&conn->dcid.retired.rb)) { dcid = ngtcp2_ringbuf_get(&conn->dcid.retired.rb, 0); - t = dcid->retired_ts + pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, dcid->retired_ts + pto); } if (conn->dcid.current.cid.datalen) { @@ -10941,15 +10981,13 @@ ngtcp2_tstamp ngtcp2_conn_internal_expiry(ngtcp2_conn *conn) { assert(dcid->cid.datalen); assert(dcid->bound_ts != UINT64_MAX); - t = dcid->bound_ts + 3 * pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, dcid->bound_ts + 3 * pto); } } if (conn->server && conn->early.ckm && conn->early.discard_started_ts != UINT64_MAX) { - t 
= conn->early.discard_started_ts + 3 * pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, conn->early.discard_started_ts + 3 * pto); } return res; @@ -10969,7 +11007,7 @@ static ngtcp2_tstamp conn_handshake_expiry(ngtcp2_conn *conn) { if (conn_is_tls_handshake_completed(conn) || conn->local.settings.handshake_timeout == UINT64_MAX || conn->local.settings.initial_ts >= - UINT64_MAX - conn->local.settings.handshake_timeout) { + UINT64_MAX - conn->local.settings.handshake_timeout) { return UINT64_MAX; } @@ -10978,20 +11016,14 @@ static ngtcp2_tstamp conn_handshake_expiry(ngtcp2_conn *conn) { } ngtcp2_tstamp ngtcp2_conn_get_expiry(ngtcp2_conn *conn) { - ngtcp2_tstamp t1 = ngtcp2_conn_loss_detection_expiry(conn); - ngtcp2_tstamp t2 = ngtcp2_conn_ack_delay_expiry(conn); - ngtcp2_tstamp t3 = ngtcp2_conn_internal_expiry(conn); - ngtcp2_tstamp t4 = ngtcp2_conn_lost_pkt_expiry(conn); - ngtcp2_tstamp t5 = conn_keep_alive_expiry(conn); - ngtcp2_tstamp t6 = conn_handshake_expiry(conn); - ngtcp2_tstamp t7 = ngtcp2_conn_get_idle_expiry(conn); - ngtcp2_tstamp res = ngtcp2_min(t1, t2); - res = ngtcp2_min(res, t3); - res = ngtcp2_min(res, t4); - res = ngtcp2_min(res, t5); - res = ngtcp2_min(res, t6); - res = ngtcp2_min(res, t7); - return ngtcp2_min(res, conn->tx.pacing.next_ts); + ngtcp2_tstamp res = ngtcp2_min_uint64(ngtcp2_conn_loss_detection_expiry(conn), + ngtcp2_conn_ack_delay_expiry(conn)); + res = ngtcp2_min_uint64(res, ngtcp2_conn_internal_expiry(conn)); + res = ngtcp2_min_uint64(res, ngtcp2_conn_lost_pkt_expiry(conn)); + res = ngtcp2_min_uint64(res, conn_keep_alive_expiry(conn)); + res = ngtcp2_min_uint64(res, conn_handshake_expiry(conn)); + res = ngtcp2_min_uint64(res, ngtcp2_conn_get_idle_expiry(conn)); + return ngtcp2_min_uint64(res, conn->tx.pacing.next_ts); } int ngtcp2_conn_handle_expiry(ngtcp2_conn *conn, ngtcp2_tstamp ts) { @@ -11089,7 +11121,7 @@ ngtcp2_tstamp ngtcp2_conn_lost_pkt_expiry(ngtcp2_conn *conn) { ts = ngtcp2_rtb_lost_pkt_ts(&conn->in_pktns->rtb); if (ts != UINT64_MAX) { ts += conn_compute_pto(conn, conn->in_pktns); - res = ngtcp2_min(res, ts); + res = ngtcp2_min_uint64(res, ts); } } @@ -11097,14 +11129,14 @@ ngtcp2_tstamp ngtcp2_conn_lost_pkt_expiry(ngtcp2_conn *conn) { ts = ngtcp2_rtb_lost_pkt_ts(&conn->hs_pktns->rtb); if (ts != UINT64_MAX) { ts += conn_compute_pto(conn, conn->hs_pktns); - res = ngtcp2_min(res, ts); + res = ngtcp2_min_uint64(res, ts); } } ts = ngtcp2_rtb_lost_pkt_ts(&conn->pktns.rtb); if (ts != UINT64_MAX) { ts += conn_compute_pto(conn, &conn->pktns); - res = ngtcp2_min(res, ts); + res = ngtcp2_min_uint64(res, ts); } return res; @@ -11153,7 +11185,7 @@ static uint32_t select_preferred_version(const uint32_t *preferred_versions, } for (j = 0, p = available_versions; j < available_versionslen; j += sizeof(uint32_t)) { - p = ngtcp2_get_uint32(&v, p); + p = ngtcp2_get_uint32be(&v, p); if (preferred_versions[i] == v) { return v; @@ -11266,13 +11298,13 @@ ngtcp2_conn_server_negotiate_version(ngtcp2_conn *conn, assert(conn->client_chosen_version == version_info->chosen_version); return select_preferred_version( - conn->vneg.preferred_versions, conn->vneg.preferred_versionslen, - version_info->chosen_version, version_info->available_versions, - version_info->available_versionslen, version_info->chosen_version); + conn->vneg.preferred_versions, conn->vneg.preferred_versionslen, + version_info->chosen_version, version_info->available_versions, + version_info->available_versionslen, version_info->chosen_version); } int 
ngtcp2_conn_set_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params) { + ngtcp2_conn *conn, const ngtcp2_transport_params *params) { int rv; /* We expect this function is called once per QUIC connection, but @@ -11315,9 +11347,9 @@ int ngtcp2_conn_set_remote_transport_params( if (params->version_info_present) { if (!vneg_available_versions_includes( - params->version_info.available_versions, - params->version_info.available_versionslen, - params->version_info.chosen_version)) { + params->version_info.available_versions, + params->version_info.available_versionslen, + params->version_info.chosen_version)) { return NGTCP2_ERR_TRANSPORT_PARAM; } @@ -11326,7 +11358,7 @@ int ngtcp2_conn_set_remote_transport_params( } conn->negotiated_version = - ngtcp2_conn_server_negotiate_version(conn, ¶ms->version_info); + ngtcp2_conn_server_negotiate_version(conn, ¶ms->version_info); if (conn->negotiated_version != conn->client_chosen_version) { rv = conn_call_version_negotiation(conn, conn->negotiated_version, &conn->rcid); @@ -11339,7 +11371,7 @@ int ngtcp2_conn_set_remote_transport_params( } conn->local.transport_params.version_info.chosen_version = - conn->negotiated_version; + conn->negotiated_version; ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "the negotiated version is 0x%08x", @@ -11372,7 +11404,7 @@ int ngtcp2_conn_set_remote_transport_params( assert(!conn->remote.pending_transport_params); rv = ngtcp2_transport_params_copy_new( - &conn->remote.pending_transport_params, params, conn->mem); + &conn->remote.pending_transport_params, params, conn->mem); if (rv != 0) { return rv; } @@ -11424,9 +11456,9 @@ ngtcp2_ssize ngtcp2_conn_encode_0rtt_transport_params(ngtcp2_conn *conn, params.initial_max_streams_bidi = src->initial_max_streams_bidi; params.initial_max_streams_uni = src->initial_max_streams_uni; params.initial_max_stream_data_bidi_local = - src->initial_max_stream_data_bidi_local; + src->initial_max_stream_data_bidi_local; params.initial_max_stream_data_bidi_remote = - src->initial_max_stream_data_bidi_remote; + src->initial_max_stream_data_bidi_remote; params.initial_max_stream_data_uni = src->initial_max_stream_data_uni; params.initial_max_data = src->initial_max_data; params.active_connection_id_limit = src->active_connection_id_limit; @@ -11456,7 +11488,7 @@ int ngtcp2_conn_decode_and_set_0rtt_transport_params(ngtcp2_conn *conn, } int ngtcp2_conn_set_0rtt_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params) { + ngtcp2_conn *conn, const ngtcp2_transport_params *params) { ngtcp2_transport_params *p; assert(!conn->server); @@ -11475,41 +11507,41 @@ int ngtcp2_conn_set_0rtt_remote_transport_params( p->initial_max_streams_bidi = params->initial_max_streams_bidi; p->initial_max_streams_uni = params->initial_max_streams_uni; p->initial_max_stream_data_bidi_local = - params->initial_max_stream_data_bidi_local; + params->initial_max_stream_data_bidi_local; p->initial_max_stream_data_bidi_remote = - params->initial_max_stream_data_bidi_remote; + params->initial_max_stream_data_bidi_remote; p->initial_max_stream_data_uni = params->initial_max_stream_data_uni; p->initial_max_data = params->initial_max_data; /* we might hit garbage, then set the sane default. 
*/ p->active_connection_id_limit = - ngtcp2_max(NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT, - params->active_connection_id_limit); + ngtcp2_max_uint64(NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT, + params->active_connection_id_limit); p->max_datagram_frame_size = params->max_datagram_frame_size; /* we might hit garbage, then set the sane default. */ if (params->max_udp_payload_size) { - p->max_udp_payload_size = - ngtcp2_max(NGTCP2_MAX_UDP_PAYLOAD_SIZE, params->max_udp_payload_size); + p->max_udp_payload_size = ngtcp2_max_uint64(NGTCP2_MAX_UDP_PAYLOAD_SIZE, + params->max_udp_payload_size); } /* These parameters are treated specially. If server accepts early data, it must not set values for these parameters that are smaller than these remembered values. */ conn->early.transport_params.initial_max_streams_bidi = - params->initial_max_streams_bidi; + params->initial_max_streams_bidi; conn->early.transport_params.initial_max_streams_uni = - params->initial_max_streams_uni; + params->initial_max_streams_uni; conn->early.transport_params.initial_max_stream_data_bidi_local = - params->initial_max_stream_data_bidi_local; + params->initial_max_stream_data_bidi_local; conn->early.transport_params.initial_max_stream_data_bidi_remote = - params->initial_max_stream_data_bidi_remote; + params->initial_max_stream_data_bidi_remote; conn->early.transport_params.initial_max_stream_data_uni = - params->initial_max_stream_data_uni; + params->initial_max_stream_data_uni; conn->early.transport_params.initial_max_data = params->initial_max_data; conn->early.transport_params.active_connection_id_limit = - params->active_connection_id_limit; + params->active_connection_id_limit; conn->early.transport_params.max_datagram_frame_size = - params->max_datagram_frame_size; + params->max_datagram_frame_size; conn_sync_stream_id_limit(conn); @@ -11522,12 +11554,12 @@ int ngtcp2_conn_set_0rtt_remote_transport_params( } int ngtcp2_conn_set_local_transport_params_versioned( - ngtcp2_conn *conn, int transport_params_version, - const ngtcp2_transport_params *params) { + ngtcp2_conn *conn, int transport_params_version, + const ngtcp2_transport_params *params) { ngtcp2_transport_params paramsbuf; params = ngtcp2_transport_params_convert_to_latest( - ¶msbuf, transport_params_version, params); + ¶msbuf, transport_params_version, params); assert(conn->server); assert(params->active_connection_id_limit >= @@ -11576,7 +11608,7 @@ int ngtcp2_conn_commit_local_transport_params(ngtcp2_conn *conn) { } conn->rx.window = conn->rx.unsent_max_offset = conn->rx.max_offset = - params->initial_max_data; + params->initial_max_data; conn->remote.bidi.unsent_max_streams = params->initial_max_streams_bidi; conn->remote.bidi.max_streams = params->initial_max_streams_bidi; conn->remote.uni.unsent_max_streams = params->initial_max_streams_uni; @@ -11662,10 +11694,10 @@ ngtcp2_strm *ngtcp2_conn_find_stream(ngtcp2_conn *conn, int64_t stream_id) { } ngtcp2_ssize ngtcp2_conn_write_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts) { ngtcp2_vec datav; datav.len = datalen; @@ -11693,9 +11725,10 @@ static ngtcp2_ssize 
conn_write_vmsg_wrapper(ngtcp2_conn *conn, if (cstat->bytes_in_flight >= cstat->cwnd) { conn->rst.is_cwnd_limited = 1; - } - - if (nwrite == 0 && cstat->bytes_in_flight < cstat->cwnd) { + } else if ((cstat->cwnd >= cstat->ssthresh || + cstat->bytes_in_flight * 2 < cstat->cwnd) && + nwrite == 0 && conn_pacing_pkt_tx_allowed(conn, ts) && + (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED)) { conn->rst.app_limited = conn->rst.delivered + cstat->bytes_in_flight; if (conn->rst.app_limited == 0) { @@ -11707,10 +11740,10 @@ static ngtcp2_ssize conn_write_vmsg_wrapper(ngtcp2_conn *conn, } ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts) { ngtcp2_vmsg vmsg, *pvmsg; ngtcp2_strm *strm; int64_t datalen; @@ -11734,19 +11767,24 @@ ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( return NGTCP2_ERR_INVALID_ARGUMENT; } - if ((uint64_t)datalen > NGTCP2_MAX_VARINT - strm->tx.offset || - (uint64_t)datalen > NGTCP2_MAX_VARINT - conn->tx.offset) { - return NGTCP2_ERR_INVALID_ARGUMENT; - } + if (datalen == 0 && !(flags & NGTCP2_WRITE_STREAM_FLAG_FIN) && + (strm->flags & NGTCP2_STRM_FLAG_ANY_SENT)) { + pvmsg = NULL; + } else { + if ((uint64_t)datalen > NGTCP2_MAX_VARINT - strm->tx.offset || + (uint64_t)datalen > NGTCP2_MAX_VARINT - conn->tx.offset) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } - vmsg.type = NGTCP2_VMSG_TYPE_STREAM; - vmsg.stream.strm = strm; - vmsg.stream.flags = flags; - vmsg.stream.data = datav; - vmsg.stream.datacnt = datavcnt; - vmsg.stream.pdatalen = pdatalen; + vmsg.type = NGTCP2_VMSG_TYPE_STREAM; + vmsg.stream.strm = strm; + vmsg.stream.flags = flags; + vmsg.stream.data = datav; + vmsg.stream.datacnt = datavcnt; + vmsg.stream.pdatalen = pdatalen; - pvmsg = &vmsg; + pvmsg = &vmsg; + } } else { pvmsg = NULL; } @@ -11756,10 +11794,10 @@ ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( } ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts) { ngtcp2_vec datav; datav.len = datalen; @@ -11771,10 +11809,10 @@ ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( } ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts) { ngtcp2_vmsg vmsg; int64_t datalen; @@ -11837,7 +11875,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn 
*conn, ngtcp2_path *path, } origlen = destlen = - conn_shape_udp_payload(conn, &conn->dcid.current, destlen); + conn_shape_udp_payload(conn, &conn->dcid.current, destlen); if (!ppe_pending && pi) { pi->ecn = NGTCP2_ECN_NOT_ECT; @@ -11867,10 +11905,9 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, assert(dest[0] & NGTCP2_HEADER_FORM_BIT); assert(conn->negotiated_version); - if (ngtcp2_pkt_get_type_long(conn->negotiated_version, dest[0]) == - NGTCP2_PKT_INITIAL) { - /* We have added padding already, but in that case, there is no - space left to write 1RTT packet. */ + if (nwrite < NGTCP2_MAX_UDP_PAYLOAD_SIZE && + ngtcp2_pkt_get_type_long(conn->negotiated_version, dest[0]) == + NGTCP2_PKT_INITIAL) { wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING; } @@ -11890,7 +11927,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, return 0; } - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + origlen = (size_t)ngtcp2_min_uint64((uint64_t)origlen, server_tx_left); } return conn_write_handshake_ack_pkts(conn, pi, dest, origlen, ts); @@ -11902,15 +11939,15 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, if (server_tx_left == 0) { if (cstat->loss_detection_timer != UINT64_MAX) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "loss detection timer canceled due to amplification limit"); - cstat->loss_detection_timer = UINT64_MAX; + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + ngtcp2_conn_cancel_loss_detection_timer(conn); } return 0; } - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); } if (conn->in_pktns) { @@ -11931,14 +11968,12 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, dest += nwrite; destlen -= (size_t)nwrite; - if (conn->in_pktns && nwrite > 0) { + if (res < NGTCP2_MAX_UDP_PAYLOAD_SIZE && conn->in_pktns && nwrite > 0) { it = ngtcp2_rtb_head(&conn->in_pktns->rtb); if (!ngtcp2_ksl_it_end(&it)) { rtbent = ngtcp2_ksl_it_get(&it); if (rtbent->hd.pkt_num != prev_in_pkt_num && (rtbent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { - /* We have added padding already, but in that case, there - is no space left to write 1RTT packet. */ wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING; } } @@ -11959,7 +11994,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, return 0; } - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + origlen = (size_t)ngtcp2_min_uint64((uint64_t)origlen, server_tx_left); } return conn_write_ack_pkt(conn, pi, dest, origlen, NGTCP2_PKT_1RTT, ts); @@ -12004,12 +12039,12 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } /* dest and destlen have already been adjusted in ppe in the first run. They are adjusted for probe packet later. 
*/ - nwrite = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_1RTT, - wflags, ts); + nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg, + NGTCP2_PKT_1RTT, wflags, ts); goto fin; } else { conn->pkt.require_padding = - (wflags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING); + (wflags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING); if (conn->state == NGTCP2_CS_POST_HANDSHAKE) { rv = conn_prepare_key_update(conn, ts); @@ -12023,14 +12058,14 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } else { if (res == 0) { nwrite = - conn_write_path_response(conn, path, pi, dest, origdestlen, ts); + conn_write_path_response(conn, path, pi, dest, origdestlen, ts); if (nwrite) { goto fin; } if (conn->pv) { nwrite = - conn_write_path_challenge(conn, path, pi, dest, origdestlen, ts); + conn_write_path_challenge(conn, path, pi, dest, origdestlen, ts); if (nwrite) { goto fin; } @@ -12051,15 +12086,15 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, if (conn->server && !(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + origlen = (size_t)ngtcp2_min_uint64((uint64_t)origlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); if (server_tx_left == 0 && conn->cstat.loss_detection_timer != UINT64_MAX) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "loss detection timer canceled due to amplification limit"); - conn->cstat.loss_detection_timer = UINT64_MAX; + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + ngtcp2_conn_cancel_loss_detection_timer(conn); } } } @@ -12100,14 +12135,14 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, "transmit probe pkt left=%zu", conn->pktns.rtb.probe_pkt_left); - nwrite = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_1RTT, - wflags, ts); + nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg, + NGTCP2_PKT_1RTT, wflags, ts); goto fin; } - nwrite = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_1RTT, - wflags, ts); + nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg, + NGTCP2_PKT_1RTT, wflags, ts); if (nwrite) { assert(nwrite != NGTCP2_ERR_NOBUF); goto fin; @@ -12162,9 +12197,8 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pkt_type != NGTCP2_PKT_INITIAL) { if (in_pktns && conn->server) { nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, &conn->dcid.current.cid, &fr, - NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, NGTCP2_WRITE_PKT_FLAG_NONE, + &conn->dcid.current.cid, &fr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; } @@ -12177,9 +12211,9 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (pkt_type != NGTCP2_PKT_HANDSHAKE && hs_pktns && hs_pktns->crypto.tx.ckm) { nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, - NGTCP2_WRITE_PKT_FLAG_NONE, &conn->dcid.current.cid, &fr, - NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, + NGTCP2_WRITE_PKT_FLAG_NONE, &conn->dcid.current.cid, &fr, + NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; } @@ -12195,8 
+12229,8 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, pkt_type, flags, &conn->dcid.current.cid, &fr, - NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, pkt_type, flags, &conn->dcid.current.cid, &fr, + NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; @@ -12212,9 +12246,9 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t error_code, const uint8_t *reason, size_t reasonlen, + ngtcp2_tstamp ts) { ngtcp2_pktns *in_pktns = conn->in_pktns; ngtcp2_pktns *hs_pktns = conn->hs_pktns; uint8_t pkt_type; @@ -12247,7 +12281,7 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( if (conn->server) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); } if (conn->state == NGTCP2_CS_POST_HANDSHAKE || @@ -12275,9 +12309,9 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( } ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t app_error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t app_error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_tstamp ts) { ngtcp2_ssize nwrite; ngtcp2_ssize res = 0; ngtcp2_frame fr; @@ -12309,15 +12343,14 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( if (conn->server) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); } if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED)) { - nwrite = conn_write_connection_close(conn, pi, dest, destlen, - conn->hs_pktns->crypto.tx.ckm - ? NGTCP2_PKT_HANDSHAKE - : NGTCP2_PKT_INITIAL, - NGTCP2_APPLICATION_ERROR, NULL, 0, ts); + nwrite = conn_write_connection_close( + conn, pi, dest, destlen, + conn->hs_pktns->crypto.tx.ckm ? 
NGTCP2_PKT_HANDSHAKE : NGTCP2_PKT_INITIAL, + NGTCP2_APPLICATION_ERROR, NULL, 0, ts); if (nwrite < 0) { return nwrite; } @@ -12326,12 +12359,9 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( destlen -= (size_t)nwrite; } - if (conn->state != NGTCP2_CS_POST_HANDSHAKE) { - assert(res); - - if (!conn->server || !conn->pktns.crypto.tx.ckm) { - return res; - } + if (conn->state != NGTCP2_CS_POST_HANDSHAKE && + (!conn->server || !conn->pktns.crypto.tx.ckm)) { + return res; } assert(conn->pktns.crypto.tx.ckm); @@ -12343,8 +12373,8 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( fr.connection_close.reason = (uint8_t *)reason; nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, - &conn->dcid.current.cid, &fr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, + &conn->dcid.current.cid, &fr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; @@ -12392,12 +12422,22 @@ void ngtcp2_ccerr_set_liberr(ngtcp2_ccerr *ccerr, int liberr, ccerr_init(ccerr, NGTCP2_CCERR_TYPE_IDLE_CLOSE, NGTCP2_NO_ERROR, reason, reasonlen); + return; + case NGTCP2_ERR_DROP_CONN: + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_DROP_CONN, NGTCP2_NO_ERROR, reason, + reasonlen); + + return; + case NGTCP2_ERR_RETRY: + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_RETRY, NGTCP2_NO_ERROR, reason, + reasonlen); + return; }; ngtcp2_ccerr_set_transport_error( - ccerr, ngtcp2_err_infer_quic_transport_error_code(liberr), reason, - reasonlen); + ccerr, ngtcp2_err_infer_quic_transport_error_code(liberr), reason, + reasonlen); } void ngtcp2_ccerr_set_tls_alert(ngtcp2_ccerr *ccerr, uint8_t tls_alert, @@ -12415,9 +12455,9 @@ void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, } ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - const ngtcp2_ccerr *ccerr, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, const ngtcp2_ccerr *ccerr, + ngtcp2_tstamp ts) { (void)pkt_info_version; conn_update_timestamp(conn, ts); @@ -12425,12 +12465,12 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( switch (ccerr->type) { case NGTCP2_CCERR_TYPE_TRANSPORT: return ngtcp2_conn_write_connection_close_pkt( - conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, - ccerr->reasonlen, ts); + conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, + ccerr->reasonlen, ts); case NGTCP2_CCERR_TYPE_APPLICATION: return ngtcp2_conn_write_application_close_pkt( - conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, - ccerr->reasonlen, ts); + conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, + ccerr->reasonlen, ts); default: return 0; } @@ -12471,7 +12511,7 @@ int ngtcp2_conn_close_stream(ngtcp2_conn *conn, ngtcp2_strm *strm) { int ngtcp2_conn_close_stream_if_shut_rdwr(ngtcp2_conn *conn, ngtcp2_strm *strm) { if ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RDWR) == - NGTCP2_STRM_FLAG_SHUT_RDWR && + NGTCP2_STRM_FLAG_SHUT_RDWR && ((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED) || ngtcp2_strm_rx_offset(strm) == strm->rx.last_offset) && (((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM) && @@ -12537,7 +12577,7 @@ static int conn_shutdown_stream_read(ngtcp2_conn *conn, ngtcp2_strm *strm, which are not passed to application. 
*/ if (!(strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED)) { ngtcp2_conn_extend_max_offset(conn, strm->rx.last_offset - - ngtcp2_strm_rx_offset(strm)); + ngtcp2_strm_rx_offset(strm)); } strm->flags |= NGTCP2_STRM_FLAG_STOP_SENDING; @@ -12720,20 +12760,20 @@ static void conn_discard_early_data_state(ngtcp2_conn *conn) { ngtcp2_rtb_remove_early_data(&conn->pktns.rtb, &conn->cstat); - ngtcp2_map_each_free(&conn->strms, delete_strms_pq_each, conn); + ngtcp2_map_each(&conn->strms, delete_strms_pq_each, conn); ngtcp2_map_clear(&conn->strms); conn->tx.offset = 0; conn->tx.last_blocked_offset = UINT64_MAX; conn->rx.unsent_max_offset = conn->rx.max_offset = - conn->local.transport_params.initial_max_data; + conn->local.transport_params.initial_max_data; conn->remote.bidi.unsent_max_streams = conn->remote.bidi.max_streams = - conn->local.transport_params.initial_max_streams_bidi; + conn->local.transport_params.initial_max_streams_bidi; conn->remote.uni.unsent_max_streams = conn->remote.uni.max_streams = - conn->local.transport_params.initial_max_streams_uni; + conn->local.transport_params.initial_max_streams_uni; if (conn->server) { conn->local.bidi.next_stream_id = 1; @@ -12778,6 +12818,8 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, ngtcp2_duration ack_delay, ngtcp2_tstamp ts) { ngtcp2_conn_stat *cstat = &conn->cstat; + assert(rtt > 0); + if (cstat->min_rtt == UINT64_MAX) { cstat->latest_rtt = rtt; cstat->min_rtt = rtt; @@ -12788,43 +12830,43 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, if (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED) { assert(conn->remote.transport_params); - ack_delay = - ngtcp2_min(ack_delay, conn->remote.transport_params->max_ack_delay); + ack_delay = ngtcp2_min_uint64( + ack_delay, conn->remote.transport_params->max_ack_delay); } else if (ack_delay > 0 && rtt >= cstat->min_rtt && rtt < cstat->min_rtt + ack_delay) { /* Ignore RTT sample if adjusting ack_delay causes the sample less than min_rtt before handshake confirmation. */ ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "ignore rtt sample because ack_delay is too large latest_rtt=%" PRIu64 - " min_rtt=%" PRIu64 " ack_delay=%" PRIu64, - rtt / NGTCP2_MILLISECONDS, cstat->min_rtt / NGTCP2_MILLISECONDS, - ack_delay / NGTCP2_MILLISECONDS); + &conn->log, NGTCP2_LOG_EVENT_LDC, + "ignore rtt sample because ack_delay is too large latest_rtt=%" PRIu64 + " min_rtt=%" PRIu64 " ack_delay=%" PRIu64, + rtt / NGTCP2_MILLISECONDS, cstat->min_rtt / NGTCP2_MILLISECONDS, + ack_delay / NGTCP2_MILLISECONDS); return NGTCP2_ERR_INVALID_ARGUMENT; } cstat->latest_rtt = rtt; - cstat->min_rtt = ngtcp2_min(cstat->min_rtt, rtt); + cstat->min_rtt = ngtcp2_min_uint64(cstat->min_rtt, rtt); if (rtt >= cstat->min_rtt + ack_delay) { rtt -= ack_delay; } cstat->rttvar = (cstat->rttvar * 3 + (cstat->smoothed_rtt < rtt - ? rtt - cstat->smoothed_rtt - : cstat->smoothed_rtt - rtt)) / + ? 
rtt - cstat->smoothed_rtt + : cstat->smoothed_rtt - rtt)) / 4; cstat->smoothed_rtt = (cstat->smoothed_rtt * 7 + rtt) / 8; } ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "latest_rtt=%" PRIu64 " min_rtt=%" PRIu64 " smoothed_rtt=%" PRIu64 - " rttvar=%" PRIu64 " ack_delay=%" PRIu64, - cstat->latest_rtt / NGTCP2_MILLISECONDS, - cstat->min_rtt / NGTCP2_MILLISECONDS, - cstat->smoothed_rtt / NGTCP2_MILLISECONDS, - cstat->rttvar / NGTCP2_MILLISECONDS, ack_delay / NGTCP2_MILLISECONDS); + &conn->log, NGTCP2_LOG_EVENT_LDC, + "latest_rtt=%" PRIu64 " min_rtt=%" PRIu64 " smoothed_rtt=%" PRIu64 + " rttvar=%" PRIu64 " ack_delay=%" PRIu64, + cstat->latest_rtt / NGTCP2_MILLISECONDS, + cstat->min_rtt / NGTCP2_MILLISECONDS, + cstat->smoothed_rtt / NGTCP2_MILLISECONDS, + cstat->rttvar / NGTCP2_MILLISECONDS, ack_delay / NGTCP2_MILLISECONDS); return 0; } @@ -12879,8 +12921,8 @@ static ngtcp2_tstamp conn_get_earliest_pto_expiry(ngtcp2_conn *conn, ngtcp2_conn_stat *cstat = &conn->cstat; ngtcp2_tstamp *times = cstat->last_tx_pkt_ts; ngtcp2_duration duration = - compute_pto(cstat->smoothed_rtt, cstat->rttvar, /* max_ack_delay = */ 0) * - (1ULL << cstat->pto_count); + compute_pto(cstat->smoothed_rtt, cstat->rttvar, /* max_ack_delay = */ 0) * + (1ULL << cstat->pto_count); for (i = NGTCP2_PKTNS_ID_INITIAL; i < NGTCP2_PKTNS_ID_MAX; ++i) { if (ns[i] == NULL || ns[i]->rtb.num_pto_eliciting == 0 || @@ -12939,8 +12981,7 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (cstat->loss_detection_timer != UINT64_MAX) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss detection timer canceled"); - cstat->loss_detection_timer = UINT64_MAX; - cstat->pto_count = 0; + ngtcp2_conn_cancel_loss_detection_timer(conn); } return; } @@ -12948,13 +12989,20 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { cstat->loss_detection_timer = conn_get_earliest_pto_expiry(conn, ts); timeout = - cstat->loss_detection_timer > ts ? cstat->loss_detection_timer - ts : 0; + cstat->loss_detection_timer > ts ? 
cstat->loss_detection_timer - ts : 0; ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss_detection_timer=%" PRIu64 " timeout=%" PRIu64, cstat->loss_detection_timer, timeout / NGTCP2_MILLISECONDS); } +void ngtcp2_conn_cancel_loss_detection_timer(ngtcp2_conn *conn) { + ngtcp2_conn_stat *cstat = &conn->cstat; + + cstat->loss_detection_timer = UINT64_MAX; + cstat->pto_count = 0; +} + int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { ngtcp2_conn_stat *cstat = &conn->cstat; int rv; @@ -12966,8 +13014,7 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { switch (conn->state) { case NGTCP2_CS_CLOSING: case NGTCP2_CS_DRAINING: - cstat->loss_detection_timer = UINT64_MAX; - cstat->pto_count = 0; + ngtcp2_conn_cancel_loss_detection_timer(conn); return 0; default: break; @@ -13042,7 +13089,8 @@ static int conn_buffer_crypto_data(ngtcp2_conn *conn, const uint8_t **pdata, } if (!*pbufchain) { - rv = ngtcp2_buf_chain_new(pbufchain, ngtcp2_max(1024, datalen), conn->mem); + rv = ngtcp2_buf_chain_new(pbufchain, ngtcp2_max_size(1024, datalen), + conn->mem); if (rv != 0) { return rv; } @@ -13125,7 +13173,7 @@ int ngtcp2_conn_submit_new_token(ngtcp2_conn *conn, const uint8_t *token, assert(tokenlen); rv = ngtcp2_frame_chain_new_token_objalloc_new( - &nfrc, token, tokenlen, &conn->frc_objalloc, conn->mem); + &nfrc, token, tokenlen, &conn->frc_objalloc, conn->mem); if (rv != 0) { return rv; } @@ -13208,7 +13256,7 @@ static void copy_dcid_to_cid_token(ngtcp2_cid_token *dest, dest->cid = src->cid; ngtcp2_path_storage_init2(&dest->ps, &src->ps.path); if ((dest->token_present = - (src->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) != 0)) { + (src->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) != 0)) { memcpy(dest->token, src->token, NGTCP2_STATELESS_RESET_TOKENLEN); } } @@ -13411,8 +13459,8 @@ uint64_t ngtcp2_conn_get_streams_bidi_left(ngtcp2_conn *conn) { uint64_t n = ngtcp2_ord_stream_id(conn->local.bidi.next_stream_id); return n > conn->local.bidi.max_streams - ? 0 - : conn->local.bidi.max_streams - n + 1; + ? 0 + : conn->local.bidi.max_streams - n + 1; } uint64_t ngtcp2_conn_get_streams_uni_left(ngtcp2_conn *conn) { @@ -13444,7 +13492,7 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { conn->remote.transport_params->max_idle_timeout == 0 || (conn->local.transport_params.max_idle_timeout && conn->local.transport_params.max_idle_timeout < - conn->remote.transport_params->max_idle_timeout)) { + conn->remote.transport_params->max_idle_timeout)) { idle_timeout = conn->local.transport_params.max_idle_timeout; } else { idle_timeout = conn->remote.transport_params->max_idle_timeout; @@ -13455,10 +13503,10 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { } trpto = 3 * conn_compute_pto(conn, conn_is_tls_handshake_completed(conn) - ? &conn->pktns - : conn->hs_pktns); + ? &conn->pktns + : conn->hs_pktns); - idle_timeout = ngtcp2_max(idle_timeout, trpto); + idle_timeout = ngtcp2_max_uint64(idle_timeout, trpto); if (conn->idle_ts >= UINT64_MAX - idle_timeout) { return UINT64_MAX; @@ -13469,8 +13517,8 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn) { return conn_compute_pto(conn, conn_is_tls_handshake_completed(conn) - ? &conn->pktns - : conn->hs_pktns); + ? 
&conn->pktns + : conn->hs_pktns); } void ngtcp2_conn_set_initial_crypto_ctx(ngtcp2_conn *conn, @@ -13581,8 +13629,8 @@ void ngtcp2_conn_update_pkt_tx_time(ngtcp2_conn *conn, ngtcp2_tstamp ts) { /* 1.25 is the under-utilization avoidance factor described in https://datatracker.ietf.org/doc/html/rfc9002#section-7.7 */ pacing_interval = (conn->cstat.first_rtt_sample_ts == UINT64_MAX - ? NGTCP2_MILLISECONDS - : conn->cstat.smoothed_rtt) * + ? NGTCP2_MILLISECONDS + : conn->cstat.smoothed_rtt) * 100 / 125 / conn->cstat.cwnd; } @@ -13597,20 +13645,11 @@ size_t ngtcp2_conn_get_send_quantum(ngtcp2_conn *conn) { } int ngtcp2_conn_track_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { - size_t i; - if (conn->dcid.retire_unacked.len >= ngtcp2_arraylen(conn->dcid.retire_unacked.seqs)) { return NGTCP2_ERR_CONNECTION_ID_LIMIT; } - /* Make sure that we do not have a duplicate */ - for (i = 0; i < conn->dcid.retire_unacked.len; ++i) { - if (conn->dcid.retire_unacked.seqs[i] == seq) { - ngtcp2_unreachable(); - } - } - conn->dcid.retire_unacked.seqs[conn->dcid.retire_unacked.len++] = seq; return 0; @@ -13626,7 +13665,7 @@ void ngtcp2_conn_untrack_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { if (i != conn->dcid.retire_unacked.len - 1) { conn->dcid.retire_unacked.seqs[i] = - conn->dcid.retire_unacked.seqs[conn->dcid.retire_unacked.len - 1]; + conn->dcid.retire_unacked.seqs[conn->dcid.retire_unacked.len - 1]; } --conn->dcid.retire_unacked.len; @@ -13635,6 +13674,18 @@ void ngtcp2_conn_untrack_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { } } +int ngtcp2_conn_check_retired_dcid_tracked(ngtcp2_conn *conn, uint64_t seq) { + size_t i; + + for (i = 0; i < conn->dcid.retire_unacked.len; ++i) { + if (conn->dcid.retire_unacked.seqs[i] == seq) { + return 1; + } + } + + return 0; +} + size_t ngtcp2_conn_get_stream_loss_count(ngtcp2_conn *conn, int64_t stream_id) { ngtcp2_strm *strm = ngtcp2_conn_find_stream(conn, stream_id); @@ -13652,56 +13703,17 @@ void ngtcp2_path_challenge_entry_init(ngtcp2_path_challenge_entry *pcent, memcpy(pcent->data, data, sizeof(pcent->data)); } -void ngtcp2_settings_default_versioned(int settings_version, - ngtcp2_settings *settings) { - (void)settings_version; - - memset(settings, 0, sizeof(*settings)); - settings->cc_algo = NGTCP2_CC_ALGO_CUBIC; - settings->initial_rtt = NGTCP2_DEFAULT_INITIAL_RTT; - settings->ack_thresh = 2; - settings->max_tx_udp_payload_size = 1500 - 48; - settings->handshake_timeout = UINT64_MAX; -} - -void ngtcp2_transport_params_default_versioned( - int transport_params_version, ngtcp2_transport_params *params) { - size_t len; - - switch (transport_params_version) { - case NGTCP2_TRANSPORT_PARAMS_VERSION: - len = sizeof(*params); - - break; - default: - ngtcp2_unreachable(); - } - - memset(params, 0, len); - - switch (transport_params_version) { - case NGTCP2_TRANSPORT_PARAMS_VERSION: - params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; - params->active_connection_id_limit = - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; - params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; - params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; - - break; - } -} - /* The functions prefixed with ngtcp2_pkt_ are usually put inside ngtcp2_pkt.c. This function uses encryption construct and uses test data defined only in ngtcp2_conn_test.c, so it is written here. 
*/ ngtcp2_ssize ngtcp2_pkt_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, - ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, - const ngtcp2_crypto_cipher_ctx *hp_ctx) { + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, + const ngtcp2_crypto_cipher_ctx *hp_ctx) { ngtcp2_pkt_hd hd; ngtcp2_crypto_km ckm; ngtcp2_crypto_cc cc; @@ -13726,7 +13738,7 @@ ngtcp2_ssize ngtcp2_pkt_write_connection_close( cc.encrypt = encrypt; cc.hp_mask = hp_mask; - ngtcp2_ppe_init(&ppe, dest, destlen, &cc); + ngtcp2_ppe_init(&ppe, dest, destlen, 0, &cc); rv = ngtcp2_ppe_encode_hd(&ppe, &hd); if (rv != 0) { diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h index 4ed67876bc3749..55073fcc828d73 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -65,9 +65,6 @@ typedef enum { NGTCP2_CS_DRAINING, } ngtcp2_conn_state; -/* NGTCP2_MAX_STREAMS is the maximum number of streams. */ -#define NGTCP2_MAX_STREAMS (1LL << 60) - /* NGTCP2_MAX_NUM_BUFFED_RX_PKTS is the maximum number of buffered reordered packets. */ #define NGTCP2_MAX_NUM_BUFFED_RX_PKTS 4 @@ -77,15 +74,6 @@ typedef enum { unreceived data. */ #define NGTCP2_MAX_REORDERED_CRYPTO_DATA 65536 -/* NGTCP2_MAX_RX_INITIAL_CRYPTO_DATA is the maximum offset of received - crypto stream in Initial packet. We set this hard limit here - because crypto stream is unbounded. */ -#define NGTCP2_MAX_RX_INITIAL_CRYPTO_DATA 65536 -/* NGTCP2_MAX_RX_HANDSHAKE_CRYPTO_DATA is the maximum offset of - received crypto stream in Handshake packet. We set this hard limit - here because crypto stream is unbounded. */ -#define NGTCP2_MAX_RX_HANDSHAKE_CRYPTO_DATA 65536 - /* NGTCP2_MAX_RETRIES is the number of Retry packet which client can accept. */ #define NGTCP2_MAX_RETRIES 3 @@ -121,12 +109,18 @@ typedef enum { /* NGTCP2_WRITE_PKT_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_WRITE_PKT_FLAG_NONE 0x00u /* NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING indicates that packet other - than Initial packet should be padded. Initial packet might be - padded based on QUIC requirement regardless of this flag. */ + than Initial packet should be padded so that UDP datagram payload + is at least NGTCP2_MAX_UDP_PAYLOAD_SIZE bytes. Initial packet + might be padded based on QUIC requirement regardless of this + flag. */ #define NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING 0x01u /* NGTCP2_WRITE_PKT_FLAG_MORE indicates that more frames might come and it should be encoded into the current packet. */ #define NGTCP2_WRITE_PKT_FLAG_MORE 0x02u +/* NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL is just like + NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING, but it requests to add + padding to the full UDP datagram payload size. 
*/ +#define NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL 0x04u /* * ngtcp2_max_frame is defined so that it covers the largest ACK @@ -319,6 +313,7 @@ typedef struct ngtcp2_pktns { ngtcp2_acktr acktr; ngtcp2_rtb rtb; + ngtcp2_pktns_id id; } ngtcp2_pktns; typedef enum ngtcp2_ecn_state { @@ -342,15 +337,15 @@ typedef struct ngtcp2_early_transport_params { } ngtcp2_early_transport_params; ngtcp2_static_ringbuf_def(dcid_bound, NGTCP2_MAX_BOUND_DCID_POOL_SIZE, - sizeof(ngtcp2_dcid)); + sizeof(ngtcp2_dcid)) ngtcp2_static_ringbuf_def(dcid_unused, NGTCP2_MAX_DCID_POOL_SIZE, - sizeof(ngtcp2_dcid)); + sizeof(ngtcp2_dcid)) ngtcp2_static_ringbuf_def(dcid_retired, NGTCP2_MAX_DCID_RETIRED_SIZE, - sizeof(ngtcp2_dcid)); + sizeof(ngtcp2_dcid)) ngtcp2_static_ringbuf_def(path_challenge, 4, - sizeof(ngtcp2_path_challenge_entry)); + sizeof(ngtcp2_path_challenge_entry)) -ngtcp2_objalloc_decl(strm, ngtcp2_strm, oplent); +ngtcp2_objalloc_decl(strm, ngtcp2_strm, oplent) struct ngtcp2_conn { ngtcp2_objalloc frc_objalloc; @@ -813,6 +808,8 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts); +void ngtcp2_conn_cancel_loss_detection_timer(ngtcp2_conn *conn); + int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts); /* @@ -878,9 +875,9 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, * User-defined callback function failed. */ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( - ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, - uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, + uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, + uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts); /* * ngtcp2_conn_commit_local_transport_params commits the local @@ -973,6 +970,12 @@ int ngtcp2_conn_track_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq); */ void ngtcp2_conn_untrack_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq); +/* + * ngtcp2_conn_check_retired_dcid_tracked returns nonzero if |seq| has + * already been tracked. + */ +int ngtcp2_conn_check_retired_dcid_tracked(ngtcp2_conn *conn, uint64_t seq); + /* * ngtcp2_conn_server_negotiate_version negotiates QUIC version. It * is compatible version negotiation. 
It returns the negotiated QUIC @@ -1020,9 +1023,9 @@ ngtcp2_conn_server_negotiate_version(ngtcp2_conn *conn, * User callback failed */ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t error_code, const uint8_t *reason, size_t reasonlen, + ngtcp2_tstamp ts); /** * @function @@ -1066,9 +1069,9 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( * User callback failed */ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t app_error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t app_error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_tstamp ts); int ngtcp2_conn_start_pmtud(ngtcp2_conn *conn); @@ -1093,7 +1096,7 @@ void ngtcp2_conn_stop_pmtud(ngtcp2_conn *conn); * Out of memory. */ int ngtcp2_conn_set_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params); + ngtcp2_conn *conn, const ngtcp2_transport_params *params); /** * @function @@ -1132,7 +1135,7 @@ int ngtcp2_conn_set_remote_transport_params( * Out of memory. */ int ngtcp2_conn_set_0rtt_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params); + ngtcp2_conn *conn, const ngtcp2_transport_params *params); /* * ngtcp2_conn_create_ack_frame creates ACK frame, and assigns its @@ -1156,4 +1159,16 @@ int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, ngtcp2_tstamp ts, ngtcp2_duration ack_delay, uint64_t ack_delay_exponent); -#endif /* NGTCP2_CONN_H */ +/* + * ngtcp2_conn_discard_initial_state discards state for Initial packet + * number space. + */ +void ngtcp2_conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts); + +/* + * ngtcp2_conn_discard_handshake_state discards state for Handshake + * packet number space. + */ +void ngtcp2_conn_discard_handshake_state(ngtcp2_conn *conn, ngtcp2_tstamp ts); + +#endif /* !defined(NGTCP2_CONN_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h index 1a93867aab3cae..ad2b7329f48df2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -105,8 +105,9 @@ typedef struct ngtcp2_conn_stat { uint64_t bytes_in_flight; /** * :member:`max_tx_udp_payload_size` is the maximum size of UDP - * datagram payload that this endpoint transmits. It is used by - * congestion controller to compute congestion window. + * datagram payload that this endpoint transmits to the current + * path. It is used by congestion controller to compute congestion + * window. 
*/ size_t max_tx_udp_payload_size; /** @@ -129,4 +130,4 @@ typedef struct ngtcp2_conn_stat { size_t send_quantum; } ngtcp2_conn_stat; -#endif /* NGTCP2_CONN_STAT_H */ +#endif /* !defined(NGTCP2_CONN_STAT_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c index 336721772b4e4c..6528011cc0edf4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c @@ -32,47 +32,37 @@ #include "ngtcp2_net.h" #include "ngtcp2_unreachable.h" -const uint8_t *ngtcp2_get_uint64(uint64_t *dest, const uint8_t *p) { - uint64_t n; - memcpy(&n, p, sizeof(n)); - *dest = ngtcp2_ntohl64(n); - return p + sizeof(n); -} - -const uint8_t *ngtcp2_get_uint48(uint64_t *dest, const uint8_t *p) { - uint64_t n = 0; - memcpy(((uint8_t *)&n) + 2, p, 6); - *dest = ngtcp2_ntohl64(n); - return p + 6; +const uint8_t *ngtcp2_get_uint64be(uint64_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + *dest = ngtcp2_ntohl64(*dest); + return p + sizeof(*dest); } -const uint8_t *ngtcp2_get_uint32(uint32_t *dest, const uint8_t *p) { - uint32_t n; - memcpy(&n, p, sizeof(n)); - *dest = ngtcp2_ntohl(n); - return p + sizeof(n); +const uint8_t *ngtcp2_get_uint32be(uint32_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + *dest = ngtcp2_ntohl(*dest); + return p + sizeof(*dest); } -const uint8_t *ngtcp2_get_uint24(uint32_t *dest, const uint8_t *p) { - uint32_t n = 0; - memcpy(((uint8_t *)&n) + 1, p, 3); - *dest = ngtcp2_ntohl(n); +const uint8_t *ngtcp2_get_uint24be(uint32_t *dest, const uint8_t *p) { + *dest = 0; + memcpy(((uint8_t *)dest) + 1, p, 3); + *dest = ngtcp2_ntohl(*dest); return p + 3; } -const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p) { - uint16_t n; - memcpy(&n, p, sizeof(n)); - *dest = ngtcp2_ntohs(n); - return p + sizeof(n); +const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + *dest = ngtcp2_ntohs(*dest); + return p + sizeof(*dest); } -const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p) { +const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p) { memcpy(dest, p, sizeof(*dest)); return p + sizeof(*dest); } -static uint64_t get_uvarint(size_t *plen, const uint8_t *p) { +static const uint8_t *get_uvarint(uint64_t *dest, const uint8_t *p) { union { uint8_t n8; uint16_t n16; @@ -80,42 +70,39 @@ static uint64_t get_uvarint(size_t *plen, const uint8_t *p) { uint64_t n64; } n; - *plen = (size_t)(1u << (*p >> 6)); - - switch (*plen) { + switch (*p >> 6) { + case 0: + *dest = *p++; + return p; case 1: - return *p; - case 2: memcpy(&n, p, 2); n.n8 &= 0x3f; - return ngtcp2_ntohs(n.n16); - case 4: + *dest = ngtcp2_ntohs(n.n16); + + return p + 2; + case 2: memcpy(&n, p, 4); n.n8 &= 0x3f; - return ngtcp2_ntohl(n.n32); - case 8: + *dest = ngtcp2_ntohl(n.n32); + + return p + 4; + case 3: memcpy(&n, p, 8); n.n8 &= 0x3f; - return ngtcp2_ntohl64(n.n64); + *dest = ngtcp2_ntohl64(n.n64); + + return p + 8; default: ngtcp2_unreachable(); } } const uint8_t *ngtcp2_get_uvarint(uint64_t *dest, const uint8_t *p) { - size_t len; - - *dest = get_uvarint(&len, p); - - return p + len; + return get_uvarint(dest, p); } const uint8_t *ngtcp2_get_varint(int64_t *dest, const uint8_t *p) { - size_t len; - - *dest = (int64_t)get_uvarint(&len, p); - - return p + len; + return get_uvarint((uint64_t *)dest, p); } int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen) { @@ -126,13 +113,13 @@ int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen) { case 1: 
return *p; case 2: - ngtcp2_get_uint16(&s, p); + ngtcp2_get_uint16be(&s, p); return (int64_t)s; case 3: - ngtcp2_get_uint24(&l, p); + ngtcp2_get_uint24be(&l, p); return (int64_t)l; case 4: - ngtcp2_get_uint32(&l, p); + ngtcp2_get_uint32be(&l, p); return (int64_t)l; default: ngtcp2_unreachable(); @@ -144,11 +131,6 @@ uint8_t *ngtcp2_put_uint64be(uint8_t *p, uint64_t n) { return ngtcp2_cpymem(p, (const uint8_t *)&n, sizeof(n)); } -uint8_t *ngtcp2_put_uint48be(uint8_t *p, uint64_t n) { - n = ngtcp2_htonl64(n); - return ngtcp2_cpymem(p, ((const uint8_t *)&n) + 2, 6); -} - uint8_t *ngtcp2_put_uint32be(uint8_t *p, uint32_t n) { n = ngtcp2_htonl(n); return ngtcp2_cpymem(p, (const uint8_t *)&n, sizeof(n)); @@ -207,14 +189,11 @@ uint8_t *ngtcp2_put_pkt_num(uint8_t *p, int64_t pkt_num, size_t len) { *p++ = (uint8_t)pkt_num; return p; case 2: - ngtcp2_put_uint16be(p, (uint16_t)pkt_num); - return p + 2; + return ngtcp2_put_uint16be(p, (uint16_t)pkt_num); case 3: - ngtcp2_put_uint24be(p, (uint32_t)pkt_num); - return p + 3; + return ngtcp2_put_uint24be(p, (uint32_t)pkt_num); case 4: - ngtcp2_put_uint32be(p, (uint32_t)pkt_num); - return p + 4; + return ngtcp2_put_uint32be(p, (uint32_t)pkt_num); default: ngtcp2_unreachable(); } @@ -238,54 +217,6 @@ size_t ngtcp2_put_uvarintlen(uint64_t n) { return 8; } -int64_t ngtcp2_nth_server_bidi_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_SERVER_STREAM_ID_BIDI; - } - - return (int64_t)(((n - 1) << 2) | 0x01); -} - -int64_t ngtcp2_nth_client_bidi_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_CLIENT_STREAM_ID_BIDI; - } - - return (int64_t)((n - 1) << 2); -} - -int64_t ngtcp2_nth_server_uni_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_SERVER_STREAM_ID_UNI; - } - - return (int64_t)(((n - 1) << 2) | 0x03); -} - -int64_t ngtcp2_nth_client_uni_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_CLIENT_STREAM_ID_UNI; - } - - return (int64_t)(((n - 1) << 2) | 0x02); -} - uint64_t ngtcp2_ord_stream_id(int64_t stream_id) { return (uint64_t)(stream_id >> 2) + 1; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h index ef089a971a37f1..ad924683b8dc10 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h @@ -27,51 +27,44 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include /* - * ngtcp2_get_uint64 reads 8 bytes from |p| as 64 bits unsigned + * ngtcp2_get_uint64be reads 8 bytes from |p| as 64 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. It returns |p| + 8. */ -const uint8_t *ngtcp2_get_uint64(uint64_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint64be(uint64_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint48 reads 6 bytes from |p| as 48 bits unsigned - * integer encoded as network byte order, and stores it in the buffer - * pointed by |dest| in host byte order. It returns |p| + 6. - */ -const uint8_t *ngtcp2_get_uint48(uint64_t *dest, const uint8_t *p); - -/* - * ngtcp2_get_uint32 reads 4 bytes from |p| as 32 bits unsigned + * ngtcp2_get_uint32be reads 4 bytes from |p| as 32 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. 
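The rewritten get_uvarint above dispatches on the two most significant bits of the first byte, which give the field length (1, 2, 4, or 8 bytes) as defined in RFC 9000 section 16; the remaining six bits plus any following bytes carry the value in network byte order. A self-contained, byte-at-a-time sketch of the same decoding; the function name is illustrative, and ngtcp2 itself uses memcpy plus byte-order helpers instead of a loop:

    #include <stddef.h>
    #include <stdint.h>

    /* Decodes a QUIC variable-length integer (RFC 9000 Section 16).
       Returns the number of bytes consumed, or 0 if buflen is too short.
       Examples from RFC 9000 Appendix A: 0x25 -> 37, 0x7b 0xbd -> 15293. */
    static size_t quic_varint_decode(uint64_t *dest, const uint8_t *buf,
                                     size_t buflen) {
      size_t len, i;
      uint64_t v;

      if (buflen == 0) {
        return 0;
      }

      len = (size_t)1 << (buf[0] >> 6); /* 1, 2, 4, or 8 bytes */
      if (buflen < len) {
        return 0;
      }

      v = buf[0] & 0x3f; /* drop the 2-bit length prefix */
      for (i = 1; i < len; ++i) {
        v = (v << 8) | buf[i];
      }

      *dest = v;
      return len;
    }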
It returns |p| + 4. */ -const uint8_t *ngtcp2_get_uint32(uint32_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint32be(uint32_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint24 reads 3 bytes from |p| as 24 bits unsigned + * ngtcp2_get_uint24be reads 3 bytes from |p| as 24 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. It returns |p| + 3. */ -const uint8_t *ngtcp2_get_uint24(uint32_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint24be(uint32_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint16 reads 2 bytes from |p| as 16 bits unsigned + * ngtcp2_get_uint16be reads 2 bytes from |p| as 16 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. It returns |p| + 2. */ -const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint16be reads 2 bytes from |p| as 16 bits unsigned + * ngtcp2_get_uint16 reads 2 bytes from |p| as 16 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| as is. It returns |p| + 2. */ -const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p); /* * ngtcp2_get_uvarint reads variable-length unsigned integer from |p|, @@ -102,13 +95,6 @@ int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen); */ uint8_t *ngtcp2_put_uint64be(uint8_t *p, uint64_t n); -/* - * ngtcp2_put_uint48be writes |n| in host byte order in |p| in network - * byte order. It writes only least significant 48 bits. It returns - * the one beyond of the last written position. - */ -uint8_t *ngtcp2_put_uint48be(uint8_t *p, uint64_t n); - /* * ngtcp2_put_uint32be writes |n| in host byte order in |p| in network * byte order. It returns the one beyond of the last written @@ -168,41 +154,9 @@ size_t ngtcp2_get_uvarintlen(const uint8_t *p); */ size_t ngtcp2_put_uvarintlen(uint64_t n); -/* - * ngtcp2_nth_server_bidi_id returns |n|-th server bidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_SERVER_STREAM_ID_BIDI, this function returns - * NGTCP2_MAX_SERVER_STREAM_ID_BIDI. - */ -int64_t ngtcp2_nth_server_bidi_id(uint64_t n); - -/* - * ngtcp2_nth_client_bidi_id returns |n|-th client bidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_CLIENT_STREAM_ID_BIDI, this function returns - * NGTCP2_MAX_CLIENT_STREAM_ID_BIDI. - */ -int64_t ngtcp2_nth_client_bidi_id(uint64_t n); - -/* - * ngtcp2_nth_server_uni_id returns |n|-th server unidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_SERVER_STREAM_ID_UNI, this function returns - * NGTCP2_MAX_SERVER_STREAM_ID_UNI. - */ -int64_t ngtcp2_nth_server_uni_id(uint64_t n); - -/* - * ngtcp2_nth_client_uni_id returns |n|-th client unidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_CLIENT_STREAM_ID_UNI, this function returns - * NGTCP2_MAX_CLIENT_STREAM_ID_UNI. - */ -int64_t ngtcp2_nth_client_uni_id(uint64_t n); - /* * ngtcp2_ord_stream_id returns the ordinal number of |stream_id|. 
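Since the two low bits of a QUIC stream ID carry the initiator and direction flags (0x1 = server-initiated, 0x2 = unidirectional), streams of one kind are spaced four apart, and the surviving ngtcp2_ord_stream_id computes the 1-based ordinal as (stream_id >> 2) + 1. A small sketch of that mapping; the helper name is illustrative:

    #include <stdint.h>

    /* 1-based ordinal of a stream among streams of the same kind,
       mirroring ngtcp2_ord_stream_id: bits 0-1 of a stream ID are the
       initiator/direction flags, the remaining bits count streams. */
    static uint64_t stream_ordinal(int64_t stream_id) {
      return (uint64_t)(stream_id >> 2) + 1;
    }

    /* Examples: client bidi IDs 0, 4, 8 -> ordinals 1, 2, 3;
       server uni IDs 3, 7, 11 -> ordinals 1, 2, 3. */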
*/ uint64_t ngtcp2_ord_stream_id(int64_t stream_id); -#endif /* NGTCP2_CONV_H */ +#endif /* !defined(NGTCP2_CONV_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c deleted file mode 100644 index eb85687a068449..00000000000000 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * ngtcp2 - * - * Copyright (c) 2023 ngtcp2 contributors - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#include "ngtcp2_conversion.h" - -#include -#include - -static void transport_params_copy(int transport_params_version, - ngtcp2_transport_params *dest, - const ngtcp2_transport_params *src) { - assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); - - switch (transport_params_version) { - case NGTCP2_TRANSPORT_PARAMS_V1: - memcpy(dest, src, - offsetof(ngtcp2_transport_params, version_info_present) + - sizeof(src->version_info_present)); - - break; - } -} - -const ngtcp2_transport_params * -ngtcp2_transport_params_convert_to_latest(ngtcp2_transport_params *dest, - int transport_params_version, - const ngtcp2_transport_params *src) { - if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { - return src; - } - - ngtcp2_transport_params_default(dest); - - transport_params_copy(transport_params_version, dest, src); - - return dest; -} - -void ngtcp2_transport_params_convert_to_old( - int transport_params_version, ngtcp2_transport_params *dest, - const ngtcp2_transport_params *src) { - assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); - - transport_params_copy(transport_params_version, dest, src); -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c index 0a3ecf6a2440cb..1f74e8c58397b1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c @@ -27,11 +27,7 @@ #include #include -#include "ngtcp2_str.h" -#include "ngtcp2_conv.h" -#include "ngtcp2_conn.h" #include "ngtcp2_net.h" -#include "ngtcp2_conversion.h" int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, size_t secretlen, @@ -46,9 +42,11 @@ int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, if (secretlen) { memcpy((*pckm)->secret.base, secret, secretlen); } + if (aead_ctx) { (*pckm)->aead_ctx = *aead_ctx; } + memcpy((*pckm)->iv.base, iv, ivlen); return 0; @@ -85,825 +83,30 @@ void ngtcp2_crypto_km_del(ngtcp2_crypto_km *ckm, const ngtcp2_mem *mem) { return; } + if 
(ckm->secret.len) { +#ifdef WIN32 + SecureZeroMemory(ckm->secret.base, ckm->secret.len); +#elif defined(HAVE_EXPLICIT_BZERO) + explicit_bzero(ckm->secret.base, ckm->secret.len); +#elif defined(HAVE_MEMSET_S) + memset_s(ckm->secret.base, ckm->secret.len, 0, ckm->secret.len); +#endif /* defined(HAVE_MEMSET_S) */ + } + ngtcp2_mem_free(mem, ckm); } void ngtcp2_crypto_create_nonce(uint8_t *dest, const uint8_t *iv, size_t ivlen, int64_t pkt_num) { - size_t i; uint64_t n; - assert(ivlen >= 8); + assert(ivlen >= sizeof(n)); memcpy(dest, iv, ivlen); - n = ngtcp2_htonl64((uint64_t)pkt_num); - - for (i = 0; i < 8; ++i) { - dest[ivlen - 8 + i] ^= ((uint8_t *)&n)[i]; - } -} - -/* - * varint_paramlen returns the length of a single transport parameter - * which has variable integer in its parameter. - */ -static size_t varint_paramlen(ngtcp2_transport_param_id id, uint64_t param) { - size_t valuelen = ngtcp2_put_uvarintlen(param); - return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(valuelen) + valuelen; -} - -/* - * write_varint_param writes parameter |id| of the given |value| in - * varint encoding. It returns p + the number of bytes written. - */ -static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, - uint64_t value) { - p = ngtcp2_put_uvarint(p, id); - p = ngtcp2_put_uvarint(p, ngtcp2_put_uvarintlen(value)); - return ngtcp2_put_uvarint(p, value); -} - -/* - * zero_paramlen returns the length of a single transport parameter - * which has zero length value in its parameter. - */ -static size_t zero_paramlen(ngtcp2_transport_param_id id) { - return ngtcp2_put_uvarintlen(id) + 1; -} - -/* - * write_zero_param writes parameter |id| that has zero length value. - * It returns p + the number of bytes written. - */ -static uint8_t *write_zero_param(uint8_t *p, ngtcp2_transport_param_id id) { - p = ngtcp2_put_uvarint(p, id); - *p++ = 0; - - return p; -} - -/* - * cid_paramlen returns the length of a single transport parameter - * which has |cid| as value. - */ -static size_t cid_paramlen(ngtcp2_transport_param_id id, - const ngtcp2_cid *cid) { - return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(cid->datalen) + - cid->datalen; -} - -/* - * write_cid_param writes parameter |id| of the given |cid|. It - * returns p + the number of bytes written. - */ -static uint8_t *write_cid_param(uint8_t *p, ngtcp2_transport_param_id id, - const ngtcp2_cid *cid) { - assert(cid->datalen == 0 || cid->datalen >= NGTCP2_MIN_CIDLEN); - assert(cid->datalen <= NGTCP2_MAX_CIDLEN); - - p = ngtcp2_put_uvarint(p, id); - p = ngtcp2_put_uvarint(p, cid->datalen); - if (cid->datalen) { - p = ngtcp2_cpymem(p, cid->data, cid->datalen); - } - return p; -} - -static const uint8_t empty_address[16]; - -ngtcp2_ssize ngtcp2_transport_params_encode_versioned( - uint8_t *dest, size_t destlen, int transport_params_version, - const ngtcp2_transport_params *params) { - uint8_t *p; - size_t len = 0; - /* For some reason, gcc 7.3.0 requires this initialization. 
*/ - size_t preferred_addrlen = 0; - size_t version_infolen = 0; - const ngtcp2_sockaddr_in *sa_in; - const ngtcp2_sockaddr_in6 *sa_in6; - ngtcp2_transport_params paramsbuf; - - params = ngtcp2_transport_params_convert_to_latest( - ¶msbuf, transport_params_version, params); - - if (params->original_dcid_present) { - len += - cid_paramlen(NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, - ¶ms->original_dcid); - } - - if (params->stateless_reset_token_present) { - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN) + - ngtcp2_put_uvarintlen(NGTCP2_STATELESS_RESET_TOKENLEN) + - NGTCP2_STATELESS_RESET_TOKENLEN; - } - - if (params->preferred_addr_present) { - assert(params->preferred_addr.cid.datalen >= NGTCP2_MIN_CIDLEN); - assert(params->preferred_addr.cid.datalen <= NGTCP2_MAX_CIDLEN); - preferred_addrlen = 4 /* ipv4Address */ + 2 /* ipv4Port */ + - 16 /* ipv6Address */ + 2 /* ipv6Port */ - + 1 + params->preferred_addr.cid.datalen /* CID */ + - NGTCP2_STATELESS_RESET_TOKENLEN; - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS) + - ngtcp2_put_uvarintlen(preferred_addrlen) + preferred_addrlen; - } - if (params->retry_scid_present) { - len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, - ¶ms->retry_scid); - } - - if (params->initial_scid_present) { - len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, - ¶ms->initial_scid); - } - - if (params->initial_max_stream_data_bidi_local) { - len += varint_paramlen( - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, - params->initial_max_stream_data_bidi_local); - } - if (params->initial_max_stream_data_bidi_remote) { - len += varint_paramlen( - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, - params->initial_max_stream_data_bidi_remote); - } - if (params->initial_max_stream_data_uni) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, - params->initial_max_stream_data_uni); - } - if (params->initial_max_data) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, - params->initial_max_data); - } - if (params->initial_max_streams_bidi) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, - params->initial_max_streams_bidi); - } - if (params->initial_max_streams_uni) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, - params->initial_max_streams_uni); - } - if (params->max_udp_payload_size != - NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, - params->max_udp_payload_size); - } - if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, - params->ack_delay_exponent); - } - if (params->disable_active_migration) { - len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); - } - if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, - params->max_ack_delay / NGTCP2_MILLISECONDS); - } - if (params->max_idle_timeout) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, - params->max_idle_timeout / NGTCP2_MILLISECONDS); - } - if (params->active_connection_id_limit && - params->active_connection_id_limit != - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, - params->active_connection_id_limit); - } - if (params->max_datagram_frame_size) { - len += 
varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, - params->max_datagram_frame_size); - } - if (params->grease_quic_bit) { - len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); - } - if (params->version_info_present) { - version_infolen = - sizeof(uint32_t) + params->version_info.available_versionslen; - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION) + - ngtcp2_put_uvarintlen(version_infolen) + version_infolen; - } - - if (dest == NULL && destlen == 0) { - return (ngtcp2_ssize)len; - } - - if (destlen < len) { - return NGTCP2_ERR_NOBUF; - } - - p = dest; - - if (params->original_dcid_present) { - p = write_cid_param( - p, NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, - ¶ms->original_dcid); - } - - if (params->stateless_reset_token_present) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN); - p = ngtcp2_put_uvarint(p, sizeof(params->stateless_reset_token)); - p = ngtcp2_cpymem(p, params->stateless_reset_token, - sizeof(params->stateless_reset_token)); - } - - if (params->preferred_addr_present) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS); - p = ngtcp2_put_uvarint(p, preferred_addrlen); - - if (params->preferred_addr.ipv4_present) { - sa_in = ¶ms->preferred_addr.ipv4; - p = ngtcp2_cpymem(p, &sa_in->sin_addr, sizeof(sa_in->sin_addr)); - p = ngtcp2_put_uint16(p, sa_in->sin_port); - } else { - p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in->sin_addr)); - p = ngtcp2_put_uint16(p, 0); - } - - if (params->preferred_addr.ipv6_present) { - sa_in6 = ¶ms->preferred_addr.ipv6; - p = ngtcp2_cpymem(p, &sa_in6->sin6_addr, sizeof(sa_in6->sin6_addr)); - p = ngtcp2_put_uint16(p, sa_in6->sin6_port); - } else { - p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in6->sin6_addr)); - p = ngtcp2_put_uint16(p, 0); - } - - *p++ = (uint8_t)params->preferred_addr.cid.datalen; - if (params->preferred_addr.cid.datalen) { - p = ngtcp2_cpymem(p, params->preferred_addr.cid.data, - params->preferred_addr.cid.datalen); - } - p = ngtcp2_cpymem(p, params->preferred_addr.stateless_reset_token, - sizeof(params->preferred_addr.stateless_reset_token)); - } - - if (params->retry_scid_present) { - p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, - ¶ms->retry_scid); - } - - if (params->initial_scid_present) { - p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, - ¶ms->initial_scid); - } - - if (params->initial_max_stream_data_bidi_local) { - p = write_varint_param( - p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, - params->initial_max_stream_data_bidi_local); - } - - if (params->initial_max_stream_data_bidi_remote) { - p = write_varint_param( - p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, - params->initial_max_stream_data_bidi_remote); - } - - if (params->initial_max_stream_data_uni) { - p = write_varint_param(p, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, - params->initial_max_stream_data_uni); - } - - if (params->initial_max_data) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, - params->initial_max_data); - } - - if (params->initial_max_streams_bidi) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, - params->initial_max_streams_bidi); - } - - if (params->initial_max_streams_uni) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, - params->initial_max_streams_uni); - } - - if (params->max_udp_payload_size != - 
NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, - params->max_udp_payload_size); - } - - if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, - params->ack_delay_exponent); - } - - if (params->disable_active_migration) { - p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); - } - - if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, - params->max_ack_delay / NGTCP2_MILLISECONDS); - } - - if (params->max_idle_timeout) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, - params->max_idle_timeout / NGTCP2_MILLISECONDS); - } - - if (params->active_connection_id_limit && - params->active_connection_id_limit != - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, - params->active_connection_id_limit); - } - - if (params->max_datagram_frame_size) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, - params->max_datagram_frame_size); - } - - if (params->grease_quic_bit) { - p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); - } - - if (params->version_info_present) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION); - p = ngtcp2_put_uvarint(p, version_infolen); - p = ngtcp2_put_uint32be(p, params->version_info.chosen_version); - if (params->version_info.available_versionslen) { - p = ngtcp2_cpymem(p, params->version_info.available_versions, - params->version_info.available_versionslen); - } - } - - assert((size_t)(p - dest) == len); - - return (ngtcp2_ssize)len; -} - -/* - * decode_varint decodes a single varint from the buffer pointed by - * |*pp| of length |end - *pp|. If it decodes an integer - * successfully, it stores the integer in |*pdest|, increment |*pp| by - * the number of bytes read from |*pp|, and returns 0. Otherwise it - * returns -1. - */ -static int decode_varint(uint64_t *pdest, const uint8_t **pp, - const uint8_t *end) { - const uint8_t *p = *pp; - size_t len; - - if (p == end) { - return -1; - } - - len = ngtcp2_get_uvarintlen(p); - if ((uint64_t)(end - p) < len) { - return -1; - } - - *pp = ngtcp2_get_uvarint(pdest, p); - - return 0; -} - -/* - * decode_varint_param decodes length prefixed value from the buffer - * pointed by |*pp| of length |end - *pp|. The length and value are - * encoded in varint form. If it decodes a value successfully, it - * stores the value in |*pdest|, increment |*pp| by the number of - * bytes read from |*pp|, and returns 0. Otherwise it returns -1. - */ -static int decode_varint_param(uint64_t *pdest, const uint8_t **pp, - const uint8_t *end) { - const uint8_t *p = *pp; - uint64_t valuelen; - - if (decode_varint(&valuelen, &p, end) != 0) { - return -1; - } - - if (p == end) { - return -1; - } - - if ((uint64_t)(end - p) < valuelen) { - return -1; - } - - if (ngtcp2_get_uvarintlen(p) != valuelen) { - return -1; - } - - *pp = ngtcp2_get_uvarint(pdest, p); - - return 0; -} - -/* - * decode_zero_param decodes zero length value from the buffer pointed - * by |*pp| of length |end - *pp|. The length is encoded in varint - * form. If it decodes zero length value successfully, it increments - * |*pp| by 1, and returns 0. Otherwise it returns -1. 
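The write_varint_param and decode_varint_param helpers removed in this hunk treat each integer-valued transport parameter as three variable-length integers in sequence: the parameter ID, the length of the encoded value, and the value itself. A compact sketch of that framing using the RFC 9000 varint rules; the function names are illustrative, and values of 2^62 or more are assumed not to occur:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Shortest QUIC varint encoding of v (RFC 9000 Section 16).
       Returns the number of bytes written (1, 2, 4, or 8); assumes v < 2^62. */
    static size_t quic_varint_put(uint8_t *p, uint64_t v) {
      size_t len, i;

      if (v < 64) {
        len = 1;
      } else if (v < 16384) {
        len = 2;
      } else if (v < 1073741824) {
        len = 4;
      } else {
        len = 8;
      }

      for (i = 0; i < len; ++i) {
        p[len - 1 - i] = (uint8_t)(v >> (8 * i));
      }

      /* Fold the 2-bit length prefix (00, 01, 10, 11) into the top bits. */
      p[0] |= (uint8_t)((len == 1 ? 0 : len == 2 ? 1 : len == 4 ? 2 : 3) << 6);

      return len;
    }

    /* One integer-valued transport parameter: id, value length, value,
       each written as a varint -- the layout the removed varint_paramlen
       and write_varint_param helpers computed. */
    static size_t put_varint_param(uint8_t *p, uint64_t id, uint64_t value) {
      uint8_t tmp[8];
      size_t valuelen = quic_varint_put(tmp, value);
      size_t n = 0;

      n += quic_varint_put(p + n, id);
      n += quic_varint_put(p + n, (uint64_t)valuelen);
      memcpy(p + n, tmp, valuelen);

      return n + valuelen;
    }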
- */ -static int decode_zero_param(const uint8_t **pp, const uint8_t *end) { - if (*pp == end || **pp != 0) { - return -1; - } - - ++*pp; - - return 0; -} - -/* - * decode_cid_param decodes length prefixed ngtcp2_cid from the buffer - * pointed by |*pp| of length |end - *pp|. The length is encoded in - * varint form. If it decodes a value successfully, it stores the - * value in |*pdest|, increment |*pp| by the number of read from - * |*pp|, and returns the number of bytes read. Otherwise it returns - * the one of the negative error code: - * - * NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM - * Could not decode Connection ID. - */ -static int decode_cid_param(ngtcp2_cid *pdest, const uint8_t **pp, - const uint8_t *end) { - const uint8_t *p = *pp; - uint64_t valuelen; - - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - if ((valuelen != 0 && valuelen < NGTCP2_MIN_CIDLEN) || - valuelen > NGTCP2_MAX_CIDLEN || (size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - ngtcp2_cid_init(pdest, p, (size_t)valuelen); - - p += valuelen; - - *pp = p; - - return 0; -} - -int ngtcp2_transport_params_decode_versioned(int transport_params_version, - ngtcp2_transport_params *dest, - const uint8_t *data, - size_t datalen) { - const uint8_t *p, *end, *lend; - size_t len; - uint64_t param_type; - uint64_t valuelen; - int rv; - ngtcp2_sockaddr_in *sa_in; - ngtcp2_sockaddr_in6 *sa_in6; - uint32_t version; - ngtcp2_transport_params *params, paramsbuf; - if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { - params = dest; - } else { - params = ¶msbuf; - } - - /* Set default values */ - memset(params, 0, sizeof(*params)); - params->original_dcid_present = 0; - params->initial_scid_present = 0; - params->initial_max_streams_bidi = 0; - params->initial_max_streams_uni = 0; - params->initial_max_stream_data_bidi_local = 0; - params->initial_max_stream_data_bidi_remote = 0; - params->initial_max_stream_data_uni = 0; - params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; - params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; - params->stateless_reset_token_present = 0; - params->preferred_addr_present = 0; - params->disable_active_migration = 0; - params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; - params->max_idle_timeout = 0; - params->active_connection_id_limit = - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; - params->retry_scid_present = 0; - params->max_datagram_frame_size = 0; - memset(¶ms->retry_scid, 0, sizeof(params->retry_scid)); - memset(¶ms->initial_scid, 0, sizeof(params->initial_scid)); - memset(¶ms->original_dcid, 0, sizeof(params->original_dcid)); - params->version_info_present = 0; - - p = data; - end = data + datalen; - - for (; (size_t)(end - p) >= 2;) { - if (decode_varint(¶m_type, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - switch (param_type) { - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL: - if (decode_varint_param(¶ms->initial_max_stream_data_bidi_local, &p, - end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE: - if (decode_varint_param(¶ms->initial_max_stream_data_bidi_remote, &p, - end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI: - if (decode_varint_param(¶ms->initial_max_stream_data_uni, &p, end) != - 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - 
break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA: - if (decode_varint_param(¶ms->initial_max_data, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI: - if (decode_varint_param(¶ms->initial_max_streams_bidi, &p, end) != - 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->initial_max_streams_bidi > NGTCP2_MAX_STREAMS) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI: - if (decode_varint_param(¶ms->initial_max_streams_uni, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->initial_max_streams_uni > NGTCP2_MAX_STREAMS) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT: - if (decode_varint_param(¶ms->max_idle_timeout, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->max_idle_timeout *= NGTCP2_MILLISECONDS; - break; - case NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE: - if (decode_varint_param(¶ms->max_udp_payload_size, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)valuelen != sizeof(params->stateless_reset_token)) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < sizeof(params->stateless_reset_token)) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - p = ngtcp2_get_bytes(params->stateless_reset_token, p, - sizeof(params->stateless_reset_token)); - params->stateless_reset_token_present = 1; - - break; - case NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT: - if (decode_varint_param(¶ms->ack_delay_exponent, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->ack_delay_exponent > 20) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - len = 4 /* ipv4Address */ + 2 /* ipv4Port */ + 16 /* ipv6Address */ + - 2 /* ipv6Port */ - + 1 /* cid length */ + NGTCP2_STATELESS_RESET_TOKENLEN; - if (valuelen < len) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - sa_in = ¶ms->preferred_addr.ipv4; - - p = ngtcp2_get_bytes(&sa_in->sin_addr, p, sizeof(sa_in->sin_addr)); - p = ngtcp2_get_uint16be(&sa_in->sin_port, p); - - if (sa_in->sin_port || memcmp(empty_address, &sa_in->sin_addr, - sizeof(sa_in->sin_addr)) != 0) { - sa_in->sin_family = NGTCP2_AF_INET; - params->preferred_addr.ipv4_present = 1; - } - - sa_in6 = ¶ms->preferred_addr.ipv6; - - p = ngtcp2_get_bytes(&sa_in6->sin6_addr, p, sizeof(sa_in6->sin6_addr)); - p = ngtcp2_get_uint16be(&sa_in6->sin6_port, p); - - if (sa_in6->sin6_port || memcmp(empty_address, &sa_in6->sin6_addr, - sizeof(sa_in6->sin6_addr)) != 0) { - sa_in6->sin6_family = NGTCP2_AF_INET6; - params->preferred_addr.ipv6_present = 1; - } - - /* cid */ - params->preferred_addr.cid.datalen = *p++; - len += params->preferred_addr.cid.datalen; - if (valuelen != len || - params->preferred_addr.cid.datalen > NGTCP2_MAX_CIDLEN || - params->preferred_addr.cid.datalen < NGTCP2_MIN_CIDLEN) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->preferred_addr.cid.datalen) { - p = 
ngtcp2_get_bytes(params->preferred_addr.cid.data, p, - params->preferred_addr.cid.datalen); - } - - /* stateless reset token */ - p = ngtcp2_get_bytes( - params->preferred_addr.stateless_reset_token, p, - sizeof(params->preferred_addr.stateless_reset_token)); - params->preferred_addr_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION: - if (decode_zero_param(&p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->disable_active_migration = 1; - break; - case NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID: - rv = decode_cid_param(¶ms->original_dcid, &p, end); - if (rv != 0) { - return rv; - } - params->original_dcid_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID: - rv = decode_cid_param(¶ms->retry_scid, &p, end); - if (rv != 0) { - return rv; - } - params->retry_scid_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID: - rv = decode_cid_param(¶ms->initial_scid, &p, end); - if (rv != 0) { - return rv; - } - params->initial_scid_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY: - if (decode_varint_param(¶ms->max_ack_delay, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->max_ack_delay >= 16384) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->max_ack_delay *= NGTCP2_MILLISECONDS; - break; - case NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT: - if (decode_varint_param(¶ms->active_connection_id_limit, &p, end) != - 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE: - if (decode_varint_param(¶ms->max_datagram_frame_size, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT: - if (decode_zero_param(&p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->grease_quic_bit = 1; - break; - case NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (valuelen < sizeof(uint32_t) || (valuelen & 0x3)) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - p = ngtcp2_get_uint32(¶ms->version_info.chosen_version, p); - if (params->version_info.chosen_version == 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (valuelen > sizeof(uint32_t)) { - params->version_info.available_versions = (uint8_t *)p; - params->version_info.available_versionslen = - (size_t)valuelen - sizeof(uint32_t); - - for (lend = p + (valuelen - sizeof(uint32_t)); p != lend;) { - p = ngtcp2_get_uint32(&version, p); - if (version == 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - } - } - params->version_info_present = 1; - break; - default: - /* Ignore unknown parameter */ - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - p += valuelen; - break; - } - } - - if (end - p != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - if (transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION) { - ngtcp2_transport_params_convert_to_old(transport_params_version, dest, - params); - } - - return 0; -} - -static int transport_params_copy_new(ngtcp2_transport_params **pdest, - const ngtcp2_transport_params *src, - const ngtcp2_mem *mem) { - size_t len = 
sizeof(**pdest); - ngtcp2_transport_params *dest; - uint8_t *p; - - if (src->version_info_present) { - len += src->version_info.available_versionslen; - } - - dest = ngtcp2_mem_malloc(mem, len); - if (dest == NULL) { - return NGTCP2_ERR_NOMEM; - } - - *dest = *src; - - if (src->version_info_present && src->version_info.available_versionslen) { - p = (uint8_t *)dest + sizeof(*dest); - memcpy(p, src->version_info.available_versions, - src->version_info.available_versionslen); - dest->version_info.available_versions = p; - } - - *pdest = dest; - - return 0; -} - -int ngtcp2_transport_params_decode_new(ngtcp2_transport_params **pparams, - const uint8_t *data, size_t datalen, - const ngtcp2_mem *mem) { - int rv; - ngtcp2_transport_params params; - - rv = ngtcp2_transport_params_decode(¶ms, data, datalen); - if (rv < 0) { - return rv; - } - - if (mem == NULL) { - mem = ngtcp2_mem_default(); - } - - return transport_params_copy_new(pparams, ¶ms, mem); -} - -void ngtcp2_transport_params_del(ngtcp2_transport_params *params, - const ngtcp2_mem *mem) { - if (params == NULL) { - return; - } - - if (mem == NULL) { - mem = ngtcp2_mem_default(); - } - - ngtcp2_mem_free(mem, params); -} - -int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, - const ngtcp2_transport_params *src, - const ngtcp2_mem *mem) { - if (src == NULL) { - *pdest = NULL; - return 0; - } + dest += ivlen - sizeof(n); - return transport_params_copy_new(pdest, src, mem); + memcpy(&n, dest, sizeof(n)); + n ^= ngtcp2_htonl64((uint64_t)pkt_num); + memcpy(dest, &n, sizeof(n)); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h index b78429bb38f582..ca6d494846f324 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -38,36 +38,9 @@ bytes. */ #define NGTCP2_INITIAL_AEAD_OVERHEAD 16 -/* NGTCP2_MAX_AEAD_OVERHEAD is expected maximum AEAD overhead. */ +/* NGTCP2_MAX_AEAD_OVERHEAD is the maximum AEAD overhead. */ #define NGTCP2_MAX_AEAD_OVERHEAD 16 -/* ngtcp2_transport_param_id is the registry of QUIC transport - parameter ID. 
*/ -typedef uint64_t ngtcp2_transport_param_id; - -#define NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID 0x00 -#define NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT 0x01 -#define NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN 0x02 -#define NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE 0x03 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA 0x04 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 0x05 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 0x06 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI 0x07 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI 0x08 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI 0x09 -#define NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT 0x0a -#define NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY 0x0b -#define NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION 0x0c -#define NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS 0x0d -#define NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT 0x0e -#define NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID 0x0f -#define NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID 0x10 -/* https://datatracker.ietf.org/doc/html/rfc9221 */ -#define NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE 0x20 -#define NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT 0x2ab2 -/* https://datatracker.ietf.org/doc/html/rfc9368 */ -#define NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION 0x11 - /* NGTCP2_CRYPTO_KM_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_CRYPTO_KM_FLAG_NONE 0x00u /* NGTCP2_CRYPTO_KM_FLAG_KEY_PHASE_ONE is set if key phase bit is @@ -83,8 +56,7 @@ typedef struct ngtcp2_crypto_km { a packet. For decryption key, it is the lowest packet number of a packet which can be decrypted with this keying material. */ int64_t pkt_num; - /* use_count is the number of encryption applied with this key. - This field is only used for tx key. */ + /* use_count is the number of encryption applied with this key. */ uint64_t use_count; /* flags is the bitwise OR of zero or more of NGTCP2_CRYPTO_KM_FLAG_*. */ @@ -92,12 +64,12 @@ typedef struct ngtcp2_crypto_km { } ngtcp2_crypto_km; /* - * ngtcp2_crypto_km_new creates new ngtcp2_crypto_km object and + * ngtcp2_crypto_km_new creates new ngtcp2_crypto_km object, and * assigns its pointer to |*pckm|. The |secret| of length - * |secretlen|, the |key| of length |keylen| and the |iv| of length - * |ivlen| are copied to |*pckm|. If |secretlen| == 0, the function - * assumes no secret is given which is acceptable. The sole reason to - * store secret is update keys. Only 1RTT key can be updated. + * |secretlen|, |aead_ctx|, and the |iv| of length |ivlen| are copied + * to |*pckm|. If |secretlen| == 0, the function assumes no secret is + * given which is acceptable. The sole reason to store secret is + * update keys. Only 1RTT key can be updated. */ int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, size_t secretlen, @@ -107,7 +79,7 @@ int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, /* * ngtcp2_crypto_km_nocopy_new is similar to ngtcp2_crypto_km_new, but - * it does not copy secret, key and IV. + * it does not copy secret, aead context, and IV. */ int ngtcp2_crypto_km_nocopy_new(ngtcp2_crypto_km **pckm, size_t secretlen, size_t ivlen, const ngtcp2_mem *mem); @@ -127,21 +99,4 @@ typedef struct ngtcp2_crypto_cc { void ngtcp2_crypto_create_nonce(uint8_t *dest, const uint8_t *iv, size_t ivlen, int64_t pkt_num); -/* - * ngtcp2_transport_params_copy_new makes a copy of |src|, and assigns - * it to |*pdest|. 
If |src| is NULL, NULL is assigned to |*pdest|. - * - * Caller is responsible to call ngtcp2_transport_params_del to free - * the memory assigned to |*pdest|. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. - */ -int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, - const ngtcp2_transport_params *src, - const ngtcp2_mem *mem); - -#endif /* NGTCP2_CRYPTO_H */ +#endif /* !defined(NGTCP2_CRYPTO_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h index 9229f5425a63cf..44527b11bdec47 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h @@ -27,8 +27,8 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include -#endif /* NGTCP2_ERR_H */ +#endif /* !defined(NGTCP2_ERR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c index 41c2a6a755cc8a..6a8a22c3f0d010 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c @@ -27,7 +27,7 @@ #include #include -ngtcp2_objalloc_def(frame_chain, ngtcp2_frame_chain, oplent); +ngtcp2_objalloc_def(frame_chain, ngtcp2_frame_chain, oplent) int ngtcp2_frame_chain_new(ngtcp2_frame_chain **pfrc, const ngtcp2_mem *mem) { *pfrc = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_frame_chain)); @@ -68,14 +68,11 @@ int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, size_t datacnt, ngtcp2_objalloc *objalloc, const ngtcp2_mem *mem) { - size_t need, avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream); - - if (datacnt > 1) { - need = sizeof(ngtcp2_vec) * (datacnt - 1); - - if (need > avail) { - return ngtcp2_frame_chain_extralen_new(pfrc, need - avail, mem); - } + if (datacnt > NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES) { + return ngtcp2_frame_chain_extralen_new(pfrc, + sizeof(ngtcp2_vec) * (datacnt - 1) - + NGTCP2_FRAME_CHAIN_STREAM_AVAIL, + mem); } return ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); @@ -139,9 +136,7 @@ void ngtcp2_frame_chain_objalloc_del(ngtcp2_frame_chain *frc, switch (frc->fr.type) { case NGTCP2_FRAME_CRYPTO: case NGTCP2_FRAME_STREAM: - if (frc->fr.stream.datacnt && - sizeof(ngtcp2_vec) * (frc->fr.stream.datacnt - 1) > - sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream)) { + if (frc->fr.stream.datacnt > NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES) { ngtcp2_frame_chain_del(frc, mem); return; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h index 656fa5b799450e..e5b6779c0f03c2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -74,7 +74,7 @@ struct ngtcp2_frame_chain { }; }; -ngtcp2_objalloc_decl(frame_chain, ngtcp2_frame_chain, oplent); +ngtcp2_objalloc_decl(frame_chain, ngtcp2_frame_chain, oplent) /* * ngtcp2_bind_frame_chains binds two frame chains |a| and |b| using @@ -121,12 +121,29 @@ int ngtcp2_frame_chain_objalloc_new(ngtcp2_frame_chain **pfrc, int ngtcp2_frame_chain_extralen_new(ngtcp2_frame_chain **pfrc, size_t extralen, const ngtcp2_mem *mem); +/* NGTCP2_FRAME_CHAIN_STREAM_AVAIL is the number of additional bytes + available after ngtcp2_stream when it is embedded in + ngtcp2_frame. 
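The threshold test in ngtcp2_frame_chain_stream_datacnt_objalloc_new above, together with the NGTCP2_FRAME_CHAIN_STREAM_AVAIL and NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES macros it relies on, captures a capacity calculation: an ngtcp2_stream embedded in the ngtcp2_frame union is followed by sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream) spare bytes, and since the stream frame already holds one ngtcp2_vec inline, spare / sizeof(ngtcp2_vec) + 1 data vectors fit before a larger, non-objalloc allocation is needed. A generic sketch of the same calculation; the vec, stream_frame, and frame types are hypothetical stand-ins, not the ngtcp2 definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: a smaller struct embedded in a larger union,
       with a one-element vector array that can spill into the union's
       spare tail bytes -- the same layout trick the frame chain uses. */
    typedef struct { const void *base; size_t len; } vec;
    typedef struct { int type; size_t datacnt; vec data[1]; } stream_frame;
    typedef union { int type; stream_frame stream; unsigned char pad[64]; } frame;

    int main(void) {
      size_t avail = sizeof(frame) - sizeof(stream_frame);
      size_t thres = avail / sizeof(vec) + 1; /* +1: data[1] is already inline */

      printf("spare bytes: %zu, vectors without extra allocation: %zu\n",
             avail, thres);
      return 0;
    }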
*/ +#define NGTCP2_FRAME_CHAIN_STREAM_AVAIL \ + (sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream)) + +/* NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES is the number of datacnt + that changes allocation method. If datacnt is more than this + value, ngtcp2_frame_chain is allocated without ngtcp2_objalloc. + Otherwise, it is allocated using ngtcp2_objalloc. */ +#define NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES \ + (NGTCP2_FRAME_CHAIN_STREAM_AVAIL / sizeof(ngtcp2_vec) + 1) + /* * ngtcp2_frame_chain_stream_datacnt_objalloc_new works like * ngtcp2_frame_chain_new, but it allocates enough data to store * additional |datacnt| - 1 ngtcp2_vec object after ngtcp2_stream - * object. If no additional space is required, - * ngtcp2_frame_chain_objalloc_new is called internally. + * object. If no additional space is required, in other words, + * |datacnt| <= NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES, + * ngtcp2_frame_chain_objalloc_new is called internally. Otherwise, + * ngtcp2_frame_chain_extralen_new is used and objalloc is not used. + * Therefore, it is important to call ngtcp2_frame_chain_objalloc_del + * without changing datacnt field. */ int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, size_t datacnt, @@ -168,4 +185,4 @@ void ngtcp2_frame_chain_list_objalloc_del(ngtcp2_frame_chain *frc, ngtcp2_objalloc *objalloc, const ngtcp2_mem *mem); -#endif /* NGTCP2_FRAME_CHAIN_H */ +#endif /* !defined(NGTCP2_FRAME_CHAIN_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c index 87c23898e8207d..3bfa398480c382 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c @@ -28,22 +28,16 @@ #include void ngtcp2_gaptr_init(ngtcp2_gaptr *gaptr, const ngtcp2_mem *mem) { - ngtcp2_ksl_init(&gaptr->gap, ngtcp2_ksl_range_compar, sizeof(ngtcp2_range), - mem); + ngtcp2_ksl_init(&gaptr->gap, ngtcp2_ksl_range_compar, ngtcp2_ksl_range_search, + sizeof(ngtcp2_range), mem); gaptr->mem = mem; } static int gaptr_gap_init(ngtcp2_gaptr *gaptr) { ngtcp2_range range = {0, UINT64_MAX}; - int rv; - - rv = ngtcp2_ksl_insert(&gaptr->gap, NULL, &range, NULL); - if (rv != 0) { - return rv; - } - return 0; + return ngtcp2_ksl_insert(&gaptr->gap, NULL, &range, NULL); } void ngtcp2_gaptr_free(ngtcp2_gaptr *gaptr) { @@ -66,8 +60,8 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { } } - it = ngtcp2_ksl_lower_bound_compar(&gaptr->gap, &q, - ngtcp2_ksl_range_exclusive_compar); + it = ngtcp2_ksl_lower_bound_search(&gaptr->gap, &q, + ngtcp2_ksl_range_exclusive_search); for (; !ngtcp2_ksl_it_end(&it);) { k = *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); @@ -80,7 +74,9 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { ngtcp2_ksl_remove_hint(&gaptr->gap, &it, &it, &k); continue; } + ngtcp2_range_cut(&l, &r, &k, &m); + if (ngtcp2_range_len(&l)) { ngtcp2_ksl_update_key(&gaptr->gap, &k, &l); @@ -93,26 +89,26 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { } else if (ngtcp2_range_len(&r)) { ngtcp2_ksl_update_key(&gaptr->gap, &k, &r); } + ngtcp2_ksl_it_next(&it); } + return 0; } -uint64_t ngtcp2_gaptr_first_gap_offset(ngtcp2_gaptr *gaptr) { +uint64_t ngtcp2_gaptr_first_gap_offset(const ngtcp2_gaptr *gaptr) { ngtcp2_ksl_it it; - ngtcp2_range r; if (ngtcp2_ksl_len(&gaptr->gap) == 0) { return 0; } it = ngtcp2_ksl_begin(&gaptr->gap); - r = *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); - return r.begin; + return ((ngtcp2_range *)ngtcp2_ksl_it_key(&it))->begin; } -ngtcp2_range 
ngtcp2_gaptr_get_first_gap_after(ngtcp2_gaptr *gaptr, +ngtcp2_range ngtcp2_gaptr_get_first_gap_after(const ngtcp2_gaptr *gaptr, uint64_t offset) { ngtcp2_range q = {offset, offset + 1}; ngtcp2_ksl_it it; @@ -122,29 +118,27 @@ ngtcp2_range ngtcp2_gaptr_get_first_gap_after(ngtcp2_gaptr *gaptr, return r; } - it = ngtcp2_ksl_lower_bound_compar(&gaptr->gap, &q, - ngtcp2_ksl_range_exclusive_compar); + it = ngtcp2_ksl_lower_bound_search(&gaptr->gap, &q, + ngtcp2_ksl_range_exclusive_search); assert(!ngtcp2_ksl_it_end(&it)); return *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); } -int ngtcp2_gaptr_is_pushed(ngtcp2_gaptr *gaptr, uint64_t offset, +int ngtcp2_gaptr_is_pushed(const ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { ngtcp2_range q = {offset, offset + datalen}; ngtcp2_ksl_it it; - ngtcp2_range k; ngtcp2_range m; if (ngtcp2_ksl_len(&gaptr->gap) == 0) { return 0; } - it = ngtcp2_ksl_lower_bound_compar(&gaptr->gap, &q, - ngtcp2_ksl_range_exclusive_compar); - k = *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); - m = ngtcp2_range_intersect(&q, &k); + it = ngtcp2_ksl_lower_bound_search(&gaptr->gap, &q, + ngtcp2_ksl_range_exclusive_search); + m = ngtcp2_range_intersect(&q, (ngtcp2_range *)ngtcp2_ksl_it_key(&it)); return ngtcp2_range_len(&m) == 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h index 0f100a81c4286c..3120676cf849d4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,8 +39,9 @@ * ngtcp2_gaptr maintains the gap in the range [0, UINT64_MAX). */ typedef struct ngtcp2_gaptr { - /* gap maintains the range of offset which is not received - yet. Initially, its range is [0, UINT64_MAX). */ + /* gap maintains the range of offset which is not pushed + yet. Initially, its range is [0, UINT64_MAX). "gap" is the range + that is not pushed yet. */ ngtcp2_ksl gap; /* mem is custom memory allocator */ const ngtcp2_mem *mem; @@ -57,8 +58,7 @@ void ngtcp2_gaptr_init(ngtcp2_gaptr *gaptr, const ngtcp2_mem *mem); void ngtcp2_gaptr_free(ngtcp2_gaptr *gaptr); /* - * ngtcp2_gaptr_push adds new data of length |datalen| at the stream - * offset |offset|. + * ngtcp2_gaptr_push pushes the range [offset, offset + datalen). * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -72,20 +72,20 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen); * ngtcp2_gaptr_first_gap_offset returns the offset to the first gap. * If there is no gap, it returns UINT64_MAX. */ -uint64_t ngtcp2_gaptr_first_gap_offset(ngtcp2_gaptr *gaptr); +uint64_t ngtcp2_gaptr_first_gap_offset(const ngtcp2_gaptr *gaptr); /* * ngtcp2_gaptr_get_first_gap_after returns the first gap which - * overlaps or comes after |offset|. + * includes or comes after |offset|. */ -ngtcp2_range ngtcp2_gaptr_get_first_gap_after(ngtcp2_gaptr *gaptr, +ngtcp2_range ngtcp2_gaptr_get_first_gap_after(const ngtcp2_gaptr *gaptr, uint64_t offset); /* * ngtcp2_gaptr_is_pushed returns nonzero if range [offset, offset + * datalen) is completely pushed into this object. 
*/ -int ngtcp2_gaptr_is_pushed(ngtcp2_gaptr *gaptr, uint64_t offset, +int ngtcp2_gaptr_is_pushed(const ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen); /* @@ -95,4 +95,4 @@ int ngtcp2_gaptr_is_pushed(ngtcp2_gaptr *gaptr, uint64_t offset, */ void ngtcp2_gaptr_drop_first_gap(ngtcp2_gaptr *gaptr); -#endif /* NGTCP2_GAPTR_H */ +#endif /* !defined(NGTCP2_GAPTR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c index d9880227690faf..2cf9d3cbfd1393 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c @@ -26,10 +26,8 @@ #include -void ngtcp2_idtr_init(ngtcp2_idtr *idtr, int server, const ngtcp2_mem *mem) { +void ngtcp2_idtr_init(ngtcp2_idtr *idtr, const ngtcp2_mem *mem) { ngtcp2_gaptr_init(&idtr->gap, mem); - - idtr->server = server; } void ngtcp2_idtr_free(ngtcp2_idtr *idtr) { @@ -41,8 +39,7 @@ void ngtcp2_idtr_free(ngtcp2_idtr *idtr) { } /* - * id_from_stream_id translates |stream_id| to id space used by - * ngtcp2_idtr. + * id_from_stream_id translates |stream_id| to an internal ID. */ static uint64_t id_from_stream_id(int64_t stream_id) { return (uint64_t)(stream_id >> 2); @@ -51,9 +48,6 @@ static uint64_t id_from_stream_id(int64_t stream_id) { int ngtcp2_idtr_open(ngtcp2_idtr *idtr, int64_t stream_id) { uint64_t q; - assert((idtr->server && (stream_id % 2)) || - (!idtr->server && (stream_id % 2)) == 0); - q = id_from_stream_id(stream_id); if (ngtcp2_gaptr_is_pushed(&idtr->gap, q, 1)) { @@ -63,17 +57,10 @@ int ngtcp2_idtr_open(ngtcp2_idtr *idtr, int64_t stream_id) { return ngtcp2_gaptr_push(&idtr->gap, q, 1); } -int ngtcp2_idtr_is_open(ngtcp2_idtr *idtr, int64_t stream_id) { +int ngtcp2_idtr_is_open(const ngtcp2_idtr *idtr, int64_t stream_id) { uint64_t q; - assert((idtr->server && (stream_id % 2)) || - (!idtr->server && (stream_id % 2)) == 0); - q = id_from_stream_id(stream_id); return ngtcp2_gaptr_is_pushed(&idtr->gap, q, 1); } - -uint64_t ngtcp2_idtr_first_gap(ngtcp2_idtr *idtr) { - return ngtcp2_gaptr_first_gap_offset(&idtr->gap); -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h index edb8c68c8db9b5..0671f5ed91a5cd 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -38,21 +38,17 @@ * ngtcp2_idtr tracks the usage of stream ID. */ typedef struct ngtcp2_idtr { - /* gap maintains the range of ID which is not used yet. Initially, - its range is [0, UINT64_MAX). */ + /* gap maintains the range of an internal ID which is not used yet. + Initially, its range is [0, UINT64_MAX). The internal ID and + stream ID are in the different number spaces. See + id_from_stream_id to convert a stream ID to an internal ID. */ ngtcp2_gaptr gap; - /* server is nonzero if this object records server initiated stream - ID. */ - int server; } ngtcp2_idtr; /* * ngtcp2_idtr_init initializes |idtr|. - * - * If this object records server initiated ID (even number), set - * |server| to nonzero. */ -void ngtcp2_idtr_init(ngtcp2_idtr *idtr, int server, const ngtcp2_mem *mem); +void ngtcp2_idtr_init(ngtcp2_idtr *idtr, const ngtcp2_mem *mem); /* * ngtcp2_idtr_free frees resources allocated for |idtr|. @@ -60,30 +56,21 @@ void ngtcp2_idtr_init(ngtcp2_idtr *idtr, int server, const ngtcp2_mem *mem); void ngtcp2_idtr_free(ngtcp2_idtr *idtr); /* - * ngtcp2_idtr_open claims that |stream_id| is in used. 
+ * ngtcp2_idtr_open claims that |stream_id| is in use. * * It returns 0 if it succeeds, or one of the following negative error * codes: * * NGTCP2_ERR_STREAM_IN_USE - * ID has already been used. + * |stream_id| has already been used. * NGTCP2_ERR_NOMEM * Out of memory. */ int ngtcp2_idtr_open(ngtcp2_idtr *idtr, int64_t stream_id); /* - * ngtcp2_idtr_open tells whether ID |stream_id| is in used or not. - * - * It returns nonzero if |stream_id| is used. - */ -int ngtcp2_idtr_is_open(ngtcp2_idtr *idtr, int64_t stream_id); - -/* - * ngtcp2_idtr_first_gap returns the first id of first gap. If there - * is no gap, it returns UINT64_MAX. The returned id is an id space - * used in this object internally, and not stream ID. + * ngtcp2_idtr_open returns nonzero if |stream_id| is in use. */ -uint64_t ngtcp2_idtr_first_gap(ngtcp2_idtr *idtr); +int ngtcp2_idtr_is_open(const ngtcp2_idtr *idtr, int64_t stream_id); -#endif /* NGTCP2_IDTR_H */ +#endif /* !defined(NGTCP2_IDTR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c index 0ccc048b5b16b1..5e74f647241816 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c @@ -35,11 +35,13 @@ static ngtcp2_ksl_blk null_blk = {{{NULL, NULL, 0, 0, {0}}}}; -ngtcp2_objalloc_def(ksl_blk, ngtcp2_ksl_blk, oplent); +ngtcp2_objalloc_def(ksl_blk, ngtcp2_ksl_blk, oplent) static size_t ksl_nodelen(size_t keylen) { - return (sizeof(ngtcp2_ksl_node) + keylen - sizeof(uint64_t) + 0xfu) & - ~(uintptr_t)0xfu; + assert(keylen >= sizeof(uint64_t)); + + return (sizeof(ngtcp2_ksl_node) + keylen - sizeof(uint64_t) + 0x7u) & + ~(uintptr_t)0x7u; } static size_t ksl_blklen(size_t nodelen) { @@ -55,20 +57,21 @@ static void ksl_node_set_key(ngtcp2_ksl *ksl, ngtcp2_ksl_node *node, memcpy(node->key, key, ksl->keylen); } -void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, size_t keylen, +void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, + ngtcp2_ksl_search search, size_t keylen, const ngtcp2_mem *mem) { size_t nodelen = ksl_nodelen(keylen); ngtcp2_objalloc_init(&ksl->blkalloc, - ((ksl_blklen(nodelen) + 0xfu) & ~(uintptr_t)0xfu) * 8, - mem); + (ksl_blklen(nodelen) + 0xfu) & ~(uintptr_t)0xfu, mem); ksl->head = NULL; ksl->front = ksl->back = NULL; ksl->compar = compar; + ksl->search = search; + ksl->n = 0; ksl->keylen = keylen; ksl->nodelen = nodelen; - ksl->n = 0; } static ngtcp2_ksl_blk *ksl_blk_objalloc_new(ngtcp2_ksl *ksl) { @@ -82,6 +85,7 @@ static void ksl_blk_objalloc_del(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { static int ksl_head_init(ngtcp2_ksl *ksl) { ngtcp2_ksl_blk *head = ksl_blk_objalloc_new(ksl); + if (!head) { return NGTCP2_ERR_NOMEM; } @@ -111,7 +115,7 @@ static void ksl_free_blk(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { ksl_blk_objalloc_del(ksl, blk); } -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ void ngtcp2_ksl_free(ngtcp2_ksl *ksl) { if (!ksl || !ksl->head) { @@ -120,7 +124,7 @@ void ngtcp2_ksl_free(ngtcp2_ksl *ksl) { #ifdef NOMEMPOOL ksl_free_blk(ksl, ksl->head); -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ ngtcp2_objalloc_free(&ksl->blkalloc); } @@ -143,21 +147,22 @@ static ngtcp2_ksl_blk *ksl_split_blk(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { rblk->next = blk->next; blk->next = rblk; + if (rblk->next) { rblk->next->prev = rblk; } else if (ksl->back == blk) { ksl->back = rblk; } + rblk->prev = blk; rblk->leaf = blk->leaf; rblk->n = blk->n / 2; + blk->n -= rblk->n; - memcpy(rblk->nodes, blk->nodes + ksl->nodelen * (blk->n - rblk->n), + 
memcpy(rblk->nodes, blk->nodes + ksl->nodelen * blk->n, ksl->nodelen * rblk->n); - blk->n -= rblk->n; - assert(blk->n >= NGTCP2_KSL_MIN_NBLK); assert(rblk->n >= NGTCP2_KSL_MIN_NBLK); @@ -173,7 +178,7 @@ static ngtcp2_ksl_blk *ksl_split_blk(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { * codes: * * NGTCP2_ERR_NOMEM - * Out of memory. + * Out of memory. */ static int ksl_split_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { ngtcp2_ksl_node *node; @@ -207,7 +212,7 @@ static int ksl_split_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { * codes: * * NGTCP2_ERR_NOMEM - * Out of memory. + * Out of memory. */ static int ksl_split_head(ngtcp2_ksl *ksl) { ngtcp2_ksl_blk *rblk = NULL, *lblk, *nhead = NULL; @@ -221,10 +226,12 @@ static int ksl_split_head(ngtcp2_ksl *ksl) { lblk = ksl->head; nhead = ksl_blk_objalloc_new(ksl); + if (nhead == NULL) { ksl_blk_objalloc_del(ksl, rblk); return NGTCP2_ERR_NOMEM; } + nhead->next = nhead->prev = NULL; nhead->n = 2; nhead->leaf = 0; @@ -243,9 +250,9 @@ static int ksl_split_head(ngtcp2_ksl *ksl) { } /* - * insert_node inserts a node whose key is |key| with the associated - * |data| at the index of |i|. This function assumes that the number - * of nodes contained by |blk| is strictly less than + * ksl_insert_node inserts a node whose key is |key| with the + * associated |data| at the index of |i|. This function assumes that + * the number of nodes contained by |blk| is strictly less than * NGTCP2_KSL_MAX_NBLK. */ static void ksl_insert_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i, @@ -264,19 +271,6 @@ static void ksl_insert_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i, ++blk->n; } -static size_t ksl_bsearch(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, - const ngtcp2_ksl_key *key, ngtcp2_ksl_compar compar) { - size_t i; - ngtcp2_ksl_node *node; - - for (i = 0, node = (ngtcp2_ksl_node *)(void *)blk->nodes; - i < blk->n && compar((ngtcp2_ksl_key *)node->key, key); - ++i, node = (ngtcp2_ksl_node *)(void *)((uint8_t *)node + ksl->nodelen)) - ; - - return i; -} - int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, const ngtcp2_ksl_key *key, void *data) { ngtcp2_ksl_blk *blk; @@ -291,18 +285,17 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, } } - blk = ksl->head; - - if (blk->n == NGTCP2_KSL_MAX_NBLK) { + if (ksl->head->n == NGTCP2_KSL_MAX_NBLK) { rv = ksl_split_head(ksl); if (rv != 0) { return rv; } - blk = ksl->head; } + blk = ksl->head; + for (;;) { - i = ksl_bsearch(ksl, blk, key, ksl->compar); + i = ksl->search(ksl, blk, key); if (blk->leaf) { if (i < blk->n && @@ -310,13 +303,17 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (it) { *it = ngtcp2_ksl_end(ksl); } + return NGTCP2_ERR_INVALID_ARGUMENT; } + ksl_insert_node(ksl, blk, i, key, data); ++ksl->n; + if (it) { ngtcp2_ksl_it_init(it, ksl, blk, i); } + return 0; } @@ -329,16 +326,21 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (rv != 0) { return rv; } + node = ngtcp2_ksl_nth_node(ksl, blk, blk->n - 1); } + ksl_node_set_key(ksl, node, key); blk = node->blk; } + ksl_insert_node(ksl, blk, blk->n, key, data); ++ksl->n; + if (it) { ngtcp2_ksl_it_init(it, ksl, blk, blk->n - 1); } + return 0; } @@ -349,8 +351,10 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (rv != 0) { return rv; } + if (ksl->compar((ngtcp2_ksl_key *)node->key, key)) { node = ngtcp2_ksl_nth_node(ksl, blk, i + 1); + if (ksl->compar((ngtcp2_ksl_key *)node->key, key)) { ksl_node_set_key(ksl, node, key); } @@ -376,19 +380,22 @@ static void ksl_remove_node(ngtcp2_ksl 
*ksl, ngtcp2_ksl_blk *blk, size_t i) { * ksl_merge_node merges 2 nodes which are the nodes at the index of * |i| and |i + 1|. * - * If |blk| is the direct descendant of head (root) block and the head - * block contains just 2 nodes, the merged block becomes head block, - * which decreases the height of |ksl| by 1. + * If |blk| is the head (root) block and it contains just 2 nodes + * before merging nodes, the merged block becomes head block, which + * decreases the height of |ksl| by 1. * * This function returns the pointer to the merged block. */ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { + ngtcp2_ksl_node *lnode; ngtcp2_ksl_blk *lblk, *rblk; assert(i + 1 < blk->n); - lblk = ngtcp2_ksl_nth_node(ksl, blk, i)->blk; + lnode = ngtcp2_ksl_nth_node(ksl, blk, i); + + lblk = lnode->blk; rblk = ngtcp2_ksl_nth_node(ksl, blk, i + 1)->blk; assert(lblk->n + rblk->n < NGTCP2_KSL_MAX_NBLK); @@ -398,6 +405,7 @@ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, lblk->n += rblk->n; lblk->next = rblk->next; + if (lblk->next) { lblk->next->prev = lblk; } else if (ksl->back == rblk) { @@ -411,7 +419,7 @@ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, ksl->head = lblk; } else { ksl_remove_node(ksl, blk, i + 1); - ksl_node_set_key(ksl, ngtcp2_ksl_nth_node(ksl, blk, i), + ksl_node_set_key(ksl, lnode, ngtcp2_ksl_nth_node(ksl, lblk, lblk->n - 1)->key); } @@ -425,6 +433,7 @@ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, */ static void ksl_shift_left(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { ngtcp2_ksl_node *lnode, *rnode; + ngtcp2_ksl_blk *lblk, *rblk; size_t n; assert(i > 0); @@ -432,35 +441,37 @@ static void ksl_shift_left(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { lnode = ngtcp2_ksl_nth_node(ksl, blk, i - 1); rnode = ngtcp2_ksl_nth_node(ksl, blk, i); - assert(lnode->blk->n < NGTCP2_KSL_MAX_NBLK); - assert(rnode->blk->n > NGTCP2_KSL_MIN_NBLK); + lblk = lnode->blk; + rblk = rnode->blk; + + assert(lblk->n < NGTCP2_KSL_MAX_NBLK); + assert(rblk->n > NGTCP2_KSL_MIN_NBLK); - n = (lnode->blk->n + rnode->blk->n + 1) / 2 - lnode->blk->n; + n = (lblk->n + rblk->n + 1) / 2 - lblk->n; assert(n > 0); - assert(lnode->blk->n <= NGTCP2_KSL_MAX_NBLK - n); - assert(rnode->blk->n >= NGTCP2_KSL_MIN_NBLK + n); + assert(lblk->n <= NGTCP2_KSL_MAX_NBLK - n); + assert(rblk->n >= NGTCP2_KSL_MIN_NBLK + n); - memcpy(lnode->blk->nodes + ksl->nodelen * lnode->blk->n, rnode->blk->nodes, - ksl->nodelen * n); + memcpy(lblk->nodes + ksl->nodelen * lblk->n, rblk->nodes, ksl->nodelen * n); - lnode->blk->n += (uint32_t)n; - rnode->blk->n -= (uint32_t)n; + lblk->n += (uint32_t)n; + rblk->n -= (uint32_t)n; - ksl_node_set_key( - ksl, lnode, ngtcp2_ksl_nth_node(ksl, lnode->blk, lnode->blk->n - 1)->key); + ksl_node_set_key(ksl, lnode, + ngtcp2_ksl_nth_node(ksl, lblk, lblk->n - 1)->key); - memmove(rnode->blk->nodes, rnode->blk->nodes + ksl->nodelen * n, - ksl->nodelen * rnode->blk->n); + memmove(rblk->nodes, rblk->nodes + ksl->nodelen * n, ksl->nodelen * rblk->n); } /* * ksl_shift_right moves the last nodes in blk->nodes[i]->blk->nodes * to blk->nodes[i + 1]->blk->nodes in a manner that they have the - * same amount of nodes as much as possible.. + * same amount of nodes as much as possible. 
*/ static void ksl_shift_right(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { ngtcp2_ksl_node *lnode, *rnode; + ngtcp2_ksl_blk *lblk, *rblk; size_t n; assert(i < blk->n - 1); @@ -468,26 +479,27 @@ static void ksl_shift_right(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { lnode = ngtcp2_ksl_nth_node(ksl, blk, i); rnode = ngtcp2_ksl_nth_node(ksl, blk, i + 1); - assert(lnode->blk->n > NGTCP2_KSL_MIN_NBLK); - assert(rnode->blk->n < NGTCP2_KSL_MAX_NBLK); + lblk = lnode->blk; + rblk = rnode->blk; - n = (lnode->blk->n + rnode->blk->n + 1) / 2 - rnode->blk->n; + assert(lblk->n > NGTCP2_KSL_MIN_NBLK); + assert(rblk->n < NGTCP2_KSL_MAX_NBLK); + + n = (lblk->n + rblk->n + 1) / 2 - rblk->n; assert(n > 0); - assert(lnode->blk->n >= NGTCP2_KSL_MIN_NBLK + n); - assert(rnode->blk->n <= NGTCP2_KSL_MAX_NBLK - n); + assert(lblk->n >= NGTCP2_KSL_MIN_NBLK + n); + assert(rblk->n <= NGTCP2_KSL_MAX_NBLK - n); - memmove(rnode->blk->nodes + ksl->nodelen * n, rnode->blk->nodes, - ksl->nodelen * rnode->blk->n); + memmove(rblk->nodes + ksl->nodelen * n, rblk->nodes, ksl->nodelen * rblk->n); - rnode->blk->n += (uint32_t)n; - lnode->blk->n -= (uint32_t)n; + rblk->n += (uint32_t)n; + lblk->n -= (uint32_t)n; - memcpy(rnode->blk->nodes, lnode->blk->nodes + ksl->nodelen * lnode->blk->n, - ksl->nodelen * n); + memcpy(rblk->nodes, lblk->nodes + ksl->nodelen * lblk->n, ksl->nodelen * n); - ksl_node_set_key( - ksl, lnode, ngtcp2_ksl_nth_node(ksl, lnode->blk, lnode->blk->n - 1)->key); + ksl_node_set_key(ksl, lnode, + ngtcp2_ksl_nth_node(ksl, lblk, lblk->n - 1)->key); } /* @@ -531,23 +543,24 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_node *node; size_t i; - if (!ksl->head) { + if (!blk) { return NGTCP2_ERR_INVALID_ARGUMENT; } if (!blk->leaf && blk->n == 2 && ngtcp2_ksl_nth_node(ksl, blk, 0)->blk->n == NGTCP2_KSL_MIN_NBLK && ngtcp2_ksl_nth_node(ksl, blk, 1)->blk->n == NGTCP2_KSL_MIN_NBLK) { - blk = ksl_merge_node(ksl, ksl->head, 0); + blk = ksl_merge_node(ksl, blk, 0); } for (;;) { - i = ksl_bsearch(ksl, blk, key, ksl->compar); + i = ksl->search(ksl, blk, key); if (i == blk->n) { if (it) { *it = ngtcp2_ksl_end(ksl); } + return NGTCP2_ERR_INVALID_ARGUMENT; } @@ -556,10 +569,13 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (it) { *it = ngtcp2_ksl_end(ksl); } + return NGTCP2_ERR_INVALID_ARGUMENT; } + ksl_remove_node(ksl, blk, i); --ksl->n; + if (it) { if (blk->n == i && blk->next) { ngtcp2_ksl_it_init(it, ksl, blk->next, 0); @@ -567,6 +583,7 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_it_init(it, ksl, blk, i); } } + return 0; } @@ -583,6 +600,7 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_nth_node(ksl, blk, i + 1)->blk->n > NGTCP2_KSL_MIN_NBLK) { ksl_shift_left(ksl, blk, i + 1); blk = node->blk; + continue; } @@ -590,6 +608,7 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_nth_node(ksl, blk, i - 1)->blk->n > NGTCP2_KSL_MIN_NBLK) { ksl_shift_right(ksl, blk, i - 1); blk = node->blk; + continue; } @@ -604,50 +623,14 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, } } -ngtcp2_ksl_it ngtcp2_ksl_lower_bound(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key) { - ngtcp2_ksl_blk *blk = ksl->head; - ngtcp2_ksl_it it; - size_t i; - - if (!blk) { - ngtcp2_ksl_it_init(&it, ksl, &null_blk, 0); - return it; - } - - for (;;) { - i = ksl_bsearch(ksl, blk, key, ksl->compar); - - if (blk->leaf) { - if (i == blk->n && blk->next) { - blk = blk->next; - i = 0; 
- } - ngtcp2_ksl_it_init(&it, ksl, blk, i); - return it; - } - - if (i == blk->n) { - /* This happens if descendant has smaller key. Fast forward to - find last node in this subtree. */ - for (; !blk->leaf; blk = ngtcp2_ksl_nth_node(ksl, blk, blk->n - 1)->blk) - ; - if (blk->next) { - blk = blk->next; - i = 0; - } else { - i = blk->n; - } - ngtcp2_ksl_it_init(&it, ksl, blk, i); - return it; - } - blk = ngtcp2_ksl_nth_node(ksl, blk, i)->blk; - } + return ngtcp2_ksl_lower_bound_search(ksl, key, ksl->search); } -ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound_search(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key, - ngtcp2_ksl_compar compar) { + ngtcp2_ksl_search search) { ngtcp2_ksl_blk *blk = ksl->head; ngtcp2_ksl_it it; size_t i; @@ -658,14 +641,16 @@ ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, } for (;;) { - i = ksl_bsearch(ksl, blk, key, compar); + i = search(ksl, blk, key); if (blk->leaf) { if (i == blk->n && blk->next) { blk = blk->next; i = 0; } + ngtcp2_ksl_it_init(&it, ksl, blk, i); + return it; } @@ -674,15 +659,19 @@ ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, find last node in this subtree. */ for (; !blk->leaf; blk = ngtcp2_ksl_nth_node(ksl, blk, blk->n - 1)->blk) ; + if (blk->next) { blk = blk->next; i = 0; } else { i = blk->n; } + ngtcp2_ksl_it_init(&it, ksl, blk, i); + return it; } + blk = ngtcp2_ksl_nth_node(ksl, blk, i)->blk; } } @@ -696,7 +685,7 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, assert(ksl->head); for (;;) { - i = ksl_bsearch(ksl, blk, old_key, ksl->compar); + i = ksl->search(ksl, blk, old_key); assert(i < blk->n); node = ngtcp2_ksl_nth_node(ksl, blk, i); @@ -704,6 +693,7 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, if (blk->leaf) { assert(key_equal(ksl->compar, (ngtcp2_ksl_key *)node->key, old_key)); ksl_node_set_key(ksl, node, new_key); + return; } @@ -716,7 +706,7 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, } } -size_t ngtcp2_ksl_len(ngtcp2_ksl *ksl) { return ksl->n; } +size_t ngtcp2_ksl_len(const ngtcp2_ksl *ksl) { return ksl->n; } void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { if (!ksl->head) { @@ -725,7 +715,7 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { #ifdef NOMEMPOOL ksl_free_blk(ksl, ksl->head); -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ ksl->front = ksl->back = ksl->head = NULL; ksl->n = 0; @@ -734,7 +724,8 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { } #ifndef WIN32 -static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { +static void ksl_print(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + size_t level) { size_t i; ngtcp2_ksl_node *node; @@ -745,7 +736,9 @@ static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { node = ngtcp2_ksl_nth_node(ksl, blk, i); fprintf(stderr, " %" PRId64, *(int64_t *)(void *)node->key); } + fprintf(stderr, "\n"); + return; } @@ -754,14 +747,14 @@ static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { } } -void ngtcp2_ksl_print(ngtcp2_ksl *ksl) { +void ngtcp2_ksl_print(const ngtcp2_ksl *ksl) { if (!ksl->head) { return; } ksl_print(ksl, ksl->head, 0); } -#endif /* !WIN32 */ +#endif /* !defined(WIN32) */ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl) { ngtcp2_ksl_it it; @@ -815,9 +808,49 @@ int ngtcp2_ksl_range_compar(const ngtcp2_ksl_key *lhs, return a->begin < b->begin; } +ngtcp2_ksl_search_def(range, ngtcp2_ksl_range_compar) + +size_t ngtcp2_ksl_range_search(const 
ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_range_search(ksl, blk, key); +} + int ngtcp2_ksl_range_exclusive_compar(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { const ngtcp2_range *a = lhs, *b = rhs; - return a->begin < b->begin && - !(ngtcp2_max(a->begin, b->begin) < ngtcp2_min(a->end, b->end)); + return a->begin < b->begin && !(ngtcp2_max_uint64(a->begin, b->begin) < + ngtcp2_min_uint64(a->end, b->end)); +} + +ngtcp2_ksl_search_def(range_exclusive, ngtcp2_ksl_range_exclusive_compar) + +size_t ngtcp2_ksl_range_exclusive_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_range_exclusive_search(ksl, blk, key); +} + +int ngtcp2_ksl_uint64_less(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs) { + return *(uint64_t *)lhs < *(uint64_t *)rhs; +} + +ngtcp2_ksl_search_def(uint64_less, ngtcp2_ksl_uint64_less) + +size_t ngtcp2_ksl_uint64_less_search(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_uint64_less_search(ksl, blk, key); +} + +int ngtcp2_ksl_int64_greater(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs) { + return *(int64_t *)lhs > *(int64_t *)rhs; +} + +ngtcp2_ksl_search_def(int64_greater, ngtcp2_ksl_int64_greater) + +size_t ngtcp2_ksl_int64_greater_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_int64_greater_search(ksl, blk, key); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h index 7e08f15cdae6e8..de78bcb8070f53 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -35,16 +35,12 @@ #include "ngtcp2_objalloc.h" -/* - * Skip List using single key instead of range. - */ - #define NGTCP2_KSL_DEGR 16 /* NGTCP2_KSL_MAX_NBLK is the maximum number of nodes which a single block can contain. */ #define NGTCP2_KSL_MAX_NBLK (2 * NGTCP2_KSL_DEGR - 1) /* NGTCP2_KSL_MIN_NBLK is the minimum number of nodes which a single - block other than root must contains. */ + block other than root must contain. */ #define NGTCP2_KSL_MIN_NBLK (NGTCP2_KSL_DEGR - 1) /* @@ -85,7 +81,8 @@ struct ngtcp2_ksl_blk { struct { /* next points to the next block if leaf field is nonzero. */ ngtcp2_ksl_blk *next; - /* prev points to the previous block if leaf field is nonzero. */ + /* prev points to the previous block if leaf field is + nonzero. */ ngtcp2_ksl_blk *prev; /* n is the number of nodes this object contains in nodes. */ uint32_t n; @@ -106,7 +103,7 @@ struct ngtcp2_ksl_blk { }; }; -ngtcp2_objalloc_decl(ksl_blk, ngtcp2_ksl_blk, oplent); +ngtcp2_objalloc_decl(ksl_blk, ngtcp2_ksl_blk, oplent) /* * ngtcp2_ksl_compar is a function type which returns nonzero if key @@ -117,10 +114,37 @@ typedef int (*ngtcp2_ksl_compar)(const ngtcp2_ksl_key *lhs, typedef struct ngtcp2_ksl ngtcp2_ksl; +/* + * ngtcp2_ksl_search is a function to search for the first element in + * |blk|->nodes which is not ordered before |key|. It returns the + * index of such element. It returns |blk|->n if there is no such + * element. + */ +typedef size_t (*ngtcp2_ksl_search)(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +/* + * ngtcp2_ksl_search_def is a macro to implement ngtcp2_ksl_search + * with COMPAR which is supposed to be ngtcp2_ksl_compar. 
+ */ +#define ngtcp2_ksl_search_def(NAME, COMPAR) \ + static size_t ksl_##NAME##_search( \ + const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, const ngtcp2_ksl_key *key) { \ + size_t i; \ + ngtcp2_ksl_node *node; \ + \ + for (i = 0, node = (ngtcp2_ksl_node *)(void *)blk->nodes; \ + i < blk->n && COMPAR((ngtcp2_ksl_key *)node->key, key); ++i, \ + node = (ngtcp2_ksl_node *)(void *)((uint8_t *)node + ksl->nodelen)) \ + ; \ + \ + return i; \ + } + typedef struct ngtcp2_ksl_it ngtcp2_ksl_it; /* - * ngtcp2_ksl_it is a forward iterator to iterate nodes. + * ngtcp2_ksl_it is a bidirectional iterator to iterate nodes. */ struct ngtcp2_ksl_it { const ngtcp2_ksl *ksl; @@ -140,6 +164,8 @@ struct ngtcp2_ksl { /* back points to the last leaf block. */ ngtcp2_ksl_blk *back; ngtcp2_ksl_compar compar; + ngtcp2_ksl_search search; + /* n is the number of elements stored. */ size_t n; /* keylen is the size of key */ size_t keylen; @@ -150,9 +176,12 @@ struct ngtcp2_ksl { /* * ngtcp2_ksl_init initializes |ksl|. |compar| specifies compare - * function. |keylen| is the length of key. + * function. |search| is a search function which must use |compar|. + * |keylen| is the length of key and must be at least + * sizeof(uint64_t). */ -void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, size_t keylen, +void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, + ngtcp2_ksl_search search, size_t keylen, const ngtcp2_mem *mem); /* @@ -165,15 +194,15 @@ void ngtcp2_ksl_free(ngtcp2_ksl *ksl); /* * ngtcp2_ksl_insert inserts |key| with its associated |data|. On * successful insertion, the iterator points to the inserted node is - * stored in |*it|. + * stored in |*it| if |it| is not NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_NOMEM - * Out of memory. + * Out of memory. * NGTCP2_ERR_INVALID_ARGUMENT - * |key| already exists. + * |key| already exists. */ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, const ngtcp2_ksl_key *key, void *data); @@ -184,13 +213,14 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, * This function assigns the iterator to |*it|, which points to the * node which is located at the right next of the removed node if |it| * is not NULL. If |key| is not found, no deletion takes place and - * the return value of ngtcp2_ksl_end(ksl) is assigned to |*it|. + * the return value of ngtcp2_ksl_end(ksl) is assigned to |*it| if + * |it| is not NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_INVALID_ARGUMENT - * |key| does not exist. + * |key| does not exist. */ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, const ngtcp2_ksl_key *key); @@ -213,16 +243,16 @@ int ngtcp2_ksl_remove_hint(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, * node, it returns the iterator which satisfies ngtcp2_ksl_it_end(it) * != 0. */ -ngtcp2_ksl_it ngtcp2_ksl_lower_bound(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key); /* - * ngtcp2_ksl_lower_bound_compar works like ngtcp2_ksl_lower_bound, - * but it takes custom function |compar| to do lower bound search. + * ngtcp2_ksl_lower_bound_search works like ngtcp2_ksl_lower_bound, + * but it takes custom function |search| to do lower bound search. 
*/ -ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound_search(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key, - ngtcp2_ksl_compar compar); + ngtcp2_ksl_search search); /* * ngtcp2_ksl_update_key replaces the key of nodes which has |old_key| @@ -235,7 +265,8 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, /* * ngtcp2_ksl_begin returns the iterator which points to the first * node. If there is no node in |ksl|, it returns the iterator which - * satisfies ngtcp2_ksl_it_end(it) != 0. + * satisfies both ngtcp2_ksl_it_begin(it) != 0 and + * ngtcp2_ksl_it_end(it) != 0. */ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl); @@ -243,14 +274,15 @@ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl); * ngtcp2_ksl_end returns the iterator which points to the node * following the last node. The returned object satisfies * ngtcp2_ksl_it_end(). If there is no node in |ksl|, it returns the - * iterator which satisfies ngtcp2_ksl_it_begin(it) != 0. + * iterator which satisfies ngtcp2_ksl_it_begin(it) != 0 and + * ngtcp2_ksl_it_end(it) != 0. */ ngtcp2_ksl_it ngtcp2_ksl_end(const ngtcp2_ksl *ksl); /* * ngtcp2_ksl_len returns the number of elements stored in |ksl|. */ -size_t ngtcp2_ksl_len(ngtcp2_ksl *ksl); +size_t ngtcp2_ksl_len(const ngtcp2_ksl *ksl); /* * ngtcp2_ksl_clear removes all elements stored in |ksl|. @@ -269,8 +301,8 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl); * that the key is of type int64_t. This function should be used for * the debugging purpose only. */ -void ngtcp2_ksl_print(ngtcp2_ksl *ksl); -#endif /* !WIN32 */ +void ngtcp2_ksl_print(const ngtcp2_ksl *ksl); +#endif /* !defined(WIN32) */ /* * ngtcp2_ksl_it_init initializes |it|. @@ -293,8 +325,8 @@ void ngtcp2_ksl_it_init(ngtcp2_ksl_it *it, const ngtcp2_ksl *ksl, */ #define ngtcp2_ksl_it_next(IT) \ (++(IT)->i == (IT)->blk->n && (IT)->blk->next \ - ? ((IT)->blk = (IT)->blk->next, (IT)->i = 0) \ - : 0) + ? ((IT)->blk = (IT)->blk->next, (IT)->i = 0) \ + : 0) /* * ngtcp2_ksl_it_prev moves backward the iterator by one. It is @@ -304,16 +336,16 @@ void ngtcp2_ksl_it_init(ngtcp2_ksl_it *it, const ngtcp2_ksl *ksl, void ngtcp2_ksl_it_prev(ngtcp2_ksl_it *it); /* - * ngtcp2_ksl_it_end returns nonzero if |it| points to the beyond the - * last node. + * ngtcp2_ksl_it_end returns nonzero if |it| points to the one beyond + * the last node. */ #define ngtcp2_ksl_it_end(IT) \ ((IT)->blk->n == (IT)->i && (IT)->blk->next == NULL) /* * ngtcp2_ksl_it_begin returns nonzero if |it| points to the first - * node. |it| might satisfy both ngtcp2_ksl_it_begin(&it) and - * ngtcp2_ksl_it_end(&it) if the skip list has no node. + * node. |it| might satisfy both ngtcp2_ksl_it_begin(it) != 0 and + * ngtcp2_ksl_it_end(it) != 0 if the skip list has no node. */ int ngtcp2_ksl_it_begin(const ngtcp2_ksl_it *it); @@ -327,21 +359,67 @@ int ngtcp2_ksl_it_begin(const ngtcp2_ksl_it *it); /* * ngtcp2_ksl_range_compar is an implementation of ngtcp2_ksl_compar. - * lhs->ptr and rhs->ptr must point to ngtcp2_range object and the - * function returns nonzero if (const ngtcp2_range *)(lhs->ptr)->begin - * < (const ngtcp2_range *)(rhs->ptr)->begin. + * |lhs| and |rhs| must point to ngtcp2_range object, and the function + * returns nonzero if ((const ngtcp2_range *)lhs)->begin < ((const + * ngtcp2_range *)rhs)->begin. 
*/ int ngtcp2_ksl_range_compar(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs); +/* + * ngtcp2_ksl_range_search is an implementation of ngtcp2_ksl_search + * that uses ngtcp2_ksl_range_compar. + */ +size_t ngtcp2_ksl_range_search(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + /* * ngtcp2_ksl_range_exclusive_compar is an implementation of - * ngtcp2_ksl_compar. lhs->ptr and rhs->ptr must point to - * ngtcp2_range object and the function returns nonzero if (const - * ngtcp2_range *)(lhs->ptr)->begin < (const ngtcp2_range - * *)(rhs->ptr)->begin and the 2 ranges do not intersect. + * ngtcp2_ksl_compar. |lhs| and |rhs| must point to ngtcp2_range + * object, and the function returns nonzero if ((const ngtcp2_range + * *)lhs)->begin < ((const ngtcp2_range *)rhs)->begin, and the 2 + * ranges do not intersect. */ int ngtcp2_ksl_range_exclusive_compar(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs); -#endif /* NGTCP2_KSL_H */ +/* + * ngtcp2_ksl_range_exclusive_search is an implementation of + * ngtcp2_ksl_search that uses ngtcp2_ksl_range_exclusive_compar. + */ +size_t ngtcp2_ksl_range_exclusive_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +/* + * ngtcp2_ksl_uint64_less is an implementation of ngtcp2_ksl_compar. + * |lhs| and |rhs| must point to uint64_t objects, and the function + * returns nonzero if *(uint64_t *)|lhs| < *(uint64_t *)|rhs|. + */ +int ngtcp2_ksl_uint64_less(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs); + +/* + * ngtcp2_ksl_uint64_less_search is an implementation of + * ngtcp2_ksl_search that uses ngtcp2_ksl_uint64_less. + */ +size_t ngtcp2_ksl_uint64_less_search(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +/* + * ngtcp2_ksl_int64_greater is an implementation of ngtcp2_ksl_compar. + * |lhs| and |rhs| must point to int64_t objects, and the function + * returns nonzero if *(int64_t *)|lhs| > *(int64_t *)|rhs|. + */ +int ngtcp2_ksl_int64_greater(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs); + +/* + * ngtcp2_ksl_int64_greater_search is an implementation of + * ngtcp2_ksl_search that uses ngtcp2_ksl_int64_greater. 
+ */ +size_t ngtcp2_ksl_int64_greater_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +#endif /* !defined(NGTCP2_KSL_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c index 93922a29c319f4..fc4eb443517d40 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c @@ -27,7 +27,7 @@ #include #ifdef HAVE_UNISTD_H # include -#endif +#endif /* defined(HAVE_UNISTD_H) */ #include #include @@ -99,11 +99,11 @@ void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, #define NGTCP2_LOG_FRM_HD_FIELDS(DIR) \ timestamp_cast(log->last_ts - log->ts), (const char *)log->scid, "frm", \ - (DIR), hd->pkt_num, strpkttype(hd) + (DIR), hd->pkt_num, strpkttype(hd) #define NGTCP2_LOG_PKT_HD_FIELDS(DIR) \ timestamp_cast(log->last_ts - log->ts), (const char *)log->scid, "pkt", \ - (DIR), hd->pkt_num, strpkttype(hd) + (DIR), hd->pkt_num, strpkttype(hd) #define NGTCP2_LOG_TP_HD_FIELDS \ timestamp_cast(log->last_ts - log->ts), (const char *)log->scid, "cry" @@ -223,12 +223,12 @@ static uint64_t timestamp_cast(uint64_t ns) { return ns / NGTCP2_MILLISECONDS; } static void log_fr_stream(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stream *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 - " fin=%d offset=%" PRIu64 " len=%" PRIu64 " uni=%d"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type | fr->flags, fr->stream_id, - fr->fin, fr->offset, ngtcp2_vec_len(fr->data, fr->datacnt), - (fr->stream_id & 0x2) != 0); + log->user_data, + (NGTCP2_LOG_PKT " STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 + " fin=%d offset=%" PRIu64 " len=%" PRIu64 " uni=%d"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type | fr->flags, fr->stream_id, fr->fin, + fr->offset, ngtcp2_vec_len(fr->data, fr->datacnt), + (fr->stream_id & 0x2) != 0); } static void log_fr_ack(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -236,13 +236,12 @@ static void log_fr_ack(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, int64_t largest_ack, min_ack; size_t i; - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") largest_ack=%" PRId64 - " ack_delay=%" PRIu64 "(%" PRIu64 - ") ack_range_count=%zu"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->largest_ack, - fr->ack_delay_unscaled / NGTCP2_MILLISECONDS, fr->ack_delay, - fr->rangecnt); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") largest_ack=%" PRId64 + " ack_delay=%" PRIu64 "(%" PRIu64 ") ack_range_count=%zu"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->largest_ack, + fr->ack_delay_unscaled / NGTCP2_MILLISECONDS, fr->ack_delay, fr->rangecnt); largest_ack = fr->largest_ack; min_ack = fr->largest_ack - (int64_t)fr->first_ack_range; @@ -285,38 +284,37 @@ static void log_fr_reset_stream(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_reset_stream *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " RESET_STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 - " app_error_code=%s(0x%" PRIx64 ") final_size=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, - strapperrorcode(fr->app_error_code), fr->app_error_code, fr->final_size); + log->user_data, + (NGTCP2_LOG_PKT " RESET_STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 + " app_error_code=%s(0x%" PRIx64 ") final_size=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, + strapperrorcode(fr->app_error_code), fr->app_error_code, fr->final_size); } static void log_fr_connection_close(ngtcp2_log *log, const 
ngtcp2_pkt_hd *hd, const ngtcp2_connection_close *fr, const char *dir) { char reason[256]; - size_t reasonlen = ngtcp2_min(sizeof(reason) - 1, fr->reasonlen); + size_t reasonlen = ngtcp2_min_size(sizeof(reason) - 1, fr->reasonlen); - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " CONNECTION_CLOSE(0x%02" PRIx64 - ") error_code=%s(0x%" PRIx64 ") " - "frame_type=%" PRIx64 - " reason_len=%zu reason=[%s]"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, - fr->type == NGTCP2_FRAME_CONNECTION_CLOSE - ? strerrorcode(fr->error_code) - : strapperrorcode(fr->error_code), - fr->error_code, fr->frame_type, fr->reasonlen, - ngtcp2_encode_printable_ascii(reason, fr->reason, reasonlen)); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " CONNECTION_CLOSE(0x%02" PRIx64 + ") error_code=%s(0x%" PRIx64 ") " + "frame_type=%" PRIx64 " reason_len=%zu reason=[%s]"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, + fr->type == NGTCP2_FRAME_CONNECTION_CLOSE ? strerrorcode(fr->error_code) + : strapperrorcode(fr->error_code), + fr->error_code, fr->frame_type, fr->reasonlen, + ngtcp2_encode_printable_ascii(reason, fr->reason, reasonlen)); } static void log_fr_max_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_data *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " MAX_DATA(0x%02" PRIx64 ") max_data=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_data); + log->user_data, + (NGTCP2_LOG_PKT " MAX_DATA(0x%02" PRIx64 ") max_data=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_data); } static void log_fr_max_stream_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -333,9 +331,9 @@ static void log_fr_max_stream_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, static void log_fr_max_streams(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_streams *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " MAX_STREAMS(0x%02" PRIx64 ") max_streams=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); + log->user_data, + (NGTCP2_LOG_PKT " MAX_STREAMS(0x%02" PRIx64 ") max_streams=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); } static void log_fr_ping(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -348,9 +346,9 @@ static void log_fr_data_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_data_blocked *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " DATA_BLOCKED(0x%02" PRIx64 ") offset=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset); + log->user_data, + (NGTCP2_LOG_PKT " DATA_BLOCKED(0x%02" PRIx64 ") offset=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset); } static void log_fr_stream_data_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -367,9 +365,9 @@ static void log_fr_streams_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_streams_blocked *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " STREAMS_BLOCKED(0x%02" PRIx64 ") max_streams=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); + log->user_data, + (NGTCP2_LOG_PKT " STREAMS_BLOCKED(0x%02" PRIx64 ") max_streams=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); } static void log_fr_new_connection_id(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -379,15 +377,15 @@ static void log_fr_new_connection_id(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t cid[sizeof(fr->cid.data) * 2 + 1]; log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " NEW_CONNECTION_ID(0x%02" 
PRIx64 ") seq=%" PRIu64 - " cid=0x%s retire_prior_to=%" PRIu64 - " stateless_reset_token=0x%s"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq, - (const char *)ngtcp2_encode_hex(cid, fr->cid.data, fr->cid.datalen), - fr->retire_prior_to, - (const char *)ngtcp2_encode_hex(buf, fr->stateless_reset_token, - sizeof(fr->stateless_reset_token))); + log->user_data, + (NGTCP2_LOG_PKT " NEW_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64 + " cid=0x%s retire_prior_to=%" PRIu64 + " stateless_reset_token=0x%s"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq, + (const char *)ngtcp2_encode_hex(cid, fr->cid.data, fr->cid.datalen), + fr->retire_prior_to, + (const char *)ngtcp2_encode_hex(buf, fr->stateless_reset_token, + sizeof(fr->stateless_reset_token))); } static void log_fr_stop_sending(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -406,10 +404,10 @@ static void log_fr_path_challenge(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[sizeof(fr->data) * 2 + 1]; log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " PATH_CHALLENGE(0x%02" PRIx64 ") data=0x%s"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, - (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); + log->user_data, + (NGTCP2_LOG_PKT " PATH_CHALLENGE(0x%02" PRIx64 ") data=0x%s"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, + (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); } static void log_fr_path_response(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -418,19 +416,19 @@ static void log_fr_path_response(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[sizeof(fr->data) * 2 + 1]; log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " PATH_RESPONSE(0x%02" PRIx64 ") data=0x%s"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, - (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); + log->user_data, + (NGTCP2_LOG_PKT " PATH_RESPONSE(0x%02" PRIx64 ") data=0x%s"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, + (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); } static void log_fr_crypto(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stream *fr, const char *dir) { - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " CRYPTO(0x%02" PRIx64 ") offset=%" PRIu64 - " len=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset, - ngtcp2_vec_len(fr->data, fr->datacnt)); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " CRYPTO(0x%02" PRIx64 ") offset=%" PRIu64 " len=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset, + ngtcp2_vec_len(fr->data, fr->datacnt)); } static void log_fr_new_token(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -448,9 +446,9 @@ static void log_fr_new_token(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, p = ngtcp2_encode_hex(buf, fr->token, fr->tokenlen); } log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " NEW_TOKEN(0x%02" PRIx64 ") token=0x%s len=%zu"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)p, fr->tokenlen); + log->user_data, + (NGTCP2_LOG_PKT " NEW_TOKEN(0x%02" PRIx64 ") token=0x%s len=%zu"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)p, fr->tokenlen); } static void log_fr_retire_connection_id(ngtcp2_log *log, @@ -458,9 +456,9 @@ static void log_fr_retire_connection_id(ngtcp2_log *log, const ngtcp2_retire_connection_id *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " RETIRE_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq); + log->user_data, + (NGTCP2_LOG_PKT " RETIRE_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64), + 
NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq); } static void log_fr_handshake_done(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -601,11 +599,11 @@ void ngtcp2_log_rx_sr(ngtcp2_log *log, const ngtcp2_pkt_stateless_reset *sr) { shd.type = NGTCP2_PKT_STATELESS_RESET; log->log_printf( - log->user_data, (NGTCP2_LOG_PKT " token=0x%s randlen=%zu"), - NGTCP2_LOG_PKT_HD_FIELDS("rx"), - (const char *)ngtcp2_encode_hex(buf, sr->stateless_reset_token, - sizeof(sr->stateless_reset_token)), - sr->randlen); + log->user_data, (NGTCP2_LOG_PKT " token=0x%s randlen=%zu"), + NGTCP2_LOG_PKT_HD_FIELDS("rx"), + (const char *)ngtcp2_encode_hex(buf, sr->stateless_reset_token, + sizeof(sr->stateless_reset_token)), + sr->randlen); } void ngtcp2_log_remote_tp(ngtcp2_log *log, @@ -625,10 +623,10 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, if (params->stateless_reset_token_present) { log->log_printf( - log->user_data, (NGTCP2_LOG_TP " stateless_reset_token=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(token, params->stateless_reset_token, - sizeof(params->stateless_reset_token))); + log->user_data, (NGTCP2_LOG_TP " stateless_reset_token=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(token, params->stateless_reset_token, + sizeof(params->stateless_reset_token))); } if (params->preferred_addr_present) { @@ -639,7 +637,7 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, (NGTCP2_LOG_TP " preferred_address.ipv4_addr=%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_ipv4( - addr, (const uint8_t *)&sa_in->sin_addr)); + addr, (const uint8_t *)&sa_in->sin_addr)); log->log_printf(log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv4_port=%u"), NGTCP2_LOG_TP_HD_FIELDS, ngtcp2_ntohs(sa_in->sin_port)); @@ -652,59 +650,59 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, (NGTCP2_LOG_TP " preferred_address.ipv6_addr=%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_ipv6( - addr, (const uint8_t *)&sa_in6->sin6_addr)); + addr, (const uint8_t *)&sa_in6->sin6_addr)); log->log_printf(log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv6_port=%u"), NGTCP2_LOG_TP_HD_FIELDS, ngtcp2_ntohs(sa_in6->sin6_port)); } log->log_printf( - log->user_data, (NGTCP2_LOG_TP " preferred_address.cid=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->preferred_addr.cid.data, - params->preferred_addr.cid.datalen)); + log->user_data, (NGTCP2_LOG_TP " preferred_address.cid=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->preferred_addr.cid.data, + params->preferred_addr.cid.datalen)); log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " preferred_address.stateless_reset_token=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex( - token, params->preferred_addr.stateless_reset_token, - sizeof(params->preferred_addr.stateless_reset_token))); + log->user_data, + (NGTCP2_LOG_TP " preferred_address.stateless_reset_token=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex( + token, params->preferred_addr.stateless_reset_token, + sizeof(params->preferred_addr.stateless_reset_token))); } if (params->original_dcid_present) { log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " original_destination_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->original_dcid.data, - params->original_dcid.datalen)); + log->user_data, + (NGTCP2_LOG_TP " original_destination_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, 
params->original_dcid.data, + params->original_dcid.datalen)); } if (params->retry_scid_present) { log->log_printf( - log->user_data, (NGTCP2_LOG_TP " retry_source_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->retry_scid.data, - params->retry_scid.datalen)); + log->user_data, (NGTCP2_LOG_TP " retry_source_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->retry_scid.data, + params->retry_scid.datalen)); } if (params->initial_scid_present) { log->log_printf( - log->user_data, (NGTCP2_LOG_TP " initial_source_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->initial_scid.data, - params->initial_scid.datalen)); + log->user_data, (NGTCP2_LOG_TP " initial_source_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->initial_scid.data, + params->initial_scid.datalen)); } log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " initial_max_stream_data_bidi_local=%" PRIu64), - NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_local); + log->user_data, + (NGTCP2_LOG_TP " initial_max_stream_data_bidi_local=%" PRIu64), + NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_local); log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " initial_max_stream_data_bidi_remote=%" PRIu64), - NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_remote); + log->user_data, + (NGTCP2_LOG_TP " initial_max_stream_data_bidi_remote=%" PRIu64), + NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_remote); log->log_printf(log->user_data, (NGTCP2_LOG_TP " initial_max_stream_data_uni=%" PRIu64), NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_uni); @@ -742,21 +740,21 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, if (params->version_info_present) { log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " version_information.chosen_version=0x%08x"), - NGTCP2_LOG_TP_HD_FIELDS, params->version_info.chosen_version); + log->user_data, + (NGTCP2_LOG_TP " version_information.chosen_version=0x%08x"), + NGTCP2_LOG_TP_HD_FIELDS, params->version_info.chosen_version); assert(!(params->version_info.available_versionslen & 0x3)); for (i = 0, p = params->version_info.available_versions; i < params->version_info.available_versionslen; i += sizeof(uint32_t)) { - p = ngtcp2_get_uint32(&version, p); + p = ngtcp2_get_uint32be(&version, p); log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " version_information.available_versions[%zu]=0x%08x"), - NGTCP2_LOG_TP_HD_FIELDS, i >> 2, version); + log->user_data, + (NGTCP2_LOG_TP " version_information.available_versions[%zu]=0x%08x"), + NGTCP2_LOG_TP_HD_FIELDS, i >> 2, version); } } } @@ -783,18 +781,18 @@ static void log_pkt_hd(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, if (hd->type == NGTCP2_PKT_1RTT) { ngtcp2_log_info( - log, NGTCP2_LOG_EVENT_PKT, "%s pkn=%" PRId64 " dcid=0x%s type=%s k=%d", - dir, hd->pkt_num, - (const char *)ngtcp2_encode_hex(dcid, hd->dcid.data, hd->dcid.datalen), - strpkttype(hd), (hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE) != 0); + log, NGTCP2_LOG_EVENT_PKT, "%s pkn=%" PRId64 " dcid=0x%s type=%s k=%d", + dir, hd->pkt_num, + (const char *)ngtcp2_encode_hex(dcid, hd->dcid.data, hd->dcid.datalen), + strpkttype(hd), (hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE) != 0); } else { ngtcp2_log_info( - log, NGTCP2_LOG_EVENT_PKT, - "%s pkn=%" PRId64 " dcid=0x%s scid=0x%s version=0x%08x type=%s len=%zu", - dir, hd->pkt_num, - (const char *)ngtcp2_encode_hex(dcid, 
hd->dcid.data, hd->dcid.datalen), - (const char *)ngtcp2_encode_hex(scid, hd->scid.data, hd->scid.datalen), - hd->version, strpkttype(hd), hd->len); + log, NGTCP2_LOG_EVENT_PKT, + "%s pkn=%" PRId64 " dcid=0x%s scid=0x%s version=0x%08x type=%s len=%zu", + dir, hd->pkt_num, + (const char *)ngtcp2_encode_hex(dcid, hd->dcid.data, hd->dcid.datalen), + (const char *)ngtcp2_encode_hex(scid, hd->scid.data, hd->scid.datalen), + hd->version, strpkttype(hd), hd->len); } } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h index 1280ce04d6385a..13fb81a72e1d51 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -129,4 +129,4 @@ void ngtcp2_log_tx_cancel(ngtcp2_log *log, const ngtcp2_pkt_hd *hd); void ngtcp2_log_info(ngtcp2_log *log, ngtcp2_log_event ev, const char *fmt, ...); -#endif /* NGTCP2_LOG_H */ +#endif /* !defined(NGTCP2_LOG_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h index 28d3461bef9238..dfe5e0aed220f8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h @@ -27,17 +27,14 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include -#define ngtcp2_min(A, B) ((A) < (B) ? (A) : (B)) -#define ngtcp2_max(A, B) ((A) > (B) ? (A) : (B)) - #define ngtcp2_struct_of(ptr, type, member) \ - ((type *)(void *)((char *)(ptr)-offsetof(type, member))) + ((type *)(void *)((char *)(ptr) - offsetof(type, member))) /* ngtcp2_list_insert inserts |T| before |*PD|. The contract is that this is singly linked list, and the next element is pointed by next @@ -55,4 +52,30 @@ */ #define ngtcp2_arraylen(A) (sizeof(A) / sizeof(A[0])) -#endif /* NGTCP2_MACRO_H */ +#define ngtcp2_max_def(SUFFIX, T) \ + static inline T ngtcp2_max_##SUFFIX(T a, T b) { return a < b ? b : a; } + +ngtcp2_max_def(int8, int8_t) +ngtcp2_max_def(int16, int16_t) +ngtcp2_max_def(int32, int32_t) +ngtcp2_max_def(int64, int64_t) +ngtcp2_max_def(uint8, uint8_t) +ngtcp2_max_def(uint16, uint16_t) +ngtcp2_max_def(uint32, uint32_t) +ngtcp2_max_def(uint64, uint64_t) +ngtcp2_max_def(size, size_t) + +#define ngtcp2_min_def(SUFFIX, T) \ + static inline T ngtcp2_min_##SUFFIX(T a, T b) { return a < b ? 
a : b; } + +ngtcp2_min_def(int8, int8_t) +ngtcp2_min_def(int16, int16_t) +ngtcp2_min_def(int32, int32_t) +ngtcp2_min_def(int64, int64_t) +ngtcp2_min_def(uint8, uint8_t) +ngtcp2_min_def(uint16, uint16_t) +ngtcp2_min_def(uint32, uint32_t) +ngtcp2_min_def(uint64, uint64_t) +ngtcp2_min_def(size, size_t) + +#endif /* !defined(NGTCP2_MACRO_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c index 33e9fcc018b5db..9eb102f16b32e2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c @@ -35,8 +35,7 @@ void ngtcp2_map_init(ngtcp2_map *map, const ngtcp2_mem *mem) { map->mem = mem; - map->tablelen = 0; - map->tablelenbits = 0; + map->hashbits = 0; map->table = NULL; map->size = 0; } @@ -49,33 +48,20 @@ void ngtcp2_map_free(ngtcp2_map *map) { ngtcp2_mem_free(map->mem, map->table); } -void ngtcp2_map_each_free(ngtcp2_map *map, int (*func)(void *data, void *ptr), - void *ptr) { - uint32_t i; - ngtcp2_map_bucket *bkt; - - for (i = 0; i < map->tablelen; ++i) { - bkt = &map->table[i]; - - if (bkt->data == NULL) { - continue; - } - - func(bkt->data, ptr); - } -} - -int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), +int ngtcp2_map_each(const ngtcp2_map *map, int (*func)(void *data, void *ptr), void *ptr) { int rv; - uint32_t i; + size_t i; ngtcp2_map_bucket *bkt; + size_t tablelen; if (map->size == 0) { return 0; } - for (i = 0; i < map->tablelen; ++i) { + tablelen = 1u << map->hashbits; + + for (i = 0; i < tablelen; ++i) { bkt = &map->table[i]; if (bkt->data == NULL) { @@ -91,82 +77,61 @@ int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), return 0; } -static uint32_t hash(ngtcp2_map_key_type key) { - return (uint32_t)((key * 11400714819323198485llu) >> 32); -} - -static size_t h2idx(uint32_t hash, uint32_t bits) { - return hash >> (32 - bits); -} - -static size_t distance(uint32_t tablelen, uint32_t tablelenbits, - ngtcp2_map_bucket *bkt, size_t idx) { - return (idx - h2idx(bkt->hash, tablelenbits)) & (tablelen - 1); +static size_t hash(ngtcp2_map_key_type key, size_t bits) { + return (size_t)((key * 11400714819323198485llu) >> (64 - bits)); } -static void map_bucket_swap(ngtcp2_map_bucket *bkt, uint32_t *phash, - ngtcp2_map_key_type *pkey, void **pdata) { - uint32_t h = bkt->hash; - ngtcp2_map_key_type key = bkt->key; - void *data = bkt->data; - - bkt->hash = *phash; - bkt->key = *pkey; - bkt->data = *pdata; +static void map_bucket_swap(ngtcp2_map_bucket *a, ngtcp2_map_bucket *b) { + ngtcp2_map_bucket c = *a; - *phash = h; - *pkey = key; - *pdata = data; -} - -static void map_bucket_set_data(ngtcp2_map_bucket *bkt, uint32_t hash, - ngtcp2_map_key_type key, void *data) { - bkt->hash = hash; - bkt->key = key; - bkt->data = data; + *a = *b; + *b = c; } #ifndef WIN32 -void ngtcp2_map_print_distance(ngtcp2_map *map) { - uint32_t i; +void ngtcp2_map_print_distance(const ngtcp2_map *map) { + size_t i; size_t idx; ngtcp2_map_bucket *bkt; + size_t tablelen; - for (i = 0; i < map->tablelen; ++i) { + if (map->size == 0) { + return; + } + + tablelen = 1u << map->hashbits; + + for (i = 0; i < tablelen; ++i) { bkt = &map->table[i]; if (bkt->data == NULL) { - fprintf(stderr, "@%u \n", i); + fprintf(stderr, "@%zu \n", i); continue; } - idx = h2idx(bkt->hash, map->tablelenbits); - fprintf(stderr, "@%u hash=%08x key=%" PRIu64 " base=%zu distance=%zu\n", i, - bkt->hash, bkt->key, idx, - distance(map->tablelen, map->tablelenbits, bkt, idx)); + idx = hash(bkt->key, map->hashbits); + fprintf(stderr, "@%zu 
hash=%zu key=%" PRIu64 " base=%zu distance=%u\n", i, + hash(bkt->key, map->hashbits), bkt->key, idx, bkt->psl); } } -#endif /* !WIN32 */ +#endif /* !defined(WIN32) */ -static int insert(ngtcp2_map_bucket *table, uint32_t tablelen, - uint32_t tablelenbits, uint32_t hash, ngtcp2_map_key_type key, - void *data) { - size_t idx = h2idx(hash, tablelenbits); - size_t d = 0, dd; - ngtcp2_map_bucket *bkt; +static int insert(ngtcp2_map_bucket *table, size_t hashbits, + ngtcp2_map_key_type key, void *data) { + size_t idx = hash(key, hashbits); + ngtcp2_map_bucket b = {0, key, data}, *bkt; + size_t mask = (1u << hashbits) - 1; for (;;) { bkt = &table[idx]; if (bkt->data == NULL) { - map_bucket_set_data(bkt, hash, key, data); + *bkt = b; return 0; } - dd = distance(tablelen, tablelenbits, bkt, idx); - if (d > dd) { - map_bucket_swap(bkt, &hash, &key, &data); - d = dd; + if (b.psl > bkt->psl) { + map_bucket_swap(bkt, &b); } else if (bkt->key == key) { /* TODO This check is just a waste after first swap or if this function is called from map_resize. That said, there is no @@ -175,41 +140,42 @@ static int insert(ngtcp2_map_bucket *table, uint32_t tablelen, return NGTCP2_ERR_INVALID_ARGUMENT; } - ++d; - idx = (idx + 1) & (tablelen - 1); + ++b.psl; + idx = (idx + 1) & mask; } } -/* new_tablelen must be power of 2 and new_tablelen == (1 << - new_tablelenbits) must hold. */ -static int map_resize(ngtcp2_map *map, uint32_t new_tablelen, - uint32_t new_tablelenbits) { - uint32_t i; +static int map_resize(ngtcp2_map *map, size_t new_hashbits) { + size_t i; ngtcp2_map_bucket *new_table; ngtcp2_map_bucket *bkt; + size_t tablelen; int rv; (void)rv; new_table = - ngtcp2_mem_calloc(map->mem, new_tablelen, sizeof(ngtcp2_map_bucket)); + ngtcp2_mem_calloc(map->mem, 1u << new_hashbits, sizeof(ngtcp2_map_bucket)); if (new_table == NULL) { return NGTCP2_ERR_NOMEM; } - for (i = 0; i < map->tablelen; ++i) { - bkt = &map->table[i]; - if (bkt->data == NULL) { - continue; - } - rv = insert(new_table, new_tablelen, new_tablelenbits, bkt->hash, bkt->key, - bkt->data); + if (map->size) { + tablelen = 1u << map->hashbits; - assert(0 == rv); + for (i = 0; i < tablelen; ++i) { + bkt = &map->table[i]; + if (bkt->data == NULL) { + continue; + } + + rv = insert(new_table, new_hashbits, bkt->key, bkt->data); + + assert(0 == rv); + } } ngtcp2_mem_free(map->mem, map->table); - map->tablelen = new_tablelen; - map->tablelenbits = new_tablelenbits; + map->hashbits = new_hashbits; map->table = new_table; return 0; @@ -221,48 +187,49 @@ int ngtcp2_map_insert(ngtcp2_map *map, ngtcp2_map_key_type key, void *data) { assert(data); /* Load factor is 0.75 */ - if ((map->size + 1) * 4 > map->tablelen * 3) { - if (map->tablelen) { - rv = map_resize(map, map->tablelen * 2, map->tablelenbits + 1); + /* Under the very initial condition, that is map->size == 0 and + map->hashbits == 0, 4 > 3 still holds nicely. 
*/ + if ((map->size + 1) * 4 > (1u << map->hashbits) * 3) { + if (map->hashbits) { + rv = map_resize(map, map->hashbits + 1); if (rv != 0) { return rv; } } else { - rv = map_resize(map, 1 << NGTCP2_INITIAL_TABLE_LENBITS, - NGTCP2_INITIAL_TABLE_LENBITS); + rv = map_resize(map, NGTCP2_INITIAL_TABLE_LENBITS); if (rv != 0) { return rv; } } } - rv = insert(map->table, map->tablelen, map->tablelenbits, hash(key), key, - data); + rv = insert(map->table, map->hashbits, key, data); if (rv != 0) { return rv; } + ++map->size; + return 0; } -void *ngtcp2_map_find(ngtcp2_map *map, ngtcp2_map_key_type key) { - uint32_t h; +void *ngtcp2_map_find(const ngtcp2_map *map, ngtcp2_map_key_type key) { size_t idx; ngtcp2_map_bucket *bkt; - size_t d = 0; + size_t psl = 0; + size_t mask; if (map->size == 0) { return NULL; } - h = hash(key); - idx = h2idx(h, map->tablelenbits); + idx = hash(key, map->hashbits); + mask = (1u << map->hashbits) - 1; for (;;) { bkt = &map->table[idx]; - if (bkt->data == NULL || - d > distance(map->tablelen, map->tablelenbits, bkt, idx)) { + if (bkt->data == NULL || psl > bkt->psl) { return NULL; } @@ -270,50 +237,47 @@ void *ngtcp2_map_find(ngtcp2_map *map, ngtcp2_map_key_type key) { return bkt->data; } - ++d; - idx = (idx + 1) & (map->tablelen - 1); + ++psl; + idx = (idx + 1) & mask; } } int ngtcp2_map_remove(ngtcp2_map *map, ngtcp2_map_key_type key) { - uint32_t h; - size_t idx, didx; - ngtcp2_map_bucket *bkt; - size_t d = 0; + size_t idx; + ngtcp2_map_bucket *b, *bkt; + size_t psl = 0; + size_t mask; if (map->size == 0) { return NGTCP2_ERR_INVALID_ARGUMENT; } - h = hash(key); - idx = h2idx(h, map->tablelenbits); + idx = hash(key, map->hashbits); + mask = (1u << map->hashbits) - 1; for (;;) { bkt = &map->table[idx]; - if (bkt->data == NULL || - d > distance(map->tablelen, map->tablelenbits, bkt, idx)) { + if (bkt->data == NULL || psl > bkt->psl) { return NGTCP2_ERR_INVALID_ARGUMENT; } if (bkt->key == key) { - map_bucket_set_data(bkt, 0, 0, NULL); - - didx = idx; - idx = (idx + 1) & (map->tablelen - 1); + b = bkt; + idx = (idx + 1) & mask; for (;;) { bkt = &map->table[idx]; - if (bkt->data == NULL || - distance(map->tablelen, map->tablelenbits, bkt, idx) == 0) { + if (bkt->data == NULL || bkt->psl == 0) { + b->data = NULL; break; } - map->table[didx] = *bkt; - map_bucket_set_data(bkt, 0, 0, NULL); - didx = idx; + --bkt->psl; + *b = *bkt; + b = bkt; - idx = (idx + 1) & (map->tablelen - 1); + idx = (idx + 1) & mask; } --map->size; @@ -321,18 +285,18 @@ int ngtcp2_map_remove(ngtcp2_map *map, ngtcp2_map_key_type key) { return 0; } - ++d; - idx = (idx + 1) & (map->tablelen - 1); + ++psl; + idx = (idx + 1) & mask; } } void ngtcp2_map_clear(ngtcp2_map *map) { - if (map->tablelen == 0) { + if (map->size == 0) { return; } - memset(map->table, 0, sizeof(*map->table) * map->tablelen); + memset(map->table, 0, sizeof(*map->table) * (1u << map->hashbits)); map->size = 0; } -size_t ngtcp2_map_size(ngtcp2_map *map) { return map->size; } +size_t ngtcp2_map_size(const ngtcp2_map *map) { return map->size; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h index d05b1657489e45..9d882fb20088d8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h @@ -28,7 +28,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,7 +39,7 @@ typedef uint64_t ngtcp2_map_key_type; typedef struct ngtcp2_map_bucket { - uint32_t hash; + uint32_t psl; ngtcp2_map_key_type key; void *data; } 
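/*
 * Illustrative sketch (not from the ngtcp2 sources): a minimal
 * fixed-size table built on the same ideas as the reworked ngtcp2_map
 * above -- a Fibonacci hash truncated to `hashbits` bits, a stored
 * probe sequence length (psl) per bucket instead of a stored hash,
 * insertion that steals the slot of any resident closer to its home
 * bucket, and lookups that stop as soon as the probed bucket's psl is
 * smaller than the current probe distance.  Resizing and duplicate
 * keys are left out; demo_* names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HASHBITS 4 /* 16 buckets */

typedef struct {
  uint32_t psl; /* probe sequence length: distance from the home slot */
  uint64_t key;
  void *data;
} demo_bucket;

static size_t demo_hash(uint64_t key) {
  /* Fibonacci hashing: keep the top DEMO_HASHBITS bits of key * 2^64/phi. */
  return (size_t)((key * 11400714819323198485llu) >> (64 - DEMO_HASHBITS));
}

static void demo_insert(demo_bucket *table, uint64_t key, void *data) {
  size_t mask = (1u << DEMO_HASHBITS) - 1;
  size_t idx = demo_hash(key);
  demo_bucket b = {0, key, data};

  for (;;) {
    demo_bucket *bkt = &table[idx];

    if (bkt->data == NULL) {
      *bkt = b;
      return;
    }

    if (b.psl > bkt->psl) {
      /* The incoming entry is "poorer" (farther from home): take the
         slot and keep probing with the evicted entry. */
      demo_bucket tmp = *bkt;
      *bkt = b;
      b = tmp;
    }

    ++b.psl;
    idx = (idx + 1) & mask;
  }
}

static void *demo_find(const demo_bucket *table, uint64_t key) {
  size_t mask = (1u << DEMO_HASHBITS) - 1;
  size_t idx = demo_hash(key);
  uint32_t psl = 0;

  for (;;) {
    const demo_bucket *bkt = &table[idx];

    /* An empty slot, or a resident closer to its home than we are to
       ours, proves the key cannot be stored any further along. */
    if (bkt->data == NULL || psl > bkt->psl) {
      return NULL;
    }

    if (bkt->key == key) {
      return bkt->data;
    }

    ++psl;
    idx = (idx + 1) & mask;
  }
}

int main(void) {
  demo_bucket table[1u << DEMO_HASHBITS] = {0};
  int v1 = 1, v2 = 2;

  demo_insert(table, 1000, &v1);
  demo_insert(table, 2000, &v2);

  assert(demo_find(table, 1000) == &v1);
  assert(demo_find(table, 2000) == &v2);
  assert(demo_find(table, 3000) == NULL);
  puts("ok");
  return 0;
}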
ngtcp2_map_bucket; @@ -48,33 +48,24 @@ typedef struct ngtcp2_map { ngtcp2_map_bucket *table; const ngtcp2_mem *mem; size_t size; - uint32_t tablelen; - uint32_t tablelenbits; + size_t hashbits; } ngtcp2_map; /* - * Initializes the map |map|. + * ngtcp2_map_init initializes the map |map|. */ void ngtcp2_map_init(ngtcp2_map *map, const ngtcp2_mem *mem); /* - * Deallocates any resources allocated for |map|. The stored entries - * are not freed by this function. Use ngtcp2_map_each_free() to free - * each entries. + * ngtcp2_map_free deallocates any resources allocated for |map|. The + * stored entries are not freed by this function. Use + * ngtcp2_map_each() to free each entry. */ void ngtcp2_map_free(ngtcp2_map *map); /* - * Deallocates each entries using |func| function and any resources - * allocated for |map|. The |func| function is responsible for freeing - * given the |data| object. The |ptr| will be passed to the |func| as - * send argument. The return value of the |func| will be ignored. - */ -void ngtcp2_map_each_free(ngtcp2_map *map, int (*func)(void *data, void *ptr), - void *ptr); - -/* - * Inserts the new |data| with the |key| to the map |map|. + * ngtcp2_map_insert inserts the new |data| with the |key| to the map + * |map|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -82,57 +73,56 @@ void ngtcp2_map_each_free(ngtcp2_map *map, int (*func)(void *data, void *ptr), * NGTCP2_ERR_INVALID_ARGUMENT * The item associated by |key| already exists. * NGTCP2_ERR_NOMEM - * Out of memory + * Out of memory */ int ngtcp2_map_insert(ngtcp2_map *map, ngtcp2_map_key_type key, void *data); /* - * Returns the data associated by the key |key|. If there is no such - * data, this function returns NULL. + * ngtcp2_map_find returns the entry associated by the key |key|. If + * there is no such entry, this function returns NULL. */ -void *ngtcp2_map_find(ngtcp2_map *map, ngtcp2_map_key_type key); +void *ngtcp2_map_find(const ngtcp2_map *map, ngtcp2_map_key_type key); /* - * Removes the data associated by the key |key| from the |map|. The - * removed data is not freed by this function. + * ngtcp2_map_remove removes the entry associated by the key |key| + * from the |map|. The removed entry is not freed by this function. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_INVALID_ARGUMENT - * The data associated by |key| does not exist. + * The entry associated by |key| does not exist. */ int ngtcp2_map_remove(ngtcp2_map *map, ngtcp2_map_key_type key); /* - * Removes all entries from |map|. + * ngtcp2_map_clear removes all entries from |map|. The removed entry + * is not freed by this function. */ void ngtcp2_map_clear(ngtcp2_map *map); /* - * Returns the number of items stored in the map |map|. + * ngtcp2_map_size returns the number of items stored in the map + * |map|. */ -size_t ngtcp2_map_size(ngtcp2_map *map); +size_t ngtcp2_map_size(const ngtcp2_map *map); /* - * Applies the function |func| to each data in the |map| with the - * optional user supplied pointer |ptr|. + * ngtcp2_map_each applies the function |func| to each entry in the + * |map| with the optional user supplied pointer |ptr|. * * If the |func| returns 0, this function calls the |func| with the - * next data. If the |func| returns nonzero, it will not call the + * next entry. If the |func| returns nonzero, it will not call the * |func| for further entries and return the return value of the * |func| immediately. 
Thus, this function returns 0 if all the * invocations of the |func| return 0, or nonzero value which the last * invocation of |func| returns. - * - * Don't use this function to free each data. Use - * ngtcp2_map_each_free() instead. */ -int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), +int ngtcp2_map_each(const ngtcp2_map *map, int (*func)(void *data, void *ptr), void *ptr); #ifndef WIN32 -void ngtcp2_map_print_distance(ngtcp2_map *map); -#endif /* !WIN32 */ +void ngtcp2_map_print_distance(const ngtcp2_map *map); +#endif /* !defined(WIN32) */ -#endif /* NGTCP2_MAP_H */ +#endif /* !defined(NGTCP2_MAP_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c index bcce0b5cdfcf02..d30e1f986e97d3 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c @@ -72,7 +72,7 @@ void *ngtcp2_mem_calloc(const ngtcp2_mem *mem, size_t nmemb, size_t size) { void *ngtcp2_mem_realloc(const ngtcp2_mem *mem, void *ptr, size_t size) { return mem->realloc(ptr, size, mem->user_data); } -#else /* MEMDEBUG */ +#else /* defined(MEMDEBUG) */ void *ngtcp2_mem_malloc_debug(const ngtcp2_mem *mem, size_t size, const char *func, const char *file, size_t line) { void *nptr = mem->malloc(size, mem->user_data); @@ -110,4 +110,4 @@ void *ngtcp2_mem_realloc_debug(const ngtcp2_mem *mem, void *ptr, size_t size, return nptr; } -#endif /* MEMDEBUG */ +#endif /* defined(MEMDEBUG) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h index c99b6c59726891..9f818752125523 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h @@ -28,7 +28,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -42,7 +42,7 @@ void ngtcp2_mem_free(const ngtcp2_mem *mem, void *ptr); void *ngtcp2_mem_calloc(const ngtcp2_mem *mem, size_t nmemb, size_t size); void *ngtcp2_mem_realloc(const ngtcp2_mem *mem, void *ptr, size_t size); -#else /* MEMDEBUG */ +#else /* defined(MEMDEBUG) */ void *ngtcp2_mem_malloc_debug(const ngtcp2_mem *mem, size_t size, const char *func, const char *file, size_t line); @@ -67,6 +67,6 @@ void *ngtcp2_mem_realloc_debug(const ngtcp2_mem *mem, void *ptr, size_t size, # define ngtcp2_mem_realloc(MEM, PTR, SIZE) \ ngtcp2_mem_realloc_debug((MEM), (PTR), (SIZE), __func__, __FILE__, __LINE__) -#endif /* MEMDEBUG */ +#endif /* defined(MEMDEBUG) */ -#endif /* NGTCP2_MEM_H */ +#endif /* !defined(NGTCP2_MEM_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h index 4a2c4041d4d170..103a2fb2d80714 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h @@ -30,70 +30,68 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #ifdef HAVE_ARPA_INET_H # include -#endif /* HAVE_ARPA_INET_H */ +#endif /* defined(HAVE_ARPA_INET_H) */ #ifdef HAVE_NETINET_IN_H # include -#endif /* HAVE_NETINET_IN_H */ +#endif /* defined(HAVE_NETINET_IN_H) */ #ifdef HAVE_BYTESWAP_H # include -#endif /* HAVE_BYTESWAP_H */ +#endif /* defined(HAVE_BYTESWAP_H) */ #ifdef HAVE_ENDIAN_H # include -#endif /* HAVE_ENDIAN_H */ +#endif /* defined(HAVE_ENDIAN_H) */ #ifdef HAVE_SYS_ENDIAN_H # include -#endif /* HAVE_SYS_ENDIAN_H */ +#endif /* defined(HAVE_SYS_ENDIAN_H) */ -#if defined(__APPLE__) +#ifdef __APPLE__ # include -#endif // __APPLE__ +#endif /* defined(__APPLE__) */ #include -#if defined(HAVE_BE64TOH) || \ - (defined(HAVE_DECL_BE64TOH) && 
HAVE_DECL_BE64TOH > 0) +#if HAVE_DECL_BE64TOH # define ngtcp2_ntohl64(N) be64toh(N) # define ngtcp2_htonl64(N) htobe64(N) -#else /* !HAVE_BE64TOH */ -# if defined(WORDS_BIGENDIAN) +#else /* !HAVE_DECL_BE64TOH */ +# ifdef WORDS_BIGENDIAN # define ngtcp2_ntohl64(N) (N) # define ngtcp2_htonl64(N) (N) -# else /* !WORDS_BIGENDIAN */ -# if defined(HAVE_BSWAP_64) || \ - (defined(HAVE_DECL_BSWAP_64) && HAVE_DECL_BSWAP_64 > 0) +# else /* !defined(WORDS_BIGENDIAN) */ +# if HAVE_DECL_BSWAP_64 # define ngtcp2_bswap64 bswap_64 # elif defined(WIN32) # define ngtcp2_bswap64 _byteswap_uint64 # elif defined(__APPLE__) # define ngtcp2_bswap64 OSSwapInt64 -# else /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ +# else /* !(HAVE_DECL_BSWAP_64 || defined(WIN32) || defined(__APPLE__)) */ # define ngtcp2_bswap64(N) \ ((uint64_t)(ngtcp2_ntohl((uint32_t)(N))) << 32 | \ ngtcp2_ntohl((uint32_t)((N) >> 32))) -# endif /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ +# endif /* !(HAVE_DECL_BSWAP_64 || defined(WIN32) || defined(__APPLE__)) */ # define ngtcp2_ntohl64(N) ngtcp2_bswap64(N) # define ngtcp2_htonl64(N) ngtcp2_bswap64(N) -# endif /* !WORDS_BIGENDIAN */ -#endif /* !HAVE_BE64TOH */ +# endif /* !defined(WORDS_BIGENDIAN) */ +#endif /* !HAVE_DECL_BE64TOH */ -#if defined(WIN32) +#ifdef WIN32 /* Windows requires ws2_32 library for ntonl family functions. We define inline functions for those function so that we don't have dependency on that lib. */ # ifdef _MSC_VER # define STIN static __inline -# else +# else /* !defined(_MSC_VER) */ # define STIN static inline -# endif +# endif /* !defined(_MSC_VER) */ STIN uint32_t ngtcp2_htonl(uint32_t hostlong) { uint32_t res; @@ -131,13 +129,13 @@ STIN uint16_t ngtcp2_ntohs(uint16_t netshort) { return res; } -#else /* !WIN32 */ +#else /* !defined(WIN32) */ # define ngtcp2_htonl htonl # define ngtcp2_htons htons # define ngtcp2_ntohl ntohl # define ngtcp2_ntohs ntohs -#endif /* !WIN32 */ +#endif /* !defined(WIN32) */ -#endif /* NGTCP2_NET_H */ +#endif /* !defined(NGTCP2_NET_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h index ea73e788317681..cf23de7b2b7f20 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -67,9 +67,9 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); #ifndef NOMEMPOOL # define ngtcp2_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void ngtcp2_objalloc_##NAME##_init( \ - ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ + ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ ngtcp2_objalloc_init( \ - objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ + objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ } \ \ TYPE *ngtcp2_objalloc_##NAME##_get(ngtcp2_objalloc *objalloc); \ @@ -78,7 +78,7 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); size_t len); \ \ inline static void ngtcp2_objalloc_##NAME##_release( \ - ngtcp2_objalloc *objalloc, TYPE *obj) { \ + ngtcp2_objalloc *objalloc, TYPE *obj) { \ ngtcp2_opl_push(&objalloc->opl, &obj->OPLENTFIELD); \ } @@ -90,7 +90,7 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); \ if (!oplent) { \ rv = \ - ngtcp2_balloc_get(&objalloc->balloc, (void **)&obj, sizeof(TYPE)); \ + ngtcp2_balloc_get(&objalloc->balloc, (void **)&obj, sizeof(TYPE)); \ if (rv != 0) { \ return NULL; \ } \ @@ -118,30 +118,30 @@ void 
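/*
 * Illustrative sketch (not from the ngtcp2 sources): the portable
 * fallback chosen by ngtcp2_net.h above when neither be64toh() nor a
 * native 64-bit byte swap is available -- a byte swap composed from
 * two 32-bit ntohl() calls.  demo_bswap64() is a hypothetical stand-in
 * for the ngtcp2_bswap64 macro, written with ntohl() from <arpa/inet.h>.
 */
#include <arpa/inet.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_bswap64(uint64_t n) {
  /* Swap each 32-bit half, then exchange the halves. */
  return (uint64_t)ntohl((uint32_t)n) << 32 | ntohl((uint32_t)(n >> 32));
}

int main(void) {
  /* On a little-endian host this prints efcdab8967452301.  On a
     big-endian host ntohl() is the identity, which is why the real
     fallback only appears in the !WORDS_BIGENDIAN branch above. */
  printf("%016" PRIx64 "\n", demo_bswap64(UINT64_C(0x0123456789abcdef)));
  return 0;
}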
ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); \ return ngtcp2_struct_of(oplent, TYPE, OPLENTFIELD); \ } -#else /* NOMEMPOOL */ +#else /* defined(NOMEMPOOL) */ # define ngtcp2_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void ngtcp2_objalloc_##NAME##_init( \ - ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ + ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ ngtcp2_objalloc_init( \ - objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ + objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ } \ \ inline static TYPE *ngtcp2_objalloc_##NAME##_get( \ - ngtcp2_objalloc *objalloc) { \ + ngtcp2_objalloc *objalloc) { \ return ngtcp2_mem_malloc(objalloc->balloc.mem, sizeof(TYPE)); \ } \ \ inline static TYPE *ngtcp2_objalloc_##NAME##_len_get( \ - ngtcp2_objalloc *objalloc, size_t len) { \ + ngtcp2_objalloc *objalloc, size_t len) { \ return ngtcp2_mem_malloc(objalloc->balloc.mem, len); \ } \ \ inline static void ngtcp2_objalloc_##NAME##_release( \ - ngtcp2_objalloc *objalloc, TYPE *obj) { \ + ngtcp2_objalloc *objalloc, TYPE *obj) { \ ngtcp2_mem_free(objalloc->balloc.mem, obj); \ } # define ngtcp2_objalloc_def(NAME, TYPE, OPLENTFIELD) -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ -#endif /* NGTCP2_OBJALLOC_H */ +#endif /* !defined(NGTCP2_OBJALLOC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h index 714aa366304f0d..f2df3f6dccd45e 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -62,4 +62,4 @@ ngtcp2_opl_entry *ngtcp2_opl_pop(ngtcp2_opl *opl); void ngtcp2_opl_clear(ngtcp2_opl *opl); -#endif /* NGTCP2_OPL_H */ +#endif /* !defined(NGTCP2_OPL_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h index 0c360e936231c8..a708378db32fbb 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -46,4 +46,4 @@ void ngtcp2_path_init(ngtcp2_path *path, const ngtcp2_addr *local, void ngtcp2_path_storage_init2(ngtcp2_path_storage *ps, const ngtcp2_path *path); -#endif /* NGTCP2_PATH_H */ +#endif /* !defined(NGTCP2_PATH_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c index 1687ff254d94c7..5c82e1bd503f98 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c @@ -83,16 +83,19 @@ int ngtcp2_pkt_decode_version_cid(ngtcp2_version_cid *dest, const uint8_t *data, dcidlen = data[5]; len += dcidlen; + if (datalen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } + scidlen = data[5 + 1 + dcidlen]; len += scidlen; + if (datalen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } - ngtcp2_get_uint32(&version, &data[1]); + ngtcp2_get_uint32be(&version, &data[1]); supported_version = ngtcp2_is_supported_version(version); @@ -120,6 +123,7 @@ int ngtcp2_pkt_decode_version_cid(ngtcp2_version_cid *dest, const uint8_t *data, if (!supported_version) { return NGTCP2_ERR_VERSION_NEGOTIATION; } + return 0; } @@ -145,16 +149,19 @@ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, size_t len) { hd->flags = flags; hd->type = type; + if (dcid) { hd->dcid = *dcid; } else { ngtcp2_cid_zero(&hd->dcid); } + if (scid) { hd->scid = *scid; } else { 
ngtcp2_cid_zero(&hd->scid); } + hd->pkt_num = pkt_num; hd->token = NULL; hd->tokenlen = 0; @@ -170,10 +177,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, size_t dcil, scil; const uint8_t *p; size_t len = 0; - size_t n; size_t ntokenlen = 0; const uint8_t *token = NULL; size_t tokenlen = 0; + size_t nlonglen = 0; + size_t longlen = 0; uint64_t vi; uint8_t flags = NGTCP2_PKT_FLAG_LONG_FORM; @@ -185,7 +193,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, return NGTCP2_ERR_INVALID_ARGUMENT; } - ngtcp2_get_uint32(&version, &pkt[1]); + ngtcp2_get_uint32be(&version, &pkt[1]); if (version == 0) { type = NGTCP2_PKT_VERSION_NEGOTIATION; @@ -227,6 +235,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, p = &pkt[5]; dcil = *p; + if (dcil > NGTCP2_MAX_CIDLEN) { /* QUIC v1 implementation never expect to receive CID length more than NGTCP2_MAX_CIDLEN. */ @@ -240,9 +249,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, p += 1 + dcil; scil = *p; + if (scil > NGTCP2_MAX_CIDLEN) { return NGTCP2_ERR_INVALID_ARGUMENT; } + len += scil; if (pktlen < len) { @@ -264,6 +275,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, if (pktlen - len < vi) { return NGTCP2_ERR_INVALID_ARGUMENT; } + tokenlen = (size_t)vi; len += tokenlen; @@ -285,12 +297,21 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, } /* Length */ - n = ngtcp2_get_uvarintlen(p); - len += n - 1; + nlonglen = ngtcp2_get_uvarintlen(p); + len += nlonglen - 1; if (pktlen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } + + ngtcp2_get_uvarint(&vi, p); +#if SIZE_MAX > UINT32_MAX + if (vi > SIZE_MAX) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } +#endif /* SIZE_MAX > UINT32_MAX */ + + longlen = (size_t)vi; } dest->flags = flags; @@ -309,24 +330,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, dest->tokenlen = tokenlen; p += ntokenlen + tokenlen; - switch (type) { - case NGTCP2_PKT_RETRY: - dest->len = 0; - break; - default: - if (!(flags & NGTCP2_PKT_FLAG_LONG_FORM)) { - assert(type == NGTCP2_PKT_VERSION_NEGOTIATION); - /* Version Negotiation is not a long header packet. 
*/ - dest->len = 0; - break; - } + dest->len = longlen; - p = ngtcp2_get_uvarint(&vi, p); - if (vi > SIZE_MAX) { - return NGTCP2_ERR_INVALID_ARGUMENT; - } - dest->len = (size_t)vi; - } +#ifndef NDEBUG + p += nlonglen; +#endif /* !defined(NDEBUG) */ assert((size_t)(p - pkt) == len); @@ -401,6 +409,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, *p = (uint8_t)(NGTCP2_HEADER_FORM_BIT | (ngtcp2_pkt_versioned_type(hd->version, hd->type) << 4) | (uint8_t)(hd->pkt_numlen - 1)); + if (!(hd->flags & NGTCP2_PKT_FLAG_FIXED_BIT_CLEAR)) { *p |= NGTCP2_FIXED_BIT_MASK; } @@ -409,16 +418,20 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, p = ngtcp2_put_uint32be(p, hd->version); *p++ = (uint8_t)hd->dcid.datalen; + if (hd->dcid.datalen) { p = ngtcp2_cpymem(p, hd->dcid.data, hd->dcid.datalen); } + *p++ = (uint8_t)hd->scid.datalen; + if (hd->scid.datalen) { p = ngtcp2_cpymem(p, hd->scid.data, hd->scid.datalen); } if (hd->type == NGTCP2_PKT_INITIAL) { p = ngtcp2_put_uvarint(p, hd->tokenlen); + if (hd->tokenlen) { p = ngtcp2_cpymem(p, hd->token, hd->tokenlen); } @@ -446,9 +459,11 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_short(uint8_t *out, size_t outlen, p = out; *p = (uint8_t)(hd->pkt_numlen - 1); + if (!(hd->flags & NGTCP2_PKT_FLAG_FIXED_BIT_CLEAR)) { *p |= NGTCP2_FIXED_BIT_MASK; } + if (hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE) { *p |= NGTCP2_SHORT_KEY_PHASE_BIT; } @@ -503,7 +518,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, payloadlen); case NGTCP2_FRAME_STREAM_DATA_BLOCKED: return ngtcp2_pkt_decode_stream_data_blocked_frame( - &dest->stream_data_blocked, payload, payloadlen); + &dest->stream_data_blocked, payload, payloadlen); case NGTCP2_FRAME_STREAMS_BLOCKED_BIDI: case NGTCP2_FRAME_STREAMS_BLOCKED_UNI: return ngtcp2_pkt_decode_streams_blocked_frame(&dest->streams_blocked, @@ -530,7 +545,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, payloadlen); case NGTCP2_FRAME_RETIRE_CONNECTION_ID: return ngtcp2_pkt_decode_retire_connection_id_frame( - &dest->retire_connection_id, payload, payloadlen); + &dest->retire_connection_id, payload, payloadlen); case NGTCP2_FRAME_HANDSHAKE_DONE: return ngtcp2_pkt_decode_handshake_done_frame(&dest->handshake_done, payload, payloadlen); @@ -613,6 +628,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } + datalen = (size_t)vi; len += datalen; } else { @@ -752,7 +768,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, } /* TODO We might not decode all ranges. It could be very large. 
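/*
 * Illustrative sketch (not from the ngtcp2 sources): how a QUIC
 * variable-length integer such as the long-header Length field or a
 * STREAM frame length is decoded.  The two most significant bits of
 * the first byte select a 1-, 2-, 4- or 8-byte encoding; this mirrors
 * how ngtcp2_get_uvarintlen()/ngtcp2_get_uvarint() are used in the
 * hunks above.  demo_* names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t demo_uvarintlen(const uint8_t *p) {
  return (size_t)1 << (*p >> 6); /* prefix 00->1, 01->2, 10->4, 11->8 bytes */
}

static const uint8_t *demo_get_uvarint(uint64_t *dest, const uint8_t *p) {
  size_t len = demo_uvarintlen(p);
  uint64_t v = *p & 0x3f; /* strip the 2-bit length prefix */
  size_t i;

  for (i = 1; i < len; ++i) {
    v = (v << 8) | p[i];
  }

  *dest = v;
  return p + len;
}

int main(void) {
  /* 0x41 0x39 encodes 0x139 == 313 in the two-byte form (prefix 01). */
  static const uint8_t buf[] = {0x41, 0x39};
  uint64_t v;

  assert(demo_uvarintlen(buf) == 2);
  assert(demo_get_uvarint(&v, buf) == buf + 2);
  assert(v == 313);
  return 0;
}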
*/ - max_rangecnt = ngtcp2_min(NGTCP2_MAX_ACK_RANGES, rangecnt); + max_rangecnt = ngtcp2_min_size(NGTCP2_MAX_ACK_RANGES, rangecnt); p = payload + 1; @@ -770,7 +786,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, p = ngtcp2_get_uvarint(&range->gap, p); p = ngtcp2_get_uvarint(&range->len, p); } - for (i = max_rangecnt; i < rangecnt; ++i) { + + for (; i < rangecnt; ++i) { p += ngtcp2_get_uvarintlen(p); p += ngtcp2_get_uvarintlen(p); } @@ -820,18 +837,23 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } + p += n; n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } + p += n; n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -849,7 +871,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, } ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( - ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; const uint8_t *p; size_t reasonlen; @@ -868,6 +890,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -879,6 +902,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -888,6 +912,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( nreasonlen = ngtcp2_get_uvarintlen(p); len += nreasonlen - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -896,6 +921,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } + reasonlen = (size_t)vi; len += reasonlen; @@ -903,13 +929,16 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( dest->type = type; p = ngtcp2_get_uvarint(&dest->error_code, p); + if (type == NGTCP2_FRAME_CONNECTION_CLOSE) { p = ngtcp2_get_uvarint(&dest->frame_type, p); } else { dest->frame_type = 0; } + dest->reasonlen = reasonlen; p += nreasonlen; + if (reasonlen == 0) { dest->reason = NULL; } else { @@ -951,7 +980,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_data_frame(ngtcp2_max_data *dest, } ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( - ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; const uint8_t *p; size_t n; @@ -1055,10 +1084,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_data_blocked_frame(ngtcp2_data_blocked *dest, return (ngtcp2_ssize)len; } -ngtcp2_ssize -ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, - const uint8_t *payload, - size_t payloadlen) { +ngtcp2_ssize ngtcp2_pkt_decode_stream_data_blocked_frame( + ngtcp2_stream_data_blocked *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; const uint8_t *p; size_t n; @@ -1097,7 +1124,7 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, } ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( - ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1; const uint8_t *p; size_t n; @@ 
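/*
 * Illustrative sketch (not from the ngtcp2 sources): the arithmetic
 * behind the gap/length pairs that ngtcp2_pkt_decode_ack_frame()
 * parses above, as specified in RFC 9000 Section 19.3.1.  The first
 * range covers [largest_ack - first_ack_range, largest_ack]; each
 * following (gap, len) pair describes the next lower range relative to
 * the previous one.  demo_* names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint64_t gap;
  uint64_t len;
} demo_ack_range;

static void demo_expand(int64_t largest_ack, uint64_t first_ack_range,
                        const demo_ack_range *ranges, size_t rangecnt) {
  int64_t largest = largest_ack;
  int64_t smallest = largest - (int64_t)first_ack_range;
  size_t i;

  printf("acked [%lld, %lld]\n", (long long)smallest, (long long)largest);

  for (i = 0; i < rangecnt; ++i) {
    /* RFC 9000: largest = previous smallest - gap - 2 */
    largest = smallest - (int64_t)ranges[i].gap - 2;
    smallest = largest - (int64_t)ranges[i].len;
    printf("acked [%lld, %lld]\n", (long long)smallest, (long long)largest);
  }
}

int main(void) {
  /* largest_ack = 1000, first_ack_range = 5, then one gap=3,len=4 pair:
     ranges [995, 1000] and [986, 990] are acknowledged. */
  static const demo_ack_range ranges[] = {{3, 4}};

  demo_expand(1000, 5, ranges, 1);
  return 0;
}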
-1124,7 +1151,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( } ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( - ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1 + 1 + 16; const uint8_t *p; size_t n; @@ -1138,6 +1165,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1146,6 +1174,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1197,6 +1226,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stop_sending_frame(ngtcp2_stop_sending *dest, if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } + p += n; n = ngtcp2_get_uvarintlen(p); len += n - 1; @@ -1307,6 +1337,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_stream *dest, p = ngtcp2_get_uvarint(&dest->offset, p); dest->data[0].len = datalen; p += ndatalen; + if (dest->data[0].len) { dest->data[0].base = (uint8_t *)p; p += dest->data[0].len; @@ -1347,6 +1378,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_token_frame(ngtcp2_new_token *dest, if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } + datalen = (size_t)vi; len += datalen; @@ -1442,6 +1474,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_datagram_frame(ngtcp2_datagram *dest, datalen = (size_t)vi; len += datalen; + break; default: ngtcp2_unreachable(); @@ -1496,7 +1529,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_frame(uint8_t *out, size_t outlen, return ngtcp2_pkt_encode_data_blocked_frame(out, outlen, &fr->data_blocked); case NGTCP2_FRAME_STREAM_DATA_BLOCKED: return ngtcp2_pkt_encode_stream_data_blocked_frame( - out, outlen, &fr->stream_data_blocked); + out, outlen, &fr->stream_data_blocked); case NGTCP2_FRAME_STREAMS_BLOCKED_BIDI: case NGTCP2_FRAME_STREAMS_BLOCKED_UNI: return ngtcp2_pkt_encode_streams_blocked_frame(out, outlen, @@ -1518,7 +1551,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_frame(uint8_t *out, size_t outlen, return ngtcp2_pkt_encode_new_token_frame(out, outlen, &fr->new_token); case NGTCP2_FRAME_RETIRE_CONNECTION_ID: return ngtcp2_pkt_encode_retire_connection_id_frame( - out, outlen, &fr->retire_connection_id); + out, outlen, &fr->retire_connection_id); case NGTCP2_FRAME_HANDSHAKE_DONE: return ngtcp2_pkt_encode_handshake_done_frame(out, outlen, &fr->handshake_done); @@ -1586,7 +1619,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_ack_frame(uint8_t *out, size_t outlen, - ngtcp2_ack *fr) { + const ngtcp2_ack *fr) { size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->largest_ack) + ngtcp2_put_uvarintlen(fr->ack_delay) + ngtcp2_put_uvarintlen(fr->rangecnt) + @@ -1676,8 +1709,8 @@ ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, const ngtcp2_connection_close *fr) { size_t len = 1 + ngtcp2_put_uvarintlen(fr->error_code) + (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE - ? ngtcp2_put_uvarintlen(fr->frame_type) - : 0) + + ? 
ngtcp2_put_uvarintlen(fr->frame_type) + : 0) + ngtcp2_put_uvarintlen(fr->reasonlen) + fr->reasonlen; uint8_t *p; @@ -1689,10 +1722,13 @@ ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, *p++ = (uint8_t)fr->type; p = ngtcp2_put_uvarint(p, fr->error_code); + if (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE) { p = ngtcp2_put_uvarint(p, fr->frame_type); } + p = ngtcp2_put_uvarint(p, fr->reasonlen); + if (fr->reasonlen) { p = ngtcp2_cpymem(p, fr->reason, fr->reasonlen); } @@ -1796,7 +1832,7 @@ ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( - uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr) { + uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr) { size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->stream_id) + ngtcp2_put_uvarintlen(fr->offset); uint8_t *p; @@ -1986,7 +2022,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_retire_connection_id_frame( - uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr) { + uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr) { size_t len = 1 + ngtcp2_put_uvarintlen(fr->seq); uint8_t *p; @@ -2023,9 +2059,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, const ngtcp2_datagram *fr) { uint64_t datalen = ngtcp2_vec_len(fr->data, fr->datacnt); uint64_t len = - 1 + - (fr->type == NGTCP2_FRAME_DATAGRAM ? 0 : ngtcp2_put_uvarintlen(datalen)) + - datalen; + 1 + + (fr->type == NGTCP2_FRAME_DATAGRAM ? 0 : ngtcp2_put_uvarintlen(datalen)) + + datalen; uint8_t *p; size_t i; @@ -2039,6 +2075,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, p = out; *p++ = (uint8_t)fr->type; + if (fr->type == NGTCP2_FRAME_DATAGRAM_LEN) { p = ngtcp2_put_uvarint(p, datalen); } @@ -2055,9 +2092,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( - uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, - size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, - size_t nsv) { + uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, + size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, + size_t nsv) { size_t len = 1 + 4 + 1 + dcidlen + 1 + scidlen + nsv * 4; uint8_t *p; size_t i; @@ -2074,10 +2111,13 @@ ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( *p++ = 0xc0 | unused_random; p = ngtcp2_put_uint32be(p, 0); *p++ = (uint8_t)dcidlen; + if (dcidlen) { p = ngtcp2_cpymem(p, dcid, dcidlen); } + *p++ = (uint8_t)scidlen; + if (scidlen) { p = ngtcp2_cpymem(p, scid, scidlen); } @@ -2099,7 +2139,7 @@ size_t ngtcp2_pkt_decode_version_negotiation(uint32_t *dest, assert((payloadlen % sizeof(uint32_t)) == 0); for (; payload != end;) { - payload = ngtcp2_get_uint32(dest++, payload); + payload = ngtcp2_get_uint32be(dest++, payload); } return payloadlen / sizeof(uint32_t); @@ -2150,13 +2190,15 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, assert(cand <= (int64_t)NGTCP2_MAX_VARINT - win); return cand + win; } + if (cand > expected + hwin && cand >= win) { return cand - win; } + return cand; } -int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr, int64_t min_pkt_num) { +int ngtcp2_pkt_validate_ack(const ngtcp2_ack *fr, int64_t min_pkt_num) { int64_t largest_ack = fr->largest_ack; size_t i; @@ -2208,7 +2250,7 @@ ngtcp2_pkt_write_stateless_reset(uint8_t *dest, 
size_t destlen, p = dest; - randlen = ngtcp2_min(destlen - NGTCP2_STATELESS_RESET_TOKENLEN, randlen); + randlen = ngtcp2_min_size(destlen - NGTCP2_STATELESS_RESET_TOKENLEN, randlen); p = ngtcp2_cpymem(p, rand, randlen); p = ngtcp2_cpymem(p, stateless_reset_token, NGTCP2_STATELESS_RESET_TOKENLEN); @@ -2218,10 +2260,10 @@ ngtcp2_pkt_write_stateless_reset(uint8_t *dest, size_t destlen, } ngtcp2_ssize ngtcp2_pkt_write_retry( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, - size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx) { + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, + size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx) { ngtcp2_pkt_hd hd; uint8_t pseudo_retry[1500]; ngtcp2_ssize pseudo_retrylen; @@ -2247,8 +2289,8 @@ ngtcp2_ssize ngtcp2_pkt_write_retry( /* len = */ 0); pseudo_retrylen = - ngtcp2_pkt_encode_pseudo_retry(pseudo_retry, sizeof(pseudo_retry), &hd, - /* unused = */ 0, odcid, token, tokenlen); + ngtcp2_pkt_encode_pseudo_retry(pseudo_retry, sizeof(pseudo_retry), &hd, + /* unused = */ 0, odcid, token, tokenlen); if (pseudo_retrylen < 0) { return pseudo_retrylen; } @@ -2285,8 +2327,8 @@ ngtcp2_ssize ngtcp2_pkt_write_retry( } ngtcp2_ssize ngtcp2_pkt_encode_pseudo_retry( - uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, - const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen) { + uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, + const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen) { uint8_t *p = dest; ngtcp2_ssize nwrite; @@ -2382,23 +2424,23 @@ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, if (left > 8 + 1073741823 && len > 1073741823) { #if SIZE_MAX > UINT32_MAX - len = ngtcp2_min(len, 4611686018427387903lu); + len = ngtcp2_min_uint64(len, 4611686018427387903lu); #endif /* SIZE_MAX > UINT32_MAX */ - return (size_t)ngtcp2_min(len, (uint64_t)(left - 8)); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 8)); } if (left > 4 + 16383 && len > 16383) { - len = ngtcp2_min(len, 1073741823); - return (size_t)ngtcp2_min(len, (uint64_t)(left - 4)); + len = ngtcp2_min_uint64(len, 1073741823); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 4)); } if (left > 2 + 63 && len > 63) { - len = ngtcp2_min(len, 16383); - return (size_t)ngtcp2_min(len, (uint64_t)(left - 2)); + len = ngtcp2_min_uint64(len, 16383); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 2)); } - len = ngtcp2_min(len, 63); - return (size_t)ngtcp2_min(len, (uint64_t)(left - 1)); + len = ngtcp2_min_uint64(len, 63); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 1)); } size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left) { @@ -2414,23 +2456,23 @@ size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left) { if (left > 8 + 1073741823 && len > 1073741823) { #if SIZE_MAX > UINT32_MAX - len = ngtcp2_min(len, 4611686018427387903lu); + len = ngtcp2_min_size(len, 4611686018427387903lu); #endif /* SIZE_MAX > UINT32_MAX */ - return ngtcp2_min(len, left - 8); + return ngtcp2_min_size(len, left - 8); } if (left > 4 + 16383 && len > 16383) { - len = ngtcp2_min(len, 1073741823); - return ngtcp2_min(len, left - 4); + len = ngtcp2_min_size(len, 
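/*
 * Illustrative sketch (not from the ngtcp2 sources): the packet number
 * reconstruction performed by ngtcp2_pkt_adjust_pkt_num() above,
 * following RFC 9000 Appendix A.3.  A truncated packet number is
 * expanded to the candidate closest to the next expected packet
 * number.  demo_adjust_pkt_num() is a hypothetical stand-in.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int64_t demo_adjust_pkt_num(int64_t largest_pn, int64_t truncated_pn,
                                   size_t pkt_numlen) {
  int64_t expected = largest_pn + 1;
  int64_t win = (int64_t)1 << (pkt_numlen * 8);
  int64_t hwin = win / 2;
  int64_t mask = win - 1;
  int64_t cand = (expected & ~mask) | truncated_pn;

  if (cand <= expected - hwin && cand < ((int64_t)1 << 62) - win) {
    return cand + win;
  }
  if (cand > expected + hwin && cand >= win) {
    return cand - win;
  }
  return cand;
}

int main(void) {
  /* RFC 9000 A.3 example: with 0xa82f30ea as the highest authenticated
     packet number, the 2-byte truncated value 0x9b32 decodes to
     0xa82f9b32. */
  assert(demo_adjust_pkt_num(0xa82f30ea, 0x9b32, 2) == 0xa82f9b32);
  printf("0x%llx\n",
         (unsigned long long)demo_adjust_pkt_num(0xa82f30ea, 0x9b32, 2));
  return 0;
}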
1073741823); + return ngtcp2_min_size(len, left - 4); } if (left > 2 + 63 && len > 63) { - len = ngtcp2_min(len, 16383); - return ngtcp2_min(len, left - 2); + len = ngtcp2_min_size(len, 16383); + return ngtcp2_min_size(len, left - 2); } - len = ngtcp2_min(len, 63); - return ngtcp2_min(len, left - 1); + len = ngtcp2_min_size(len, 63); + return ngtcp2_min_size(len, left - 1); } size_t ngtcp2_pkt_datagram_framelen(size_t len) { @@ -2528,5 +2570,6 @@ int ngtcp2_pkt_verify_reserved_bits(uint8_t c) { if (c & NGTCP2_HEADER_FORM_BIT) { return (c & NGTCP2_LONG_RESERVED_BIT_MASK) == 0 ? 0 : NGTCP2_ERR_PROTO; } + return (c & NGTCP2_SHORT_RESERVED_BIT_MASK) == 0 ? 0 : NGTCP2_ERR_PROTO; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h index feec4d32c97bdd..86ebecef7bc870 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -41,7 +41,6 @@ #define NGTCP2_LONG_RESERVED_BIT_MASK 0x0c /* Short header specific macros */ -#define NGTCP2_SHORT_SPIN_BIT_MASK 0x20 #define NGTCP2_SHORT_RESERVED_BIT_MASK 0x18 #define NGTCP2_SHORT_KEY_PHASE_BIT 0x04 @@ -53,6 +52,7 @@ LENGTH<1> + PKN<1> */ #define NGTCP2_MIN_LONG_HEADERLEN (1 + 4 + 1 + 1 + 1 + 1) +/* STREAM frame specific macros */ #define NGTCP2_STREAM_FIN_BIT 0x01 #define NGTCP2_STREAM_LEN_BIT 0x02 #define NGTCP2_STREAM_OFF_BIT 0x04 @@ -72,9 +72,6 @@ the beginning of the payload. */ #define NGTCP2_DATAGRAM_OVERHEAD (1 + 8) -/* NGTCP2_MIN_FRAME_PAYLOADLEN is the minimum frame payload length. */ -#define NGTCP2_MIN_FRAME_PAYLOADLEN 16 - /* NGTCP2_MAX_SERVER_STREAM_ID_BIDI is the maximum bidirectional server stream ID. */ #define NGTCP2_MAX_SERVER_STREAM_ID_BIDI ((int64_t)0x3ffffffffffffffdll) @@ -95,16 +92,15 @@ /* NGTCP2_MAX_PKT_NUM is the maximum packet number. */ #define NGTCP2_MAX_PKT_NUM ((int64_t)((1ll << 62) - 1)) -/* NGTCP2_MIN_PKT_EXPANDLEN is the minimum packet size expansion in - addition to the minimum DCID length to hide/trigger Stateless - Reset. */ +/* NGTCP2_MIN_PKT_EXPANDLEN is the minimum packet size expansion to + hide/trigger Stateless Reset. */ #define NGTCP2_MIN_PKT_EXPANDLEN 22 /* NGTCP2_RETRY_TAGLEN is the length of Retry packet integrity tag. */ #define NGTCP2_RETRY_TAGLEN 16 -/* NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE is the maximum UDP payload size - that this library can write. */ +/* NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE is the maximum UDP datagram + payload size that this library can write. */ #define NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE ((1 << 24) - 1) /* NGTCP2_PKT_LENGTHLEN is the number of bytes that is occupied by @@ -357,6 +353,11 @@ typedef union ngtcp2_frame { ngtcp2_retire_connection_id retire_connection_id; ngtcp2_handshake_done handshake_done; ngtcp2_datagram datagram; + /* Extend ngtcp2_frame so that ngtcp2_stream has at least additional + 3 ngtcp2_vec, totaling 4 slots, which can store HEADERS header, + HEADERS payload, DATA header, and DATA payload in the standard + sized ngtcp2_frame_chain. */ + uint8_t pad[sizeof(ngtcp2_stream) + sizeof(ngtcp2_vec) * 3]; } ngtcp2_frame; typedef struct ngtcp2_pkt_chain ngtcp2_pkt_chain; @@ -371,7 +372,7 @@ struct ngtcp2_pkt_chain { uint8_t *pkt; /* pktlen is length of a QUIC packet. */ size_t pktlen; - /* dgramlen is length of UDP datagram that a QUIC packet is + /* dgramlen is length of UDP datagram payload that a QUIC packet is included. 
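/*
 * Illustrative sketch (not from the ngtcp2 sources): the sizing trick
 * behind the new `pad` member of the ngtcp2_frame union above.  A
 * struct whose last member is a one-element array gains extra usable
 * slots when it is only ever stored inside a union that is padded to a
 * larger size.  The demo_* types are hypothetical stand-ins.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint8_t *base;
  size_t len;
} demo_vec;

typedef struct {
  uint64_t type;
  size_t datacnt;
  demo_vec data[1]; /* more elements may follow if the storage allows it */
} demo_stream;

typedef union {
  uint64_t type;
  demo_stream stream;
  /* Reserve room for 3 extra demo_vec, so demo_stream.data has at
     least 4 slots whenever it lives inside a demo_frame. */
  uint8_t pad[sizeof(demo_stream) + sizeof(demo_vec) * 3];
} demo_frame;

int main(void) {
  size_t slots =
      (sizeof(demo_frame) - offsetof(demo_stream, data)) / sizeof(demo_vec);

  printf("data[] slots available inside demo_frame: %zu\n", slots);
  assert(slots >= 4);
  return 0;
}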
*/ size_t dgramlen; ngtcp2_tstamp ts; @@ -403,11 +404,11 @@ void ngtcp2_pkt_chain_del(ngtcp2_pkt_chain *pc, const ngtcp2_mem *mem); /* * ngtcp2_pkt_hd_init initializes |hd| with the given values. If - * |dcid| and/or |scid| is NULL, DCID and SCID of |hd| is empty - * respectively. |pkt_numlen| is the number of bytes used to encode - * |pkt_num| and either 1, 2, or 4. |version| is QUIC version for - * long header. |len| is the length field of Initial, 0RTT, and - * Handshake packets. + * |dcid| and/or |scid| is NULL, Destination Connection ID and/or + * Source Connection ID of |hd| is empty respectively. |pkt_numlen| + * is the number of bytes used to encode |pkt_num| and either 1, 2, or + * 4. |version| is QUIC version for long header. |len| is the length + * field of Initial, 0RTT, and Handshake packets. */ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, @@ -417,8 +418,8 @@ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, /* * ngtcp2_pkt_encode_hd_long encodes |hd| as QUIC long header into * |out| which has length |outlen|. It returns the number of bytes - * written into |outlen| if it succeeds, or one of the following - * negative error codes: + * written into |out| if it succeeds, or one of the following negative + * error codes: * * NGTCP2_ERR_NOBUF * Buffer is too short @@ -429,8 +430,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, /* * ngtcp2_pkt_encode_hd_short encodes |hd| as QUIC short header into * |out| which has length |outlen|. It returns the number of bytes - * written into |outlen| if it succeeds, or one of the following - * negative error codes: + * written into |out| if it succeeds, or one of the following negative + * error codes: * * NGTCP2_ERR_NOBUF * Buffer is too short @@ -457,7 +458,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, /** * @function * - * `ngtcp2_pkt_encode_frame` encodes a frame |fm| into the buffer + * `ngtcp2_pkt_encode_frame` encodes a frame |fr| into the buffer * pointed by |out| of length |outlen|. * * This function returns the number of bytes written to the buffer, or @@ -543,7 +544,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, /* * ngtcp2_pkt_decode_padding_frame decodes contiguous PADDING frames * from |payload| of length |payloadlen|. It continues to parse - * frames as long as the frame type is PADDING. This finishes when it + * frames as long as the frame type is PADDING. It finishes when it * encounters the frame type which is not PADDING, or all input data * is read. The first byte (payload[0]) must be NGTCP2_FRAME_PADDING. * This function returns the exact number of bytes read to decode @@ -573,7 +574,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, * ngtcp2_pkt_decode_connection_close_frame decodes CONNECTION_CLOSE * frame from |payload| of length |payloadlen|. The result is stored * in the object pointed by |dest|. CONNECTION_CLOSE frame must start - * at payload[0]. This function finishes it decodes one + * at payload[0]. This function finishes when it decodes one * CONNECTION_CLOSE frame, and returns the exact number of bytes read * to decode a frame if it succeeds, or one of the following negative * error codes: @@ -582,7 +583,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, * Payload is too short to include CONNECTION_CLOSE frame. 
*/ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( - ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_max_data_frame decodes MAX_DATA frame from @@ -612,7 +613,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_data_frame(ngtcp2_max_data *dest, * Payload is too short to include MAX_STREAM_DATA frame. */ ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( - ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_max_streams_frame decodes MAX_STREAMS frame from @@ -668,10 +669,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_data_blocked_frame(ngtcp2_data_blocked *dest, * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include STREAM_DATA_BLOCKED frame. */ -ngtcp2_ssize -ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, - const uint8_t *payload, - size_t payloadlen); +ngtcp2_ssize ngtcp2_pkt_decode_stream_data_blocked_frame( + ngtcp2_stream_data_blocked *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_streams_blocked_frame decodes STREAMS_BLOCKED @@ -686,7 +685,7 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, * Payload is too short to include STREAMS_BLOCKED frame. */ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( - ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_new_connection_id_frame decodes NEW_CONNECTION_ID @@ -699,11 +698,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( * * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include NEW_CONNECTION_ID frame; or the - * length of CID is strictly less than NGTCP2_MIN_CIDLEN or - * greater than NGTCP2_MAX_CIDLEN. + * length of Connection ID is strictly less than NGTCP2_MIN_CIDLEN + * or greater than NGTCP2_MAX_CIDLEN. */ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( - ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_stop_sending_frame decodes STOP_SENDING frame @@ -784,20 +783,19 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_token_frame(ngtcp2_new_token *dest, size_t payloadlen); /* - * ngtcp2_pkt_decode_retire_connection_id_frame decodes RETIRE_CONNECTION_ID - * frame from |payload| of length |payloadlen|. The result is stored in the - * object pointed by |dest|. RETIRE_CONNECTION_ID frame must start at - * payload[0]. This function finishes when it decodes one RETIRE_CONNECTION_ID - * frame, and returns the exact number of bytes read to decode a frame - * if it succeeds, or one of the following negative error codes: + * ngtcp2_pkt_decode_retire_connection_id_frame decodes + * RETIRE_CONNECTION_ID frame from |payload| of length |payloadlen|. + * The result is stored in the object pointed by |dest|. + * RETIRE_CONNECTION_ID frame must start at payload[0]. This function + * finishes when it decodes one RETIRE_CONNECTION_ID frame, and + * returns the exact number of bytes read to decode a frame if it + * succeeds, or one of the following negative error codes: * * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include RETIRE_CONNECTION_ID frame. 
*/ -ngtcp2_ssize -ngtcp2_pkt_decode_retire_connection_id_frame(ngtcp2_retire_connection_id *dest, - const uint8_t *payload, - size_t payloadlen); +ngtcp2_ssize ngtcp2_pkt_decode_retire_connection_id_frame( + ngtcp2_retire_connection_id *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_handshake_done_frame decodes HANDSHAKE_DONE frame @@ -846,9 +844,6 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, * ngtcp2_pkt_encode_ack_frame encodes ACK frame |fr| into the buffer * pointed by |out| of length |outlen|. * - * This function assigns & - * ~NGTCP2_FRAME_ACK to fr->flags. - * * This function returns the number of bytes written if it succeeds, * or one of the following negative error codes: * @@ -856,7 +851,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_ack_frame(uint8_t *out, size_t outlen, - ngtcp2_ack *fr); + const ngtcp2_ack *fr); /* * ngtcp2_pkt_encode_padding_frame encodes PADDING frame |fr| into the @@ -980,7 +975,7 @@ ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( - uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr); + uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr); /* * ngtcp2_pkt_encode_streams_blocked_frame encodes STREAMS_BLOCKED @@ -1079,8 +1074,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, const ngtcp2_new_token *fr); /* - * ngtcp2_pkt_encode_retire_connection_id_frame encodes RETIRE_CONNECTION_ID - * frame |fr| into the buffer pointed by |out| of length |outlen|. + * ngtcp2_pkt_encode_retire_connection_id_frame encodes + * RETIRE_CONNECTION_ID frame |fr| into the buffer pointed by |out| of + * length |outlen|. * * This function returns the number of bytes written if it succeeds, * or one of the following negative error codes: @@ -1089,7 +1085,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_retire_connection_id_frame( - uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr); + uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr); /* * ngtcp2_pkt_encode_handshake_done_frame encodes HANDSHAKE_DONE frame @@ -1119,7 +1115,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, const ngtcp2_datagram *fr); /* - * ngtcp2_pkt_adjust_pkt_num find the full 64 bits packet number for + * ngtcp2_pkt_adjust_pkt_num finds the full 62 bits packet number for * |pkt_num|, which is encoded in |pkt_numlen| bytes. The * |max_pkt_num| is the highest successfully authenticated packet * number. @@ -1128,10 +1124,10 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, size_t pkt_numlen); /* - * ngtcp2_pkt_validate_ack checks that ack is malformed or not. - * |min_pkt_num| is the minimum packet number that an endpoint sends. - * It is an error to receive acknowledgements for a packet less than - * |min_pkt_num|. + * ngtcp2_pkt_validate_ack verifies whether |fr| is malformed or not. + * |min_pkt_num| is the minimum packet number that a local endpoint + * sends. It is an error to receive acknowledgements for a packet + * less than |min_pkt_num|. 
* * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1141,13 +1137,13 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, * NGTCP2_ERR_PROTO * |fr| contains a packet number less than |min_pkt_num|. */ -int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr, int64_t min_pkt_num); +int ngtcp2_pkt_validate_ack(const ngtcp2_ack *fr, int64_t min_pkt_num); /* * ngtcp2_pkt_stream_max_datalen returns the maximum number of bytes * which can be sent for stream denoted by |stream_id|. |offset| is - * an offset of within the stream. |len| is the estimated number of - * bytes to be sent. |left| is the size of buffer. If |left| is too + * an offset within the stream. |len| is the estimated number of + * bytes to send. |left| is the size of buffer. If |left| is too * small to write STREAM frame, this function returns (size_t)-1. */ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, @@ -1155,10 +1151,10 @@ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, /* * ngtcp2_pkt_crypto_max_datalen returns the maximum number of bytes - * which can be sent for crypto stream. |offset| is an offset of - * within the crypto stream. |len| is the estimated number of bytes - * to be sent. |left| is the size of buffer. If |left| is too small - * to write CRYPTO frame, this function returns (size_t)-1. + * which can be sent for crypto stream. |offset| is an offset within + * the crypto stream. |len| is the estimated number of bytes to send. + * |left| is the size of buffer. If |left| is too small to write + * CRYPTO frame, this function returns (size_t)-1. */ size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left); @@ -1191,8 +1187,8 @@ int ngtcp2_pkt_verify_reserved_bits(uint8_t c); * Buffer is too short. */ ngtcp2_ssize ngtcp2_pkt_encode_pseudo_retry( - uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, - const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen); + uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, + const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen); /* * ngtcp2_pkt_verify_retry_tag verifies Retry packet. The buffer @@ -1229,4 +1225,4 @@ uint8_t ngtcp2_pkt_versioned_type(uint32_t version, uint32_t pkt_type); */ uint8_t ngtcp2_pkt_get_type_long(uint32_t version, uint8_t c); -#endif /* NGTCP2_PKT_H */ +#endif /* !defined(NGTCP2_PKT_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h index 66b0ee9e6c13cf..9cf8444d32d7eb 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -59,4 +59,4 @@ typedef enum ngtcp2_pktns_id { NGTCP2_PKTNS_ID_MAX } ngtcp2_pktns_id; -#endif /* NGTCP2_PKTNS_ID_H */ +#endif /* !defined(NGTCP2_PKTNS_ID_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c index 771ef5e026d12d..ebd113f6746217 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c @@ -33,18 +33,17 @@ for each probe. */ #define NGTCP2_PMTUD_PROBE_NUM_MAX 3 -static size_t mtu_probes[] = { - 1454 - 48, /* The well known MTU used by a domestic optic fiber - service in Japan. 
*/ - 1390 - 48, /* Typical Tunneled MTU */ - 1280 - 48, /* IPv6 minimum MTU */ - 1492 - 48, /* PPPoE */ +static uint16_t pmtud_default_probes[] = { + 1454 - 48, /* The well known MTU used by a domestic optic fiber + service in Japan. */ + 1390 - 48, /* Typical Tunneled MTU */ + 1280 - 48, /* IPv6 minimum MTU */ + 1492 - 48, /* PPPoE */ }; -#define NGTCP2_MTU_PROBESLEN ngtcp2_arraylen(mtu_probes) - int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, size_t hard_max_udp_payload_size, int64_t tx_pkt_num, + const uint16_t *probes, size_t probeslen, const ngtcp2_mem *mem) { ngtcp2_pmtud *pmtud = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_pmtud)); @@ -61,11 +60,19 @@ int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, pmtud->hard_max_udp_payload_size = hard_max_udp_payload_size; pmtud->min_fail_udp_payload_size = SIZE_MAX; - for (; pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN; ++pmtud->mtu_idx) { - if (mtu_probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { + if (probeslen) { + pmtud->probes = probes; + pmtud->probeslen = probeslen; + } else { + pmtud->probes = pmtud_default_probes; + pmtud->probeslen = ngtcp2_arraylen(pmtud_default_probes); + } + + for (; pmtud->mtu_idx < pmtud->probeslen; ++pmtud->mtu_idx) { + if (pmtud->probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { continue; } - if (mtu_probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { + if (pmtud->probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { break; } } @@ -84,9 +91,9 @@ void ngtcp2_pmtud_del(ngtcp2_pmtud *pmtud) { } size_t ngtcp2_pmtud_probelen(ngtcp2_pmtud *pmtud) { - assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); + assert(pmtud->mtu_idx < pmtud->probeslen); - return mtu_probes[pmtud->mtu_idx]; + return pmtud->probes[pmtud->mtu_idx]; } void ngtcp2_pmtud_probe_sent(ngtcp2_pmtud *pmtud, ngtcp2_duration pto, @@ -107,19 +114,19 @@ int ngtcp2_pmtud_require_probe(ngtcp2_pmtud *pmtud) { } static void pmtud_next_probe(ngtcp2_pmtud *pmtud) { - assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); + assert(pmtud->mtu_idx < pmtud->probeslen); ++pmtud->mtu_idx; pmtud->num_pkts_sent = 0; pmtud->expiry = UINT64_MAX; - for (; pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN; ++pmtud->mtu_idx) { - if (mtu_probes[pmtud->mtu_idx] <= pmtud->max_udp_payload_size || - mtu_probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { + for (; pmtud->mtu_idx < pmtud->probeslen; ++pmtud->mtu_idx) { + if (pmtud->probes[pmtud->mtu_idx] <= pmtud->max_udp_payload_size || + pmtud->probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { continue; } - if (mtu_probes[pmtud->mtu_idx] < pmtud->min_fail_udp_payload_size) { + if (pmtud->probes[pmtud->mtu_idx] < pmtud->min_fail_udp_payload_size) { break; } } @@ -127,11 +134,11 @@ static void pmtud_next_probe(ngtcp2_pmtud *pmtud) { void ngtcp2_pmtud_probe_success(ngtcp2_pmtud *pmtud, size_t payloadlen) { pmtud->max_udp_payload_size = - ngtcp2_max(pmtud->max_udp_payload_size, payloadlen); + ngtcp2_max_size(pmtud->max_udp_payload_size, payloadlen); - assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); + assert(pmtud->mtu_idx < pmtud->probeslen); - if (mtu_probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { + if (pmtud->probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { return; } @@ -149,12 +156,12 @@ void ngtcp2_pmtud_handle_expiry(ngtcp2_pmtud *pmtud, ngtcp2_tstamp ts) { return; } - pmtud->min_fail_udp_payload_size = - ngtcp2_min(pmtud->min_fail_udp_payload_size, mtu_probes[pmtud->mtu_idx]); + pmtud->min_fail_udp_payload_size = ngtcp2_min_size( + 
pmtud->min_fail_udp_payload_size, pmtud->probes[pmtud->mtu_idx]); pmtud_next_probe(pmtud); } int ngtcp2_pmtud_finished(ngtcp2_pmtud *pmtud) { - return pmtud->mtu_idx >= NGTCP2_MTU_PROBESLEN; + return pmtud->mtu_idx >= pmtud->probeslen; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h index 6b2e691cfc793a..53fc6a538292e0 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -58,6 +58,10 @@ typedef struct ngtcp2_pmtud { /* min_fail_udp_payload_size is the minimum UDP payload size that is known to fail. */ size_t min_fail_udp_payload_size; + /* probes is the array of UDP datagram payload size to probe. */ + const uint16_t *probes; + /* probeslen is the number of probes pointed by probes. */ + size_t probeslen; } ngtcp2_pmtud; /* @@ -70,6 +74,10 @@ typedef struct ngtcp2_pmtud { * larger than or equal to all UDP payload probe candidates. * Therefore, call ngtcp2_pmtud_finished to check this situation. * + * The array pointed by |pmtud_probes| of length |pmtud_probeslen| + * specifies UDP datagram payload size to probe. If |pmtud_probeslen| + * is zero, the default probes are used. + * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -78,6 +86,7 @@ typedef struct ngtcp2_pmtud { */ int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, size_t hard_max_udp_payload_size, int64_t tx_pkt_num, + const uint16_t *pmtud_probes, size_t pmtud_probeslen, const ngtcp2_mem *mem); /* @@ -120,4 +129,4 @@ void ngtcp2_pmtud_handle_expiry(ngtcp2_pmtud *pmtud, ngtcp2_tstamp ts); */ int ngtcp2_pmtud_finished(ngtcp2_pmtud *pmtud); -#endif /* NGTCP2_PMTUD_H */ +#endif /* !defined(NGTCP2_PMTUD_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c index f7c122b1ab406b..13ef2b24908905 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c @@ -29,11 +29,13 @@ #include "ngtcp2_str.h" #include "ngtcp2_conv.h" +#include "ngtcp2_macro.h" void ngtcp2_ppe_init(ngtcp2_ppe *ppe, uint8_t *out, size_t outlen, - ngtcp2_crypto_cc *cc) { + size_t dgram_offset, ngtcp2_crypto_cc *cc) { ngtcp2_buf_init(&ppe->buf, out, outlen); + ppe->dgram_offset = dgram_offset; ppe->hdlen = 0; ppe->len_offset = 0; ppe->pkt_num_offset = 0; @@ -53,17 +55,22 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd) { if (hd->flags & NGTCP2_PKT_FLAG_LONG_FORM) { ppe->len_offset = 1 + 4 + 1 + hd->dcid.datalen + 1 + hd->scid.datalen; + if (hd->type == NGTCP2_PKT_INITIAL) { ppe->len_offset += ngtcp2_put_uvarintlen(hd->tokenlen) + hd->tokenlen; } + ppe->pkt_num_offset = ppe->len_offset + NGTCP2_PKT_LENGTHLEN; + rv = ngtcp2_pkt_encode_hd_long( - buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); + buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); } else { ppe->pkt_num_offset = 1 + hd->dcid.datalen; + rv = ngtcp2_pkt_encode_hd_short( - buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); + buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); } + if (rv < 0) { return (int)rv; } @@ -72,7 +79,6 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd) { ppe->pkt_numlen = hd->pkt_numlen; ppe->hdlen = (size_t)rv; - ppe->pkt_num = hd->pkt_num; return 0; @@ -88,7 +94,7 @@ int ngtcp2_ppe_encode_frame(ngtcp2_ppe *ppe, ngtcp2_frame *fr) { } rv = 
ngtcp2_pkt_encode_frame( - buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, fr); + buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, fr); if (rv < 0) { return (int)rv; } @@ -121,8 +127,8 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { if (ppe->len_offset) { ngtcp2_put_uvarint30( - buf->begin + ppe->len_offset, - (uint16_t)(payloadlen + ppe->pkt_numlen + cc->aead.max_overhead)); + buf->begin + ppe->len_offset, + (uint16_t)(payloadlen + ppe->pkt_numlen + cc->aead.max_overhead)); } ngtcp2_crypto_create_nonce(ppe->nonce, cc->ckm->iv.base, cc->ckm->iv.len, @@ -136,7 +142,7 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { buf->last = payload + payloadlen + cc->aead.max_overhead; - /* TODO Check that we have enough space to get sample */ + /* Make sure that we have enough space to get sample */ assert(ppe_sample_offset(ppe) + NGTCP2_HP_SAMPLELEN <= ngtcp2_buf_len(buf)); rv = cc->hp_mask(mask, &cc->hp, &cc->hp_ctx, @@ -164,7 +170,7 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { return (ngtcp2_ssize)ngtcp2_buf_len(buf); } -size_t ngtcp2_ppe_left(ngtcp2_ppe *ppe) { +size_t ngtcp2_ppe_left(const ngtcp2_ppe *ppe) { ngtcp2_crypto_cc *cc = ppe->cc; if (ngtcp2_buf_left(&ppe->buf) < cc->aead.max_overhead) { @@ -174,57 +180,57 @@ size_t ngtcp2_ppe_left(ngtcp2_ppe *ppe) { return ngtcp2_buf_left(&ppe->buf) - cc->aead.max_overhead; } -size_t ngtcp2_ppe_pktlen(ngtcp2_ppe *ppe) { +size_t ngtcp2_ppe_pktlen(const ngtcp2_ppe *ppe) { ngtcp2_crypto_cc *cc = ppe->cc; return ngtcp2_buf_len(&ppe->buf) + cc->aead.max_overhead; } -size_t ngtcp2_ppe_padding(ngtcp2_ppe *ppe) { - ngtcp2_crypto_cc *cc = ppe->cc; - ngtcp2_buf *buf = &ppe->buf; - size_t len; - - assert(ngtcp2_buf_left(buf) >= cc->aead.max_overhead); - - len = ngtcp2_buf_left(buf) - cc->aead.max_overhead; - memset(buf->last, 0, len); - buf->last += len; - - return len; -} - -size_t ngtcp2_ppe_padding_hp_sample(ngtcp2_ppe *ppe) { +size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n) { ngtcp2_crypto_cc *cc = ppe->cc; ngtcp2_buf *buf = &ppe->buf; - size_t max_samplelen; + size_t pktlen = ngtcp2_buf_len(buf) + cc->aead.max_overhead; size_t len = 0; + size_t max_samplelen; - assert(cc->aead.max_overhead); + n = ngtcp2_min_size(n, ngtcp2_buf_cap(buf)); + if (pktlen < n) { + len = n - pktlen; + } + /* Ensure header protection sample */ max_samplelen = - ngtcp2_buf_len(buf) + cc->aead.max_overhead - ppe_sample_offset(ppe); + ngtcp2_buf_len(buf) + cc->aead.max_overhead - ppe_sample_offset(ppe); + if (max_samplelen < NGTCP2_HP_SAMPLELEN) { - len = NGTCP2_HP_SAMPLELEN - max_samplelen; - assert(ngtcp2_ppe_left(ppe) >= len); - memset(buf->last, 0, len); - buf->last += len; + len = ngtcp2_max_size(len, NGTCP2_HP_SAMPLELEN - max_samplelen); } + assert(ngtcp2_buf_left(buf) >= len + cc->aead.max_overhead); + + buf->last = ngtcp2_setmem(buf->last, 0, len); + return len; } -size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n) { +size_t ngtcp2_ppe_dgram_padding(ngtcp2_ppe *ppe) { + return ngtcp2_ppe_dgram_padding_size(ppe, NGTCP2_MAX_UDP_PAYLOAD_SIZE); +} + +size_t ngtcp2_ppe_dgram_padding_size(ngtcp2_ppe *ppe, size_t n) { ngtcp2_crypto_cc *cc = ppe->cc; ngtcp2_buf *buf = &ppe->buf; - size_t pktlen = ngtcp2_buf_len(buf) + cc->aead.max_overhead; + size_t dgramlen = + ppe->dgram_offset + ngtcp2_buf_len(buf) + cc->aead.max_overhead; size_t len; - if (pktlen >= n) { + n = ngtcp2_min_size(n, ppe->dgram_offset + ngtcp2_buf_cap(buf)); + + if (dgramlen >= n) { return 0; } - len = n - 
pktlen; + len = n - dgramlen; buf->last = ngtcp2_setmem(buf->last, 0, len); return len; @@ -232,5 +238,6 @@ size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n) { int ngtcp2_ppe_ensure_hp_sample(ngtcp2_ppe *ppe) { ngtcp2_buf *buf = &ppe->buf; + return ngtcp2_buf_left(buf) >= (4 - ppe->pkt_numlen) + NGTCP2_HP_SAMPLELEN; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h index 2a069ef33451ab..660b1482b56671 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -36,11 +36,17 @@ #include "ngtcp2_crypto.h" /* - * ngtcp2_ppe is the Protected Packet Encoder. + * ngtcp2_ppe is the QUIC Packet Encoder. */ typedef struct ngtcp2_ppe { + /* buf is the buffer where a QUIC packet is written. */ ngtcp2_buf buf; + /* cc is the encryption context that includes callback functions to + encrypt a QUIC packet, and AEAD cipher, etc. */ ngtcp2_crypto_cc *cc; + /* dgram_offset is the offset in UDP datagram payload that this QUIC + packet is positioned at. */ + size_t dgram_offset; /* hdlen is the number of bytes for packet header written in buf. */ size_t hdlen; /* len_offset is the offset to Length field. */ @@ -53,7 +59,7 @@ typedef struct ngtcp2_ppe { /* pkt_num is the packet number written in buf. */ int64_t pkt_num; /* nonce is the buffer to store nonce. It should be equal or longer - than then length of IV. */ + than the length of IV. */ uint8_t nonce[32]; } ngtcp2_ppe; @@ -61,7 +67,7 @@ typedef struct ngtcp2_ppe { * ngtcp2_ppe_init initializes |ppe| with the given buffer. */ void ngtcp2_ppe_init(ngtcp2_ppe *ppe, uint8_t *out, size_t outlen, - ngtcp2_crypto_cc *cc); + size_t dgram_offset, ngtcp2_crypto_cc *cc); /* * ngtcp2_ppe_encode_hd encodes |hd|. @@ -86,7 +92,7 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd); int ngtcp2_ppe_encode_frame(ngtcp2_ppe *ppe, ngtcp2_frame *fr); /* - * ngtcp2_ppe_final encrypts QUIC packet payload. If |**ppkt| is not + * ngtcp2_ppe_final encrypts QUIC packet payload. If |ppkt| is not * NULL, the pointer to the packet is assigned to it. * * This function returns the length of QUIC packet, including header, @@ -102,39 +108,46 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt); * ngtcp2_ppe_left returns the number of bytes left to write * additional frames. This does not count AEAD overhead. */ -size_t ngtcp2_ppe_left(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_left(const ngtcp2_ppe *ppe); /* * ngtcp2_ppe_pktlen returns the provisional packet length. It * includes AEAD overhead. */ -size_t ngtcp2_ppe_pktlen(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_pktlen(const ngtcp2_ppe *ppe); -/** - * @function +/* + * ngtcp2_ppe_dgram_padding is equivalent to call + * ngtcp2_ppe_dgram_padding_size(ppe, NGTCP2_MAX_UDP_PAYLOAD_SIZE). + * This function should be called just before calling + * ngtcp2_ppe_final(). * - * `ngtcp2_ppe_padding` encodes PADDING frames to the end of the - * buffer. This function returns the number of bytes padded. + * This function returns the number of bytes padded. */ -size_t ngtcp2_ppe_padding(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_dgram_padding(ngtcp2_ppe *ppe); /* - * ngtcp2_ppe_padding_hp_sample adds PADDING frame if the current - * payload does not have enough space for header protection sample. - * This function should be called just before calling - * ngtcp2_ppe_final(). 
+ * ngtcp2_ppe_dgram_padding_size adds PADDING frame so that the size + * of a UDP datagram payload is at least |n| bytes long. If it is + * unable to add PADDING in that way, this function still adds PADDING + * frame as much as possible. This function should be called just + * before calling ngtcp2_ppe_final(). * * This function returns the number of bytes added as padding. */ -size_t ngtcp2_ppe_padding_hp_sample(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_dgram_padding_size(ngtcp2_ppe *ppe, size_t n); /* * ngtcp2_ppe_padding_size adds PADDING frame so that the size of QUIC - * packet is at least |n| bytes long. If it is unable to add PADDING - * in that way, this function still adds PADDING frame as much as - * possible. This function should be called just before calling - * ngtcp2_ppe_final(). For Short packet, this function should be - * called instead of ngtcp2_ppe_padding_hp_sample. + * packet is at least |n| bytes long and the current payload has + * enough space for header protection sample. If it is unable to add + * PADDING at least |n| bytes, this function still adds PADDING frames + * as much as possible. This function also adds PADDING frames so + * that the minimum padding requirement of header protection is met. + * Those padding may be larger than |n| bytes. It is recommended to + * make sure that ngtcp2_ppe_ensure_hp_sample succeeds after writing + * QUIC packet header. This function should be called just before + * calling ngtcp2_ppe_final(). * * This function returns the number of bytes added as padding. */ @@ -147,4 +160,4 @@ size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n); */ int ngtcp2_ppe_ensure_hp_sample(ngtcp2_ppe *ppe); -#endif /* NGTCP2_PPE_H */ +#endif /* !defined(NGTCP2_PPE_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c index 5e1003d7942e53..19e3e3e36aa5e6 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c @@ -29,17 +29,20 @@ #include "ngtcp2_macro.h" -void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_less less, const ngtcp2_mem *mem) { - pq->mem = mem; - pq->capacity = 0; +void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_pq_less less, const ngtcp2_mem *mem) { pq->q = NULL; + pq->mem = mem; pq->length = 0; + pq->capacity = 0; pq->less = less; } void ngtcp2_pq_free(ngtcp2_pq *pq) { + if (!pq) { + return; + } + ngtcp2_mem_free(pq->mem, pq->q); - pq->q = NULL; } static void swap(ngtcp2_pq *pq, size_t i, size_t j) { @@ -54,11 +57,13 @@ static void swap(ngtcp2_pq *pq, size_t i, size_t j) { static void bubble_up(ngtcp2_pq *pq, size_t index) { size_t parent; - while (index != 0) { + + while (index) { parent = (index - 1) / 2; if (!pq->less(pq->q[index], pq->q[parent])) { return; } + swap(pq, parent, index); index = parent; } @@ -69,56 +74,64 @@ int ngtcp2_pq_push(ngtcp2_pq *pq, ngtcp2_pq_entry *item) { void *nq; size_t ncapacity; - ncapacity = ngtcp2_max(4, (pq->capacity * 2)); + ncapacity = ngtcp2_max_size(4, pq->capacity * 2); - nq = ngtcp2_mem_realloc(pq->mem, pq->q, - ncapacity * sizeof(ngtcp2_pq_entry *)); + nq = + ngtcp2_mem_realloc(pq->mem, pq->q, ncapacity * sizeof(ngtcp2_pq_entry *)); if (nq == NULL) { return NGTCP2_ERR_NOMEM; } + pq->capacity = ncapacity; pq->q = nq; } + pq->q[pq->length] = item; item->index = pq->length; ++pq->length; - bubble_up(pq, pq->length - 1); + bubble_up(pq, item->index); + return 0; } -ngtcp2_pq_entry *ngtcp2_pq_top(ngtcp2_pq *pq) { +ngtcp2_pq_entry *ngtcp2_pq_top(const ngtcp2_pq *pq) { assert(pq->length); return pq->q[0]; } static void bubble_down(ngtcp2_pq 
*pq, size_t index) { size_t i, j, minindex; + for (;;) { j = index * 2 + 1; minindex = index; + for (i = 0; i < 2; ++i, ++j) { if (j >= pq->length) { break; } + if (pq->less(pq->q[j], pq->q[minindex])) { minindex = j; } } + if (minindex == index) { return; } + swap(pq, index, minindex); index = minindex; } } void ngtcp2_pq_pop(ngtcp2_pq *pq) { - if (pq->length > 0) { - pq->q[0] = pq->q[pq->length - 1]; - pq->q[0]->index = 0; - --pq->length; - bubble_down(pq, 0); - } + assert(pq->length); + + pq->q[0] = pq->q[pq->length - 1]; + pq->q[0]->index = 0; + --pq->length; + bubble_down(pq, 0); } void ngtcp2_pq_remove(ngtcp2_pq *pq, ngtcp2_pq_entry *item) { @@ -145,20 +158,22 @@ void ngtcp2_pq_remove(ngtcp2_pq *pq, ngtcp2_pq_entry *item) { } } -int ngtcp2_pq_empty(ngtcp2_pq *pq) { return pq->length == 0; } +int ngtcp2_pq_empty(const ngtcp2_pq *pq) { return pq->length == 0; } -size_t ngtcp2_pq_size(ngtcp2_pq *pq) { return pq->length; } +size_t ngtcp2_pq_size(const ngtcp2_pq *pq) { return pq->length; } -int ngtcp2_pq_each(ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg) { +int ngtcp2_pq_each(const ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg) { size_t i; if (pq->length == 0) { return 0; } + for (i = 0; i < pq->length; ++i) { if ((*fun)(pq->q[i], arg)) { return 1; } } + return 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h index 484c8f21f75de2..84961c9143978c 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h @@ -28,7 +28,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -45,37 +45,39 @@ typedef struct ngtcp2_pq_entry { size_t index; } ngtcp2_pq_entry; -/* "less" function, return nonzero if |lhs| is less than |rhs|. */ -typedef int (*ngtcp2_less)(const ngtcp2_pq_entry *lhs, - const ngtcp2_pq_entry *rhs); +/* ngtcp2_pq_less is a "less" function, that returns nonzero if |lhs| + is considered to be less than |rhs|. */ +typedef int (*ngtcp2_pq_less)(const ngtcp2_pq_entry *lhs, + const ngtcp2_pq_entry *rhs); typedef struct ngtcp2_pq { - /* The pointer to the pointer to the item stored */ + /* q is a pointer to an array that stores the items. */ ngtcp2_pq_entry **q; - /* Memory allocator */ + /* mem is a memory allocator. */ const ngtcp2_mem *mem; - /* The number of items stored */ + /* length is the number of items stored. */ size_t length; - /* The maximum number of items this pq can store. This is - automatically extended when length is reached to this value. */ + /* capacity is the maximum number of items this queue can store. + This is automatically extended when length is reached to this + limit. */ size_t capacity; - /* The less function between items */ - ngtcp2_less less; + /* less is the less function to compare items. */ + ngtcp2_pq_less less; } ngtcp2_pq; /* - * Initializes priority queue |pq| with compare function |cmp|. + * ngtcp2_pq_init initializes |pq| with compare function |cmp|. */ -void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_less less, const ngtcp2_mem *mem); +void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_pq_less less, const ngtcp2_mem *mem); /* - * Deallocates any resources allocated for |pq|. The stored items are - * not freed by this function. + * ngtcp2_pq_free deallocates any resources allocated for |pq|. The + * stored items are not freed by this function. */ void ngtcp2_pq_free(ngtcp2_pq *pq); /* - * Adds |item| to the priority queue |pq|. + * ngtcp2_pq_push adds |item| to |pq|. 
* * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -86,41 +88,42 @@ void ngtcp2_pq_free(ngtcp2_pq *pq); int ngtcp2_pq_push(ngtcp2_pq *pq, ngtcp2_pq_entry *item); /* - * Returns item at the top of the queue |pq|. It is undefined if the - * queue is empty. + * ngtcp2_pq_top returns item at the top of |pq|. It is undefined if + * |pq| is empty. */ -ngtcp2_pq_entry *ngtcp2_pq_top(ngtcp2_pq *pq); +ngtcp2_pq_entry *ngtcp2_pq_top(const ngtcp2_pq *pq); /* - * Pops item at the top of the queue |pq|. The popped item is not - * freed by this function. + * ngtcp2_pq_pop pops item at the top of |pq|. The popped item is not + * freed by this function. It is undefined if |pq| is empty. */ void ngtcp2_pq_pop(ngtcp2_pq *pq); /* - * Returns nonzero if the queue |pq| is empty. + * ngtcp2_pq_empty returns nonzero if |pq| is empty. */ -int ngtcp2_pq_empty(ngtcp2_pq *pq); +int ngtcp2_pq_empty(const ngtcp2_pq *pq); /* - * Returns the number of items in the queue |pq|. + * ngtcp2_pq_size returns the number of items |pq| contains. */ -size_t ngtcp2_pq_size(ngtcp2_pq *pq); +size_t ngtcp2_pq_size(const ngtcp2_pq *pq); typedef int (*ngtcp2_pq_item_cb)(ngtcp2_pq_entry *item, void *arg); /* - * Applies |fun| to each item in |pq|. The |arg| is passed as arg - * parameter to callback function. This function must not change the - * ordering key. If the return value from callback is nonzero, this - * function returns 1 immediately without iterating remaining items. - * Otherwise this function returns 0. + * ngtcp2_pq_each applies |fun| to each item in |pq|. The |arg| is + * passed as arg parameter to callback function. This function must + * not change the ordering key. If the return value from callback is + * nonzero, this function returns 1 immediately without iterating + * remaining items. Otherwise this function returns 0. */ -int ngtcp2_pq_each(ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg); +int ngtcp2_pq_each(const ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg); /* - * Removes |item| from priority queue. + * ngtcp2_pq_remove removes |item| from |pq|. |pq| must contain + * |item| otherwise the behavior is undefined. */ void ngtcp2_pq_remove(ngtcp2_pq *pq, ngtcp2_pq_entry *item); -#endif /* NGTCP2_PQ_H */ +#endif /* !defined(NGTCP2_PQ_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c index 314e005293c279..e4fee94eb558d3 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c @@ -143,7 +143,7 @@ int ngtcp2_pv_validation_timed_out(ngtcp2_pv *pv, ngtcp2_tstamp ts) { ent = ngtcp2_ringbuf_get(&pv->ents.rb, ngtcp2_ringbuf_len(&pv->ents.rb) - 1); t = pv->started_ts + pv->timeout; - t = ngtcp2_max(t, ent->expiry); + t = ngtcp2_max_uint64(t, ent->expiry); return t <= ts; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h index c9da15248a3e2b..e9573da497bee4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -83,7 +83,7 @@ void ngtcp2_pv_entry_init(ngtcp2_pv_entry *pvent, const uint8_t *data, typedef struct ngtcp2_pv ngtcp2_pv; ngtcp2_static_ringbuf_def(pv_ents, NGTCP2_PV_MAX_ENTRIES, - sizeof(ngtcp2_pv_entry)); + sizeof(ngtcp2_pv_entry)) /* * ngtcp2_pv is the context of a single path validation. 
*/ @@ -191,4 +191,4 @@ ngtcp2_tstamp ngtcp2_pv_next_expiry(ngtcp2_pv *pv); */ void ngtcp2_pv_cancel_expired_timer(ngtcp2_pv *pv, ngtcp2_tstamp ts); -#endif /* NGTCP2_PV_H */ +#endif /* !defined(NGTCP2_PV_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c index 27675347794b2a..c0f920746a4dff 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c @@ -184,13 +184,12 @@ static uint8_t *write_pair_cid_impl(uint8_t *p, const uint8_t *name, write_pair_cid_impl((DEST), (const uint8_t *)(NAME), sizeof(NAME) - 1, \ (VALUE)) -#define ngtcp2_make_vec_lit(S) \ - { (uint8_t *)(S), sizeof((S)) - 1 } +#define ngtcp2_make_vec_lit(S) {(uint8_t *)(S), sizeof((S)) - 1} static uint8_t *write_common_fields(uint8_t *p, const ngtcp2_cid *odcid) { p = write_verbatim( - p, "\"common_fields\":{\"protocol_type\":[\"QUIC\"],\"time_format\":" - "\"relative\",\"reference_time\":0,\"group_id\":"); + p, "\"common_fields\":{\"protocol_type\":[\"QUIC\"],\"time_format\":" + "\"relative\",\"reference_time\":0,\"group_id\":"); p = write_cid(p, odcid); *p++ = '}'; return p; @@ -198,7 +197,7 @@ static uint8_t *write_common_fields(uint8_t *p, const ngtcp2_cid *odcid) { static uint8_t *write_trace(uint8_t *p, int server, const ngtcp2_cid *odcid) { p = write_verbatim( - p, "\"trace\":{\"vantage_point\":{\"name\":\"ngtcp2\",\"type\":"); + p, "\"trace\":{\"vantage_point\":{\"name\":\"ngtcp2\",\"type\":"); if (server) { p = write_string(p, "server"); } else { @@ -219,7 +218,7 @@ void ngtcp2_qlog_start(ngtcp2_qlog *qlog, const ngtcp2_cid *odcid, int server) { } p = write_verbatim( - p, "\x1e{\"qlog_format\":\"JSON-SEQ\",\"qlog_version\":\"0.3\","); + p, "\x1e{\"qlog_format\":\"JSON-SEQ\",\"qlog_version\":\"0.3\","); p = write_trace(p, server, odcid); p = write_verbatim(p, "}\n"); @@ -243,9 +242,9 @@ static ngtcp2_vec vec_pkt_type_0rtt = ngtcp2_make_vec_lit("0RTT"); static ngtcp2_vec vec_pkt_type_1rtt = ngtcp2_make_vec_lit("1RTT"); static ngtcp2_vec vec_pkt_type_retry = ngtcp2_make_vec_lit("retry"); static ngtcp2_vec vec_pkt_type_version_negotiation = - ngtcp2_make_vec_lit("version_negotiation"); + ngtcp2_make_vec_lit("version_negotiation"); static ngtcp2_vec vec_pkt_type_stateless_reset = - ngtcp2_make_vec_lit("stateless_reset"); + ngtcp2_make_vec_lit("stateless_reset"); static ngtcp2_vec vec_pkt_type_unknown = ngtcp2_make_vec_lit("unknown"); static const ngtcp2_vec *qlog_pkt_type(const ngtcp2_pkt_hd *hd) { @@ -638,8 +637,8 @@ write_connection_close_frame(uint8_t *p, const ngtcp2_connection_close *fr) { */ #define NGTCP2_QLOG_CONNECTION_CLOSE_FRAME_OVERHEAD 131 - p = write_verbatim(p, - "{\"frame_type\":\"connection_close\",\"error_space\":"); + p = + write_verbatim(p, "{\"frame_type\":\"connection_close\",\"error_space\":"); if (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE) { p = write_string(p, "transport"); } else { @@ -772,10 +771,10 @@ void ngtcp2_qlog_write_frame(ngtcp2_qlog *qlog, const ngtcp2_frame *fr) { case NGTCP2_FRAME_ACK_ECN: if (ngtcp2_buf_left(&qlog->buf) < NGTCP2_QLOG_ACK_FRAME_BASE_OVERHEAD + - (size_t)(fr->type == NGTCP2_FRAME_ACK_ECN - ? NGTCP2_QLOG_ACK_FRAME_ECN_OVERHEAD - : 0) + - NGTCP2_QLOG_ACK_FRAME_RANGE_OVERHEAD * (1 + fr->ack.rangecnt) + 1) { + (size_t)(fr->type == NGTCP2_FRAME_ACK_ECN + ? 
NGTCP2_QLOG_ACK_FRAME_ECN_OVERHEAD + : 0) + + NGTCP2_QLOG_ACK_FRAME_RANGE_OVERHEAD * (1 + fr->ack.rangecnt) + 1) { return; } p = write_ack_frame(p, &fr->ack); @@ -934,8 +933,8 @@ void ngtcp2_qlog_pkt_sent_end(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, } void ngtcp2_qlog_parameters_set_transport_params( - ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, - ngtcp2_qlog_side side) { + ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, + ngtcp2_qlog_side side) { uint8_t buf[1024]; uint8_t *p = buf; const ngtcp2_preferred_addr *paddr; @@ -950,7 +949,7 @@ void ngtcp2_qlog_parameters_set_transport_params( *p++ = '{'; p = qlog_write_time(qlog, p); p = write_verbatim( - p, ",\"name\":\"transport:parameters_set\",\"data\":{\"owner\":"); + p, ",\"name\":\"transport:parameters_set\",\"data\":{\"owner\":"); if (side == NGTCP2_QLOG_SIDE_LOCAL) { p = write_string(p, "local"); @@ -982,8 +981,8 @@ void ngtcp2_qlog_parameters_set_transport_params( *p++ = ','; p = write_pair_duration(p, "max_idle_timeout", params->max_idle_timeout); *p++ = ','; - p = write_pair_number(p, "max_udp_payload_size", - params->max_udp_payload_size); + p = + write_pair_number(p, "max_udp_payload_size", params->max_udp_payload_size); *p++ = ','; p = write_pair_number(p, "ack_delay_exponent", params->ack_delay_exponent); *p++ = ','; @@ -1106,7 +1105,7 @@ void ngtcp2_qlog_pkt_lost(ngtcp2_qlog *qlog, ngtcp2_rtb_entry *ent) { *p++ = '{'; p = qlog_write_time(qlog, p); p = write_verbatim( - p, ",\"name\":\"recovery:packet_lost\",\"data\":{\"header\":"); + p, ",\"name\":\"recovery:packet_lost\",\"data\":{\"header\":"); hd.type = ent->hd.type; hd.flags = ent->hd.flags; @@ -1134,13 +1133,11 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, *buf.last++ = '{'; buf.last = qlog_write_time(qlog, buf.last); buf.last = write_verbatim( - buf.last, - ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); + buf.last, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); - if (ngtcp2_buf_left(&buf) < - NGTCP2_QLOG_PKT_HD_OVERHEAD + hd->tokenlen * 2 + - sizeof(",\"retry_token\":{\"data\":\"\"}}}\n") - 1 + - retry->tokenlen * 2) { + if (ngtcp2_buf_left(&buf) < NGTCP2_QLOG_PKT_HD_OVERHEAD + hd->tokenlen * 2 + + sizeof(",\"retry_token\":{\"data\":\"\"}}}\n") - + 1 + retry->tokenlen * 2) { return; } @@ -1154,7 +1151,7 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, } void ngtcp2_qlog_stateless_reset_pkt_received( - ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr) { + ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr) { uint8_t buf[256]; uint8_t *p = buf; ngtcp2_pkt_hd hd = {0}; @@ -1169,7 +1166,7 @@ void ngtcp2_qlog_stateless_reset_pkt_received( *p++ = '{'; p = qlog_write_time(qlog, p); p = write_verbatim( - p, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); + p, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); p = write_pkt_hd(p, &hd); *p++ = ','; p = write_pair_hex(p, "stateless_reset_token", sr->stateless_reset_token, @@ -1199,8 +1196,7 @@ void ngtcp2_qlog_version_negotiation_pkt_received(ngtcp2_qlog *qlog, *buf.last++ = '{'; buf.last = qlog_write_time(qlog, buf.last); buf.last = write_verbatim( - buf.last, - ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); + buf.last, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); buf.last = write_pkt_hd(buf.last, hd); buf.last = write_verbatim(buf.last, ",\"supported_versions\":["); diff --git 
a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h index b9107c0e5c031a..d2a5f1038c0f42 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -120,8 +120,8 @@ void ngtcp2_qlog_pkt_sent_end(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, * "local", otherwise "remote". */ void ngtcp2_qlog_parameters_set_transport_params( - ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, - ngtcp2_qlog_side side); + ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, + ngtcp2_qlog_side side); /* * ngtcp2_qlog_metrics_updated writes metrics_updated event of @@ -147,7 +147,7 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, * event for a received Stateless Reset packet. */ void ngtcp2_qlog_stateless_reset_pkt_received( - ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr); + ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr); /* * ngtcp2_qlog_version_negotiation_pkt_received writes packet_received @@ -158,4 +158,4 @@ void ngtcp2_qlog_version_negotiation_pkt_received(ngtcp2_qlog *qlog, const uint32_t *sv, size_t nsv); -#endif /* NGTCP2_QLOG_H */ +#endif /* !defined(NGTCP2_QLOG_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c index 9379496b7d4b53..7bbefc0175c595 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c @@ -33,11 +33,13 @@ void ngtcp2_range_init(ngtcp2_range *r, uint64_t begin, uint64_t end) { ngtcp2_range ngtcp2_range_intersect(const ngtcp2_range *a, const ngtcp2_range *b) { ngtcp2_range r = {0, 0}; - uint64_t begin = ngtcp2_max(a->begin, b->begin); - uint64_t end = ngtcp2_min(a->end, b->end); + uint64_t begin = ngtcp2_max_uint64(a->begin, b->begin); + uint64_t end = ngtcp2_min_uint64(a->end, b->end); + if (begin < end) { ngtcp2_range_init(&r, begin, end); } + return r; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h index a776c4ec4768ce..22cd2951859ef1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -58,7 +58,7 @@ uint64_t ngtcp2_range_len(const ngtcp2_range *r); /* * ngtcp2_range_eq returns nonzero if |a| equals |b|, such that - * a->begin == b->begin, and a->end == b->end hold. + * a->begin == b->begin and a->end == b->end hold. */ int ngtcp2_range_eq(const ngtcp2_range *a, const ngtcp2_range *b); @@ -77,4 +77,4 @@ void ngtcp2_range_cut(ngtcp2_range *left, ngtcp2_range *right, */ int ngtcp2_range_not_after(const ngtcp2_range *a, const ngtcp2_range *b); -#endif /* NGTCP2_RANGE_H */ +#endif /* !defined(NGTCP2_RANGE_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h index 4cb40882192a77..e6321061b59cd1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -37,4 +37,4 @@ /* NGTCP2_GRANULARITY is kGranularity described in RFC 9002. 
*/ #define NGTCP2_GRANULARITY NGTCP2_MILLISECONDS -#endif /* NGTCP2_RCVRY_H */ +#endif /* !defined(NGTCP2_RCVRY_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c index ecfdeb63b3485b..41446739bf699d 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c @@ -27,24 +27,29 @@ #include #ifdef WIN32 # include -#endif +#endif /* defined(WIN32) */ #include "ngtcp2_macro.h" -#if defined(_MSC_VER) && _MSC_VER < 1941 && !defined(__clang__) && \ - (defined(_M_ARM) || defined(_M_ARM64)) -static unsigned int __popcnt(unsigned int x) { - unsigned int c = 0; - for (; x; ++c) { - x &= x - 1; - } - return c; +#ifndef NDEBUG +static int ispow2(size_t n) { +# if defined(_MSC_VER) && !defined(__clang__) && \ + (defined(_M_ARM) || (defined(_M_ARM64) && _MSC_VER < 1941)) + return n && !(n & (n - 1)); +# elif defined(WIN32) + return 1 == __popcnt((unsigned int)n); +# else /* !((defined(_MSC_VER) && !defined(__clang__) && (defined(_M_ARM) || \ + (defined(_M_ARM64) && _MSC_VER < 1941))) || defined(WIN32)) */ + return 1 == __builtin_popcount((unsigned int)n); +# endif /* !((defined(_MSC_VER) && !defined(__clang__) && (defined(_M_ARM) || \ + (defined(_M_ARM64) && _MSC_VER < 1941))) || defined(WIN32)) */ } -#endif +#endif /* !defined(NDEBUG) */ int ngtcp2_ringbuf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, const ngtcp2_mem *mem) { uint8_t *buf = ngtcp2_mem_malloc(mem, nmemb * size); + if (buf == NULL) { return NGTCP2_ERR_NOMEM; } @@ -56,11 +61,7 @@ int ngtcp2_ringbuf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, void ngtcp2_ringbuf_buf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, uint8_t *buf, const ngtcp2_mem *mem) { -#ifdef WIN32 - assert(1 == __popcnt((unsigned int)nmemb)); -#else - assert(1 == __builtin_popcount((unsigned int)nmemb)); -#endif + assert(ispow2(nmemb)); rb->buf = buf; rb->mem = mem; @@ -114,9 +115,10 @@ void ngtcp2_ringbuf_resize(ngtcp2_ringbuf *rb, size_t len) { rb->len = len; } -void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset) { +void *ngtcp2_ringbuf_get(const ngtcp2_ringbuf *rb, size_t offset) { assert(offset < rb->len); offset = (rb->first + offset) & rb->mask; + return &rb->buf[offset * rb->size]; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h index b28a882c4bae84..6953ea6278f88d 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -51,7 +51,7 @@ typedef struct ngtcp2_ringbuf { /* * ngtcp2_ringbuf_init initializes |rb|. |nmemb| is the number of * elements that can be stored in this buffer. |size| is the size of - * each element. |size| must be power of 2. + * each element. |nmemb| must be power of 2. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -64,7 +64,7 @@ int ngtcp2_ringbuf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, /* * ngtcp2_ringbuf_buf_init initializes |rb| with given buffer and - * size. + * size. Same restrictions are applied as ngtcp2_ringbuf_init. */ void ngtcp2_ringbuf_buf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, uint8_t *buf, const ngtcp2_mem *mem); @@ -79,15 +79,16 @@ void ngtcp2_ringbuf_free(ngtcp2_ringbuf *rb); the buffer backward, and returns the pointer to the element. Caller can store data to the buffer pointed by the returned pointer. 
If this action exceeds the capacity of the ring buffer, - the last element is silently overwritten, and rb->len remains - unchanged. */ + this function returns the pointer to the last element, and rb->len + remains unchanged. */ void *ngtcp2_ringbuf_push_front(ngtcp2_ringbuf *rb); /* ngtcp2_ringbuf_push_back moves the offset to the last element in the buffer forward, and returns the pointer to the element. Caller can store data to the buffer pointed by the returned pointer. If - this action exceeds the capacity of the ring buffer, the first - element is silently overwritten, and rb->len remains unchanged. */ + this action exceeds the capacity of the ring buffer, this function + returns the pointer to the first element, and rb->len remains + unchanged. */ void *ngtcp2_ringbuf_push_back(ngtcp2_ringbuf *rb); /* @@ -106,7 +107,7 @@ void ngtcp2_ringbuf_resize(ngtcp2_ringbuf *rb, size_t len); /* ngtcp2_ringbuf_get returns the pointer to the element at |offset|. */ -void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset); +void *ngtcp2_ringbuf_get(const ngtcp2_ringbuf *rb, size_t offset); /* ngtcp2_ringbuf_len returns the number of elements stored. */ #define ngtcp2_ringbuf_len(RB) ((RB)->len) @@ -115,9 +116,8 @@ void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset); int ngtcp2_ringbuf_full(ngtcp2_ringbuf *rb); /* ngtcp2_static_ringbuf_def defines ngtcp2_ringbuf struct wrapper - which uses a statically allocated buffer that is suitable for a - usage that does not change buffer size with ngtcp2_ringbuf_resize. - ngtcp2_ringbuf_free should never be called for rb field. */ + which uses a statically allocated buffer. ngtcp2_ringbuf_free + should never be called for rb field. */ #define ngtcp2_static_ringbuf_def(NAME, NMEMB, SIZE) \ typedef struct ngtcp2_static_ringbuf_##NAME { \ ngtcp2_ringbuf rb; \ @@ -125,8 +125,8 @@ int ngtcp2_ringbuf_full(ngtcp2_ringbuf *rb); } ngtcp2_static_ringbuf_##NAME; \ \ static inline void ngtcp2_static_ringbuf_##NAME##_init( \ - ngtcp2_static_ringbuf_##NAME *srb) { \ + ngtcp2_static_ringbuf_##NAME *srb) { \ ngtcp2_ringbuf_buf_init(&srb->rb, (NMEMB), (SIZE), srb->buf, NULL); \ } -#endif /* NGTCP2_RINGBUF_H */ +#endif /* !defined(NGTCP2_RINGBUF_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c index 5cac383f7bb166..ce6c2113ddf1ce 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c @@ -56,7 +56,6 @@ int ngtcp2_rob_data_new(ngtcp2_rob_data **pd, uint64_t offset, size_t chunk, (*pd)->range.begin = offset; (*pd)->range.end = offset + chunk; (*pd)->begin = (uint8_t *)(*pd) + sizeof(ngtcp2_rob_data); - (*pd)->end = (*pd)->begin + chunk; return 0; } @@ -69,8 +68,8 @@ int ngtcp2_rob_init(ngtcp2_rob *rob, size_t chunk, const ngtcp2_mem *mem) { int rv; ngtcp2_rob_gap *g; - ngtcp2_ksl_init(&rob->gapksl, ngtcp2_ksl_range_compar, sizeof(ngtcp2_range), - mem); + ngtcp2_ksl_init(&rob->gapksl, ngtcp2_ksl_range_compar, + ngtcp2_ksl_range_search, sizeof(ngtcp2_range), mem); rv = ngtcp2_rob_gap_new(&g, 0, UINT64_MAX, mem); if (rv != 0) { @@ -82,8 +81,8 @@ int ngtcp2_rob_init(ngtcp2_rob *rob, size_t chunk, const ngtcp2_mem *mem) { goto fail_gapksl_ksl_insert; } - ngtcp2_ksl_init(&rob->dataksl, ngtcp2_ksl_range_compar, sizeof(ngtcp2_range), - mem); + ngtcp2_ksl_init(&rob->dataksl, ngtcp2_ksl_range_compar, + ngtcp2_ksl_range_search, sizeof(ngtcp2_range), mem); rob->chunk = chunk; rob->mem = mem; @@ -126,8 +125,8 @@ static int rob_write_data(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, 
ngtcp2_range range = {offset, offset + len}; ngtcp2_ksl_it it; - for (it = ngtcp2_ksl_lower_bound_compar(&rob->dataksl, &range, - ngtcp2_ksl_range_exclusive_compar); + for (it = ngtcp2_ksl_lower_bound_search(&rob->dataksl, &range, + ngtcp2_ksl_range_exclusive_search); len; ngtcp2_ksl_it_next(&it)) { if (ngtcp2_ksl_it_end(&it)) { d = NULL; @@ -149,7 +148,8 @@ static int rob_write_data(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, } } - n = (size_t)ngtcp2_min((uint64_t)len, d->range.begin + rob->chunk - offset); + n = (size_t)ngtcp2_min_uint64((uint64_t)len, + d->range.begin + rob->chunk - offset); memcpy(d->begin + (offset - d->range.begin), data, n); offset += n; data += n; @@ -166,8 +166,8 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, ngtcp2_range m, l, r, q = {offset, offset + datalen}; ngtcp2_ksl_it it; - it = ngtcp2_ksl_lower_bound_compar(&rob->gapksl, &q, - ngtcp2_ksl_range_exclusive_compar); + it = ngtcp2_ksl_lower_bound_search(&rob->gapksl, &q, + ngtcp2_ksl_range_exclusive_search); for (; !ngtcp2_ksl_it_end(&it);) { g = ngtcp2_ksl_it_get(&it); @@ -176,9 +176,11 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, if (!ngtcp2_range_len(&m)) { break; } + if (ngtcp2_range_eq(&g->range, &m)) { ngtcp2_ksl_remove_hint(&rob->gapksl, &it, &it, &g->range); ngtcp2_rob_gap_del(g, rob->mem); + rv = rob_write_data(rob, m.begin, data + (m.begin - offset), (size_t)ngtcp2_range_len(&m)); if (rv != 0) { @@ -187,17 +189,21 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, continue; } + ngtcp2_range_cut(&l, &r, &g->range, &m); + if (ngtcp2_range_len(&l)) { ngtcp2_ksl_update_key(&rob->gapksl, &g->range, &l); g->range = l; if (ngtcp2_range_len(&r)) { ngtcp2_rob_gap *ng; + rv = ngtcp2_rob_gap_new(&ng, r.begin, r.end, rob->mem); if (rv != 0) { return rv; } + rv = ngtcp2_ksl_insert(&rob->gapksl, &it, &ng->range, ng); if (rv != 0) { ngtcp2_rob_gap_del(ng, rob->mem); @@ -208,13 +214,16 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, ngtcp2_ksl_update_key(&rob->gapksl, &g->range, &r); g->range = r; } + rv = rob_write_data(rob, m.begin, data + (m.begin - offset), (size_t)ngtcp2_range_len(&m)); if (rv != 0) { return rv; } + ngtcp2_ksl_it_next(&it); } + return 0; } @@ -230,12 +239,16 @@ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { if (offset <= g->range.begin) { break; } + if (offset < g->range.end) { ngtcp2_range r = {offset, g->range.end}; + ngtcp2_ksl_update_key(&rob->gapksl, &g->range, &r); g->range.begin = offset; + break; } + ngtcp2_ksl_remove_hint(&rob->gapksl, &it, &it, &g->range); ngtcp2_rob_gap_del(g, rob->mem); } @@ -247,12 +260,13 @@ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { if (offset < d->range.begin + rob->chunk) { return; } + ngtcp2_ksl_remove_hint(&rob->dataksl, &it, &it, &d->range); ngtcp2_rob_data_del(d, rob->mem); } } -size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, +size_t ngtcp2_rob_data_at(const ngtcp2_rob *rob, const uint8_t **pdest, uint64_t offset) { ngtcp2_rob_gap *g; ngtcp2_rob_data *d; @@ -278,8 +292,9 @@ size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, *pdest = d->begin + (offset - d->range.begin); - return (size_t)(ngtcp2_min(g->range.begin, d->range.begin + rob->chunk) - - offset); + return ( + size_t)(ngtcp2_min_uint64(g->range.begin, d->range.begin + rob->chunk) - + offset); } void ngtcp2_rob_pop(ngtcp2_rob *rob, uint64_t offset, size_t len) { @@ -299,7 +314,7 @@ void 
ngtcp2_rob_pop(ngtcp2_rob *rob, uint64_t offset, size_t len) { ngtcp2_rob_data_del(d, rob->mem); } -uint64_t ngtcp2_rob_first_gap_offset(ngtcp2_rob *rob) { +uint64_t ngtcp2_rob_first_gap_offset(const ngtcp2_rob *rob) { ngtcp2_ksl_it it = ngtcp2_ksl_begin(&rob->gapksl); ngtcp2_rob_gap *g; @@ -312,6 +327,6 @@ uint64_t ngtcp2_rob_first_gap_offset(ngtcp2_rob *rob) { return g->range.begin; } -int ngtcp2_rob_data_buffered(ngtcp2_rob *rob) { +int ngtcp2_rob_data_buffered(const ngtcp2_rob *rob) { return ngtcp2_ksl_len(&rob->dataksl) != 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h index 6518d56c539185..d53b5160b10230 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -70,12 +70,10 @@ void ngtcp2_rob_gap_del(ngtcp2_rob_gap *g, const ngtcp2_mem *mem); * ngtcp2_rob_data holds the buffered stream data. */ typedef struct ngtcp2_rob_data { - /* range is the range of this gap. */ + /* range is the range of this data. */ ngtcp2_range range; /* begin points to the buffer. */ uint8_t *begin; - /* end points to the one beyond of the last byte of the buffer */ - uint8_t *end; } ngtcp2_rob_data; /* @@ -110,8 +108,8 @@ typedef struct ngtcp2_rob { /* gapksl maintains the range of offset which is not received yet. Initially, its range is [0, UINT64_MAX). */ ngtcp2_ksl gapksl; - /* dataksl maintains the list of buffers which store received data - ordered by stream offset. */ + /* dataksl maintains the buffers which store received out-of-order + data ordered by stream offset. */ ngtcp2_ksl dataksl; /* mem is custom memory allocator */ const ngtcp2_mem *mem; @@ -137,8 +135,8 @@ int ngtcp2_rob_init(ngtcp2_rob *rob, size_t chunk, const ngtcp2_mem *mem); void ngtcp2_rob_free(ngtcp2_rob *rob); /* - * ngtcp2_rob_push adds new data of length |datalen| at the stream - * offset |offset|. + * ngtcp2_rob_push adds new data pointed by |data| of length |datalen| + * at the stream offset |offset|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -151,7 +149,8 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, /* * ngtcp2_rob_remove_prefix removes gap up to |offset|, exclusive. It - * also removes data buffer if it is completely included in |offset|. + * also removes buffered data if it is completely included in + * |offset|. */ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset); @@ -159,9 +158,10 @@ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset); * ngtcp2_rob_data_at stores the pointer to the buffer of stream * offset |offset| to |*pdest| if it is available, and returns the * valid length of available data. If no data is available, it - * returns 0. + * returns 0. This function only returns the data before the first + * gap. It returns 0 even if data is available after the first gap. */ -size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, +size_t ngtcp2_rob_data_at(const ngtcp2_rob *rob, const uint8_t **pdest, uint64_t offset); /* @@ -181,11 +181,11 @@ void ngtcp2_rob_pop(ngtcp2_rob *rob, uint64_t offset, size_t len); * ngtcp2_rob_first_gap_offset returns the offset to the first gap. * If there is no gap, it returns UINT64_MAX. 
*/ -uint64_t ngtcp2_rob_first_gap_offset(ngtcp2_rob *rob); +uint64_t ngtcp2_rob_first_gap_offset(const ngtcp2_rob *rob); /* * ngtcp2_rob_data_buffered returns nonzero if any data is buffered. */ -int ngtcp2_rob_data_buffered(ngtcp2_rob *rob); +int ngtcp2_rob_data_buffered(const ngtcp2_rob *rob); -#endif /* NGTCP2_ROB_H */ +#endif /* !defined(NGTCP2_ROB_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c index b8587e3e9dbac8..89c89acdc265a2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c @@ -35,26 +35,25 @@ void ngtcp2_rs_init(ngtcp2_rs *rs) { rs->interval = UINT64_MAX; rs->delivered = 0; rs->prior_delivered = 0; - rs->prior_ts = 0; + rs->prior_ts = UINT64_MAX; rs->tx_in_flight = 0; rs->lost = 0; rs->prior_lost = 0; rs->send_elapsed = 0; rs->ack_elapsed = 0; + rs->last_end_seq = -1; rs->is_app_limited = 0; } void ngtcp2_rst_init(ngtcp2_rst *rst) { ngtcp2_rs_init(&rst->rs); - ngtcp2_window_filter_init(&rst->wf, 12); rst->delivered = 0; rst->delivered_ts = 0; rst->first_sent_ts = 0; rst->app_limited = 0; - rst->next_round_delivered = 0; - rst->round_count = 0; rst->is_cwnd_limited = 0; rst->lost = 0; + rst->last_seq = -1; } void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, @@ -68,27 +67,21 @@ void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, ent->rst.is_app_limited = rst->app_limited != 0; ent->rst.tx_in_flight = cstat->bytes_in_flight + ent->pktlen; ent->rst.lost = rst->lost; + ent->rst.end_seq = ++rst->last_seq; } -void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, - uint64_t pkt_delivered) { +void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat) { ngtcp2_rs *rs = &rst->rs; - uint64_t rate; if (rst->app_limited && rst->delivered > rst->app_limited) { rst->app_limited = 0; } - if (pkt_delivered >= rst->next_round_delivered) { - rst->next_round_delivered = pkt_delivered; - ++rst->round_count; - } - - if (rs->prior_ts == 0) { + if (rs->prior_ts == UINT64_MAX) { return; } - rs->interval = ngtcp2_max(rs->send_elapsed, rs->ack_elapsed); + rs->interval = ngtcp2_max_uint64(rs->send_elapsed, rs->ack_elapsed); rs->delivered = rst->delivered - rs->prior_delivered; rs->lost = rst->lost - rs->prior_lost; @@ -102,12 +95,13 @@ void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, return; } - rate = rs->delivered * NGTCP2_SECONDS / rs->interval; + cstat->delivery_rate_sec = rs->delivered * NGTCP2_SECONDS / rs->interval; +} - if (rate > ngtcp2_window_filter_get_best(&rst->wf) || !rst->app_limited) { - ngtcp2_window_filter_update(&rst->wf, rate, rst->round_count); - cstat->delivery_rate_sec = ngtcp2_window_filter_get_best(&rst->wf); - } +static int rst_is_newest_pkt(const ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, + const ngtcp2_rs *rs) { + return ent->ts > rst->first_sent_ts || + (ent->ts == rst->first_sent_ts && ent->rst.end_seq > rs->last_end_seq); } void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, @@ -117,7 +111,7 @@ void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, rst->delivered += ent->pktlen; rst->delivered_ts = ts; - if (ent->rst.delivered > rs->prior_delivered) { + if (rs->prior_ts == UINT64_MAX || rst_is_newest_pkt(rst, ent, rs)) { rs->prior_delivered = ent->rst.delivered; rs->prior_ts = ent->rst.delivered_ts; rs->is_app_limited = ent->rst.is_app_limited; @@ -125,6 +119,7 @@ void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, 
rs->ack_elapsed = rst->delivered_ts - ent->rst.delivered_ts; rs->tx_in_flight = ent->rst.tx_in_flight; rs->prior_lost = ent->rst.lost; + rs->last_end_seq = ent->rst.end_seq; rst->first_sent_ts = ent->ts; } } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h index c9e1e161b7766f..95616eee97d99f 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -51,6 +51,7 @@ typedef struct ngtcp2_rs { uint64_t prior_lost; ngtcp2_duration send_elapsed; ngtcp2_duration ack_elapsed; + int64_t last_end_seq; int is_app_limited; } ngtcp2_rs; @@ -58,18 +59,20 @@ void ngtcp2_rs_init(ngtcp2_rs *rs); /* * ngtcp2_rst implements delivery rate estimation described in - * https://tools.ietf.org/html/draft-cheng-iccrg-delivery-rate-estimation-00 + * https://ietf-wg-ccwg.github.io/draft-cardwell-ccwg-bbr/draft-cardwell-ccwg-bbr.html */ typedef struct ngtcp2_rst { ngtcp2_rs rs; - ngtcp2_window_filter wf; uint64_t delivered; ngtcp2_tstamp delivered_ts; ngtcp2_tstamp first_sent_ts; uint64_t app_limited; - uint64_t next_round_delivered; - uint64_t round_count; uint64_t lost; + /* last_seq is the sequence number of packets across all packet + number spaces. If we would adopt single packet number sequence + across all packet number spaces, we can replace this with a + packet number. */ + int64_t last_seq; int is_cwnd_limited; } ngtcp2_rst; @@ -77,10 +80,9 @@ void ngtcp2_rst_init(ngtcp2_rst *rst); void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, const ngtcp2_conn_stat *cstat); -void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, - uint64_t pkt_delivered); +void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat); void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, ngtcp2_tstamp ts); void ngtcp2_rst_update_app_limited(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat); -#endif /* NGTCP2_RST_H */ +#endif /* !defined(NGTCP2_RST_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c index 5ebdce7d0e2715..4d417186e15854 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c @@ -38,7 +38,7 @@ #include "ngtcp2_tstamp.h" #include "ngtcp2_frame_chain.h" -ngtcp2_objalloc_def(rtb_entry, ngtcp2_rtb_entry, oplent); +ngtcp2_objalloc_def(rtb_entry, ngtcp2_rtb_entry, oplent) static void rtb_entry_init(ngtcp2_rtb_entry *ent, const ngtcp2_pkt_hd *hd, ngtcp2_frame_chain *frc, ngtcp2_tstamp ts, @@ -53,7 +53,6 @@ static void rtb_entry_init(ngtcp2_rtb_entry *ent, const ngtcp2_pkt_hd *hd, ent->lost_ts = UINT64_MAX; ent->pktlen = pktlen; ent->flags = flags; - ent->next = NULL; } int ngtcp2_rtb_entry_objalloc_new(ngtcp2_rtb_entry **pent, @@ -82,19 +81,14 @@ void ngtcp2_rtb_entry_objalloc_del(ngtcp2_rtb_entry *ent, ngtcp2_objalloc_rtb_entry_release(objalloc, ent); } -static int greater(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs > *(int64_t *)rhs; -} - -void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, - ngtcp2_strm *crypto, ngtcp2_rst *rst, ngtcp2_cc *cc, +void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t cc_pkt_num, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { rtb->rtb_entry_objalloc = rtb_entry_objalloc; rtb->frc_objalloc = frc_objalloc; - 
ngtcp2_ksl_init(&rtb->ents, greater, sizeof(int64_t), mem); - rtb->crypto = crypto; + ngtcp2_ksl_init(&rtb->ents, ngtcp2_ksl_int64_greater, + ngtcp2_ksl_int64_greater_search, sizeof(int64_t), mem); rtb->rst = rst; rtb->cc = cc; rtb->log = log; @@ -105,7 +99,6 @@ void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, rtb->num_retransmittable = 0; rtb->num_pto_eliciting = 0; rtb->probe_pkt_left = 0; - rtb->pktns_id = pktns_id; rtb->cc_pkt_num = cc_pkt_num; rtb->cc_bytes_in_flight = 0; rtb->persistent_congestion_start_ts = UINT64_MAX; @@ -145,9 +138,11 @@ static void rtb_on_add(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { ++rtb->num_ack_eliciting; } + if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE) { ++rtb->num_retransmittable; } + if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING) { ++rtb->num_pto_eliciting; } @@ -209,10 +204,10 @@ static size_t rtb_on_remove(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, #define NGTCP2_RECLAIM_FLAG_ON_LOSS 0x01u /* - * rtb_reclaim_frame queues unacknowledged frames included in |ent| - * for retransmission. The re-queued frames are not deleted from - * |ent|. It returns the number of frames queued. |flags| is bitwise - * OR of 0 or more of NGTCP2_RECLAIM_FLAG_*. + * rtb_reclaim_frame copies and queues frames included in |ent| for + * retransmission. The frames are not deleted from |ent|. It returns + * the number of frames queued. |flags| is bitwise OR of 0 or more of + * NGTCP2_RECLAIM_FLAG_*. */ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, ngtcp2_conn *conn, ngtcp2_pktns *pktns, @@ -229,11 +224,13 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, /* TODO Reconsider the order of pfrc */ for (frc = ent->frc; frc; frc = frc->next) { fr = &frc->fr; + /* Check that a late ACK acknowledged this frame. */ if (frc->binder && (frc->binder->flags & NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK)) { continue; } + switch (frc->fr.type) { case NGTCP2_FRAME_STREAM: strm = ngtcp2_conn_find_stream(conn, fr->stream.stream_id); @@ -244,14 +241,15 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, gap = ngtcp2_strm_get_unacked_range_after(strm, fr->stream.offset); range.begin = fr->stream.offset; - range.end = fr->stream.offset + - ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); + range.end = + fr->stream.offset + ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); range = ngtcp2_range_intersect(&range, &gap); + if (ngtcp2_range_len(&range) == 0) { if (!fr->stream.fin) { /* 0 length STREAM frame with offset == 0 must be - retransmitted if no non-empty data is sent to this stream - and no data in this stream is acknowledged. */ + retransmitted if no non-empty data are sent to this + stream, and no data in this stream are acknowledged. 
*/ if (fr->stream.offset != 0 || fr->stream.datacnt != 0 || strm->tx.offset || (strm->flags & NGTCP2_STRM_FLAG_ANY_ACKED)) { continue; @@ -268,7 +266,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); + &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); if (rv != 0) { return rv; } @@ -282,8 +280,10 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, ngtcp2_frame_chain_objalloc_del(nfrc, rtb->frc_objalloc, rtb->mem); return rv; } + if (!ngtcp2_strm_is_tx_queued(strm)) { strm->cycle = ngtcp2_conn_tx_strmq_first_cycle(conn); + rv = ngtcp2_conn_tx_strmq_push(conn, strm); if (rv != 0) { return rv; @@ -294,20 +294,22 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, continue; case NGTCP2_FRAME_CRYPTO: - /* Don't resend CRYPTO frame if the whole region it contains has - been acknowledged */ - gap = ngtcp2_strm_get_unacked_range_after(rtb->crypto, fr->stream.offset); + /* Do not resend CRYPTO frame if the whole region it contains + has been acknowledged */ + gap = ngtcp2_strm_get_unacked_range_after(&pktns->crypto.strm, + fr->stream.offset); range.begin = fr->stream.offset; - range.end = fr->stream.offset + - ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); + range.end = + fr->stream.offset + ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); range = ngtcp2_range_intersect(&range, &gap); + if (ngtcp2_range_len(&range) == 0) { continue; } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); + &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); if (rv != 0) { return rv; } @@ -328,8 +330,8 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, continue; case NGTCP2_FRAME_NEW_TOKEN: rv = ngtcp2_frame_chain_new_token_objalloc_new( - &nfrc, fr->new_token.token, fr->new_token.tokenlen, rtb->frc_objalloc, - rtb->mem); + &nfrc, fr->new_token.token, fr->new_token.tokenlen, rtb->frc_objalloc, + rtb->mem); if (rv != 0) { return rv; } @@ -366,7 +368,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, case NGTCP2_FRAME_MAX_STREAM_DATA: strm = ngtcp2_conn_find_stream(conn, fr->max_stream_data.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_max_stream_data( - strm, &fr->max_stream_data)) { + strm, &fr->max_stream_data)) { continue; } @@ -374,7 +376,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, case NGTCP2_FRAME_STREAM_DATA_BLOCKED: strm = ngtcp2_conn_find_stream(conn, fr->stream_data_blocked.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_stream_data_blocked( - strm, &fr->stream_data_blocked)) { + strm, &fr->stream_data_blocked)) { continue; } @@ -423,6 +425,7 @@ static int conn_process_lost_datagram(ngtcp2_conn *conn, if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } + break; } } @@ -430,10 +433,9 @@ static int conn_process_lost_datagram(ngtcp2_conn *conn, return 0; } -static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, - ngtcp2_rtb_entry *ent, ngtcp2_conn_stat *cstat, - ngtcp2_conn *conn, ngtcp2_pktns *pktns, - ngtcp2_tstamp ts) { +static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, + ngtcp2_conn_stat *cstat, ngtcp2_conn *conn, + ngtcp2_pktns *pktns, ngtcp2_tstamp ts) { int rv; ngtcp2_ssize reclaimed; ngtcp2_cc *cc = rtb->cc; @@ -451,7 +453,7 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, } else if 
(rtb->cc->on_pkt_lost) { cc->on_pkt_lost(cc, cstat, ngtcp2_cc_pkt_init(&pkt, ent->hd.pkt_num, ent->pktlen, - rtb->pktns_id, ent->ts, ent->rst.lost, + pktns->id, ent->ts, ent->rst.lost, ent->rst.tx_in_flight, ent->rst.is_app_limited), ts); @@ -463,34 +465,25 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, ent->hd.pkt_num); assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); assert(UINT64_MAX == ent->lost_ts); - - ent->flags |= NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED; - ent->lost_ts = ts; - - ++rtb->num_lost_pkts; - - ngtcp2_ksl_it_next(it); - - return 0; - } - - if (conn->callbacks.lost_datagram && - (ent->flags & NGTCP2_RTB_ENTRY_FLAG_DATAGRAM)) { - rv = conn_process_lost_datagram(conn, ent); - if (rv != 0) { - return rv; + } else { + if (conn->callbacks.lost_datagram && + (ent->flags & NGTCP2_RTB_ENTRY_FLAG_DATAGRAM)) { + rv = conn_process_lost_datagram(conn, ent); + if (rv != 0) { + return rv; + } } - } - if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE) { - assert(ent->frc); - assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); - assert(UINT64_MAX == ent->lost_ts); + if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE) { + assert(ent->frc); + assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); + assert(UINT64_MAX == ent->lost_ts); - reclaimed = + reclaimed = rtb_reclaim_frame(rtb, NGTCP2_RECLAIM_FLAG_ON_LOSS, conn, pktns, ent); - if (reclaimed < 0) { - return (int)reclaimed; + if (reclaimed < 0) { + return (int)reclaimed; + } } } @@ -499,8 +492,6 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, ++rtb->num_lost_pkts; - ngtcp2_ksl_it_next(it); - return 0; } @@ -518,7 +509,7 @@ int ngtcp2_rtb_add(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, return 0; } -ngtcp2_ksl_it ngtcp2_rtb_head(ngtcp2_rtb *rtb) { +ngtcp2_ksl_it ngtcp2_rtb_head(const ngtcp2_rtb *rtb) { return ngtcp2_ksl_begin(&rtb->ents); } @@ -566,22 +557,24 @@ static void conn_ack_crypto_data(ngtcp2_conn *conn, ngtcp2_pktns *pktns, return; } -static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, - ngtcp2_conn *conn) { +static int process_acked_pkt(ngtcp2_rtb_entry *ent, ngtcp2_conn *conn, + ngtcp2_pktns *pktns) { ngtcp2_frame_chain *frc; uint64_t prev_stream_offset, stream_offset; ngtcp2_strm *strm; int rv; uint64_t datalen; - ngtcp2_strm *crypto = rtb->crypto; - ngtcp2_pktns *pktns = NULL; + ngtcp2_strm *crypto = &pktns->crypto.strm; if ((ent->flags & NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE) && conn->pmtud && conn->pmtud->tx_pkt_num <= ent->hd.pkt_num) { ngtcp2_pmtud_probe_success(conn->pmtud, ent->pktlen); - conn->dcid.current.max_udp_payload_size = - ngtcp2_max(conn->dcid.current.max_udp_payload_size, ent->pktlen); + if (conn->dcid.current.max_udp_payload_size < ent->pktlen) { + conn->dcid.current.max_udp_payload_size = ent->pktlen; + conn->cstat.max_tx_udp_payload_size = + ngtcp2_conn_get_path_max_tx_udp_payload_size(conn); + } if (ngtcp2_pmtud_finished(conn->pmtud)) { ngtcp2_conn_stop_pmtud(conn); @@ -611,23 +604,25 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, } prev_stream_offset = ngtcp2_strm_get_acked_offset(strm); + rv = ngtcp2_strm_ack_data( - strm, frc->fr.stream.offset, - ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); + strm, frc->fr.stream.offset, + ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); if (rv != 0) { return rv; } if (conn->callbacks.acked_stream_data_offset) { stream_offset = ngtcp2_strm_get_acked_offset(strm); + datalen = stream_offset - prev_stream_offset; 
if (datalen == 0 && !frc->fr.stream.fin) { break; } rv = conn->callbacks.acked_stream_data_offset( - conn, strm->stream_id, prev_stream_offset, datalen, conn->user_data, - strm->stream_user_data); + conn, strm->stream_id, prev_stream_offset, datalen, conn->user_data, + strm->stream_user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -637,36 +632,25 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (rv != 0) { return rv; } + break; case NGTCP2_FRAME_CRYPTO: prev_stream_offset = ngtcp2_strm_get_acked_offset(crypto); + rv = ngtcp2_strm_ack_data( - crypto, frc->fr.stream.offset, - ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); + crypto, frc->fr.stream.offset, + ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); if (rv != 0) { return rv; } stream_offset = ngtcp2_strm_get_acked_offset(crypto); + datalen = stream_offset - prev_stream_offset; if (datalen == 0) { break; } - switch (rtb->pktns_id) { - case NGTCP2_PKTNS_ID_INITIAL: - pktns = conn->in_pktns; - break; - case NGTCP2_PKTNS_ID_HANDSHAKE: - pktns = conn->hs_pktns; - break; - case NGTCP2_PKTNS_ID_APPLICATION: - pktns = &conn->pktns; - break; - default: - ngtcp2_unreachable(); - } - conn_ack_crypto_data(conn, pktns, datalen); break; @@ -675,11 +659,14 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (strm == NULL) { break; } + strm->flags |= NGTCP2_STRM_FLAG_RESET_STREAM_ACKED; + rv = ngtcp2_conn_close_stream_if_shut_rdwr(conn, strm); if (rv != 0) { return rv; } + break; case NGTCP2_FRAME_RETIRE_CONNECTION_ID: ngtcp2_conn_untrack_retired_dcid_seq(conn, @@ -702,14 +689,17 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } + break; } } + return 0; } static void rtb_on_pkt_acked(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, - ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { + ngtcp2_conn_stat *cstat, const ngtcp2_pktns *pktns, + ngtcp2_tstamp ts) { ngtcp2_cc *cc = rtb->cc; ngtcp2_cc_pkt pkt; @@ -718,7 +708,7 @@ static void rtb_on_pkt_acked(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (cc->on_pkt_acked) { cc->on_pkt_acked(cc, cstat, ngtcp2_cc_pkt_init(&pkt, ent->hd.pkt_num, ent->pktlen, - rtb->pktns_id, ent->ts, ent->rst.lost, + pktns->id, ent->ts, ent->rst.lost, ent->rst.tx_in_flight, ent->rst.is_app_limited), ts); @@ -745,12 +735,13 @@ static void conn_verify_ecn(ngtcp2_conn *conn, ngtcp2_pktns *pktns, pktns->rx.ecn.ack.ect1 > fr->ecn.ect1 || pktns->rx.ecn.ack.ce > fr->ecn.ce || (fr->ecn.ect0 - pktns->rx.ecn.ack.ect0) + - (fr->ecn.ce - pktns->rx.ecn.ack.ce) < - ecn_acked || + (fr->ecn.ce - pktns->rx.ecn.ack.ce) < + ecn_acked || fr->ecn.ect0 > pktns->tx.ecn.ect0 || fr->ecn.ect1))) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "path is not ECN capable"); conn->tx.ecn.state = NGTCP2_ECN_STATE_FAILED; + return; } @@ -762,7 +753,7 @@ static void conn_verify_ecn(ngtcp2_conn *conn, ngtcp2_pktns *pktns, if (fr->type == NGTCP2_FRAME_ACK_ECN) { if (cc->congestion_event && largest_pkt_sent_ts != UINT64_MAX && fr->ecn.ce > pktns->rx.ecn.ack.ce) { - cc->congestion_event(cc, cstat, largest_pkt_sent_ts, ts); + cc->congestion_event(cc, cstat, largest_pkt_sent_ts, 0, ts); } pktns->rx.ecn.ack.ect0 = fr->ecn.ect0; @@ -784,7 +775,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, size_t i; int rv; ngtcp2_ksl_it it; - ngtcp2_ssize num_acked = 0; + size_t num_acked = 0; ngtcp2_tstamp largest_pkt_sent_ts = UINT64_MAX; int64_t pkt_num; ngtcp2_cc *cc = rtb->cc; @@ -820,6 
+811,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, conn_verify_ecn(conn, pktns, rtb->cc, cstat, fr, ecn_acked, largest_pkt_sent_ts, ts); } + return 0; } @@ -848,7 +840,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ++num_acked; } - for (i = 0; i < fr->rangecnt;) { + for (i = 0; i < fr->rangecnt; ++i) { largest_ack = min_ack - (int64_t)fr->ranges[i].gap - 2; min_ack = largest_ack - (int64_t)fr->ranges[i].len; @@ -862,6 +854,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, if (pkt_num < min_ack) { break; } + ent = ngtcp2_ksl_it_get(&it); if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { @@ -871,12 +864,11 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, rtb_remove(rtb, &it, &acked_ent, ent, cstat); ++num_acked; } - - ++i; } if (largest_pkt_sent_ts != UINT64_MAX && ack_eliciting_pkt_acked) { - cc_ack.rtt = pkt_ts - largest_pkt_sent_ts; + cc_ack.rtt = + ngtcp2_max_uint64(pkt_ts - largest_pkt_sent_ts, NGTCP2_NANOSECONDS); rv = ngtcp2_conn_update_rtt(conn, cc_ack.rtt, fr->ack_delay_unscaled, ts); if (rv == 0 && cc->new_rtt_sample) { @@ -891,7 +883,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ++ecn_acked; } - rv = rtb_process_acked_pkt(rtb, ent, conn); + rv = process_acked_pkt(ent, conn, pktns); if (rv != 0) { goto fail; } @@ -903,7 +895,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, cc_ack.pkt_delivered = ent->rst.delivered; } - rtb_on_pkt_acked(rtb, ent, cstat, ts); + rtb_on_pkt_acked(rtb, ent, cstat, pktns, ts); acked_ent = ent->next; ngtcp2_rtb_entry_objalloc_del(ent, rtb->rtb_entry_objalloc, rtb->frc_objalloc, rtb->mem); @@ -916,7 +908,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, } else { /* For unit tests */ for (ent = acked_ent; ent; ent = acked_ent) { - rtb_on_pkt_acked(rtb, ent, cstat, ts); + rtb_on_pkt_acked(rtb, ent, cstat, pktns, ts); acked_ent = ent->next; ngtcp2_rtb_entry_objalloc_del(ent, rtb->rtb_entry_objalloc, rtb->frc_objalloc, rtb->mem); @@ -924,27 +916,29 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, } if (rtb->cc->on_spurious_congestion && num_lost_pkts && - rtb->num_lost_pkts - rtb->num_lost_pmtud_pkts == 0) { + rtb->num_lost_pkts == rtb->num_lost_pmtud_pkts) { rtb->cc->on_spurious_congestion(cc, cstat, ts); } - ngtcp2_rst_on_ack_recv(rtb->rst, cstat, cc_ack.pkt_delivered); + if (num_acked) { + ngtcp2_rst_on_ack_recv(rtb->rst, cstat); - if (conn && num_acked > 0) { - rv = rtb_detect_lost_pkt(rtb, &cc_ack.bytes_lost, conn, pktns, cstat, ts); - if (rv != 0) { - return rv; + if (conn) { + rv = rtb_detect_lost_pkt(rtb, &cc_ack.bytes_lost, conn, pktns, cstat, ts); + if (rv != 0) { + return rv; + } } } rtb->rst->lost += cc_ack.bytes_lost; cc_ack.largest_pkt_sent_ts = largest_pkt_sent_ts; - if (cc->on_ack_recv) { + if (num_acked && cc->on_ack_recv) { cc->on_ack_recv(cc, cstat, &cc_ack, ts); } - return num_acked; + return (ngtcp2_ssize)num_acked; fail: for (ent = acked_ent; ent; ent = acked_ent) { @@ -958,7 +952,8 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, static int rtb_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat, const ngtcp2_rtb_entry *ent, ngtcp2_duration loss_delay, - size_t pkt_thres, ngtcp2_tstamp ts) { + size_t pkt_thres, const ngtcp2_pktns *pktns, + ngtcp2_tstamp ts) { ngtcp2_tstamp loss_time; if (ngtcp2_tstamp_elapsed(ent->ts, loss_delay, ts) || @@ -966,27 +961,27 @@ static int 
rtb_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat, return 1; } - loss_time = cstat->loss_time[rtb->pktns_id]; + loss_time = cstat->loss_time[pktns->id]; if (loss_time == UINT64_MAX) { loss_time = ent->ts + loss_delay; } else { - loss_time = ngtcp2_min(loss_time, ent->ts + loss_delay); + loss_time = ngtcp2_min_uint64(loss_time, ent->ts + loss_delay); } - cstat->loss_time[rtb->pktns_id] = loss_time; + cstat->loss_time[pktns->id] = loss_time; return 0; } /* - * rtb_compute_pkt_loss_delay computes loss delay. + * compute_pkt_loss_delay computes loss delay. */ static ngtcp2_duration compute_pkt_loss_delay(const ngtcp2_conn_stat *cstat) { /* 9/8 is kTimeThreshold */ ngtcp2_duration loss_delay = - ngtcp2_max(cstat->latest_rtt, cstat->smoothed_rtt) * 9 / 8; - return ngtcp2_max(loss_delay, NGTCP2_GRANULARITY); + ngtcp2_max_uint64(cstat->latest_rtt, cstat->smoothed_rtt) * 9 / 8; + return ngtcp2_max_uint64(loss_delay, NGTCP2_GRANULARITY); } /* @@ -999,9 +994,9 @@ static int conn_all_ecn_pkt_lost(ngtcp2_conn *conn) { ngtcp2_pktns *pktns = &conn->pktns; return (!in_pktns || in_pktns->tx.ecn.validation_pkt_sent == - in_pktns->tx.ecn.validation_pkt_lost) && + in_pktns->tx.ecn.validation_pkt_lost) && (!hs_pktns || hs_pktns->tx.ecn.validation_pkt_sent == - hs_pktns->tx.ecn.validation_pkt_lost) && + hs_pktns->tx.ecn.validation_pkt_lost) && pktns->tx.ecn.validation_pkt_sent == pktns->tx.ecn.validation_pkt_lost; } @@ -1017,16 +1012,16 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, ngtcp2_cc *cc = rtb->cc; int rv; uint64_t pkt_thres = - rtb->cc_bytes_in_flight / cstat->max_tx_udp_payload_size / 2; + rtb->cc_bytes_in_flight / cstat->max_tx_udp_payload_size / 2; size_t ecn_pkt_lost = 0; ngtcp2_tstamp start_ts; ngtcp2_duration pto = ngtcp2_conn_compute_pto(conn, pktns); uint64_t bytes_lost = 0; ngtcp2_duration max_ack_delay; - pkt_thres = ngtcp2_max(pkt_thres, NGTCP2_PKT_THRESHOLD); - pkt_thres = ngtcp2_min(pkt_thres, 256); - cstat->loss_time[rtb->pktns_id] = UINT64_MAX; + pkt_thres = ngtcp2_max_uint64(pkt_thres, NGTCP2_PKT_THRESHOLD); + pkt_thres = ngtcp2_min_uint64(pkt_thres, 256); + cstat->loss_time[pktns->id] = UINT64_MAX; loss_delay = compute_pkt_loss_delay(cstat); it = ngtcp2_ksl_lower_bound(&rtb->ents, &rtb->largest_acked_tx_pkt_num); @@ -1037,25 +1032,27 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, break; } - if (rtb_pkt_lost(rtb, cstat, ent, loss_delay, (size_t)pkt_thres, ts)) { + if (rtb_pkt_lost(rtb, cstat, ent, loss_delay, (size_t)pkt_thres, pktns, + ts)) { /* All entries from ent are considered to be lost. */ latest_ts = oldest_ts = ent->ts; /* +1 to pick this packet for persistent congestion in the following loop. */ last_lost_pkt_num = ent->hd.pkt_num + 1; max_ack_delay = conn->remote.transport_params - ? conn->remote.transport_params->max_ack_delay - : 0; + ? 
conn->remote.transport_params->max_ack_delay + : 0; congestion_period = - (cstat->smoothed_rtt + - ngtcp2_max(4 * cstat->rttvar, NGTCP2_GRANULARITY) + max_ack_delay) * - NGTCP2_PERSISTENT_CONGESTION_THRESHOLD; + (cstat->smoothed_rtt + + ngtcp2_max_uint64(4 * cstat->rttvar, NGTCP2_GRANULARITY) + + max_ack_delay) * + NGTCP2_PERSISTENT_CONGESTION_THRESHOLD; - start_ts = ngtcp2_max(rtb->persistent_congestion_start_ts, - cstat->first_rtt_sample_ts); + start_ts = ngtcp2_max_uint64(rtb->persistent_congestion_start_ts, + cstat->first_rtt_sample_ts); - for (; !ngtcp2_ksl_it_end(&it);) { + for (; !ngtcp2_ksl_it_end(&it); ngtcp2_ksl_it_next(&it)) { ent = ngtcp2_ksl_it_get(&it); if (last_lost_pkt_num == ent->hd.pkt_num + 1 && ent->ts >= start_ts) { @@ -1066,12 +1063,12 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, } if ((ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)) { - if (rtb->pktns_id != NGTCP2_PKTNS_ID_APPLICATION || + if (pktns->id != NGTCP2_PKTNS_ID_APPLICATION || last_lost_pkt_num == -1 || latest_ts - oldest_ts >= congestion_period) { break; } - ngtcp2_ksl_it_next(&it); + continue; } @@ -1081,7 +1078,7 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, } bytes_lost += rtb_on_remove(rtb, ent, cstat); - rv = rtb_on_pkt_lost(rtb, &it, ent, cstat, conn, pktns, ts); + rv = rtb_on_pkt_lost(rtb, ent, cstat, conn, pktns, ts); if (rv != 0) { return rv; } @@ -1098,13 +1095,16 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, if (conn->tx.ecn.validation_start_ts == UINT64_MAX) { break; } + if (ts - conn->tx.ecn.validation_start_ts < 3 * pto) { pktns->tx.ecn.validation_pkt_lost += ecn_pkt_lost; assert(pktns->tx.ecn.validation_pkt_sent >= pktns->tx.ecn.validation_pkt_lost); break; } + conn->tx.ecn.state = NGTCP2_ECN_STATE_UNKNOWN; + /* fall through */ case NGTCP2_ECN_STATE_UNKNOWN: pktns->tx.ecn.validation_pkt_lost += ecn_pkt_lost; @@ -1119,7 +1119,7 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, } if (cc->congestion_event) { - cc->congestion_event(cc, cstat, latest_ts, ts); + cc->congestion_event(cc, cstat, latest_ts, bytes_lost, ts); } loss_window = latest_ts - oldest_ts; @@ -1130,23 +1130,22 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, * persistent congestion there, then it is a lot easier to just * not enable it during handshake. */ - if (rtb->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && loss_window > 0) { - if (loss_window >= congestion_period) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, - "persistent congestion loss_window=%" PRIu64 - " congestion_period=%" PRIu64, - loss_window, congestion_period); - - /* Reset min_rtt, srtt, and rttvar here. Next new RTT - sample will be used to recalculate these values. */ - cstat->min_rtt = UINT64_MAX; - cstat->smoothed_rtt = conn->local.settings.initial_rtt; - cstat->rttvar = conn->local.settings.initial_rtt / 2; - cstat->first_rtt_sample_ts = UINT64_MAX; - - if (cc->on_persistent_congestion) { - cc->on_persistent_congestion(cc, cstat, ts); - } + if (pktns->id == NGTCP2_PKTNS_ID_APPLICATION && loss_window && + loss_window >= congestion_period) { + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, + "persistent congestion loss_window=%" PRIu64 + " congestion_period=%" PRIu64, + loss_window, congestion_period); + + /* Reset min_rtt, srtt, and rttvar here. Next new RTT + sample will be used to recalculate these values. 
*/ + cstat->min_rtt = UINT64_MAX; + cstat->smoothed_rtt = conn->local.settings.initial_rtt; + cstat->rttvar = conn->local.settings.initial_rtt / 2; + cstat->first_rtt_sample_ts = UINT64_MAX; + + if (cc->on_persistent_congestion) { + cc->on_persistent_congestion(cc, cstat, ts); } } @@ -1243,7 +1242,7 @@ void ngtcp2_rtb_remove_expired_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_duration pto, } } -ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(ngtcp2_rtb *rtb) { +ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(const ngtcp2_rtb *rtb) { ngtcp2_ksl_it it; ngtcp2_rtb_entry *ent; @@ -1338,11 +1337,13 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); break; } + rv = ngtcp2_strm_streamfrq_push(strm, frc); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); return rv; } + if (!ngtcp2_strm_is_tx_queued(strm)) { strm->cycle = ngtcp2_conn_tx_strmq_first_cycle(conn); rv = ngtcp2_conn_tx_strmq_push(conn, strm); @@ -1350,6 +1351,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, return rv; } } + break; case NGTCP2_FRAME_CRYPTO: frc = *pfrc; @@ -1363,6 +1365,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); return rv; } + break; case NGTCP2_FRAME_DATAGRAM: case NGTCP2_FRAME_DATAGRAM_LEN: @@ -1379,6 +1382,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); + break; default: pfrc = &(*pfrc)->next; @@ -1408,8 +1412,10 @@ int ngtcp2_rtb_remove_all(ngtcp2_rtb *rtb, ngtcp2_conn *conn, assert(0 == rv); rv = rtb_on_pkt_lost_resched_move(rtb, conn, pktns, ent); + ngtcp2_rtb_entry_objalloc_del(ent, rtb->rtb_entry_objalloc, rtb->frc_objalloc, rtb->mem); + if (rv != 0) { return rv; } @@ -1443,7 +1449,7 @@ void ngtcp2_rtb_remove_early_data(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat) { } } -int ngtcp2_rtb_empty(ngtcp2_rtb *rtb) { +int ngtcp2_rtb_empty(const ngtcp2_rtb *rtb) { return ngtcp2_ksl_len(&rtb->ents) == 0; } @@ -1460,7 +1466,7 @@ ngtcp2_ssize ngtcp2_rtb_reclaim_on_pto(ngtcp2_rtb *rtb, ngtcp2_conn *conn, size_t atmost = num_pkts; it = ngtcp2_ksl_end(&rtb->ents); - for (; !ngtcp2_ksl_it_begin(&it) && num_pkts >= 1;) { + for (; !ngtcp2_ksl_it_begin(&it) && num_pkts;) { ngtcp2_ksl_it_prev(&it); ent = ngtcp2_ksl_it_get(&it); @@ -1473,13 +1479,13 @@ ngtcp2_ssize ngtcp2_rtb_reclaim_on_pto(ngtcp2_rtb *rtb, ngtcp2_conn *conn, assert(ent->frc); reclaimed = - rtb_reclaim_frame(rtb, NGTCP2_RECLAIM_FLAG_NONE, conn, pktns, ent); + rtb_reclaim_frame(rtb, NGTCP2_RECLAIM_FLAG_NONE, conn, pktns, ent); if (reclaimed < 0) { return reclaimed; } - /* Mark reclaimed even if reclaimed == 0 so that we can skip it in - the next run. */ + /* Mark ent reclaimed even if reclaimed == 0 so that we can skip + it in the next run. */ ent->flags |= NGTCP2_RTB_ENTRY_FLAG_PTO_RECLAIMED; assert(rtb->num_retransmittable); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h index a1ff208b19eac7..2ef772b2e14f4b 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -85,7 +85,7 @@ typedef struct ngtcp2_rtb_entry ngtcp2_rtb_entry; /* * ngtcp2_rtb_entry is an object stored in ngtcp2_rtb. 
It corresponds - * to the one packet which is waiting for its ACK. + * to the one packet which is waiting for its acknowledgement. */ struct ngtcp2_rtb_entry { union { @@ -98,10 +98,11 @@ struct ngtcp2_rtb_entry { uint8_t flags; } hd; ngtcp2_frame_chain *frc; - /* ts is the time point when a packet included in this entry is sent - to a peer. */ + /* ts is the time point when a packet included in this entry is + sent to a remote endpoint. */ ngtcp2_tstamp ts; - /* lost_ts is the time when this entry is marked lost. */ + /* lost_ts is the time when this entry is declared to be + lost. */ ngtcp2_tstamp lost_ts; /* pktlen is the length of QUIC packet */ size_t pktlen; @@ -111,6 +112,7 @@ struct ngtcp2_rtb_entry { ngtcp2_tstamp first_sent_ts; uint64_t tx_in_flight; uint64_t lost; + int64_t end_seq; int is_app_limited; } rst; /* flags is bitwise-OR of zero or more of @@ -122,11 +124,11 @@ struct ngtcp2_rtb_entry { }; }; -ngtcp2_objalloc_decl(rtb_entry, ngtcp2_rtb_entry, oplent); +ngtcp2_objalloc_decl(rtb_entry, ngtcp2_rtb_entry, oplent) /* - * ngtcp2_rtb_entry_new allocates ngtcp2_rtb_entry object, and assigns - * its pointer to |*pent|. + * ngtcp2_rtb_entry_objalloc_new allocates ngtcp2_rtb_entry object via + * |objalloc|, and assigns its pointer to |*pent|. */ int ngtcp2_rtb_entry_objalloc_new(ngtcp2_rtb_entry **pent, const ngtcp2_pkt_hd *hd, @@ -145,7 +147,7 @@ void ngtcp2_rtb_entry_objalloc_del(ngtcp2_rtb_entry *ent, const ngtcp2_mem *mem); /* - * ngtcp2_rtb tracks sent packets, and its ACK timeout for + * ngtcp2_rtb tracks sent packets, and its acknowledgement timeout for * retransmission. */ typedef struct ngtcp2_rtb { @@ -154,34 +156,33 @@ typedef struct ngtcp2_rtb { /* ents includes ngtcp2_rtb_entry sorted by decreasing order of packet number. */ ngtcp2_ksl ents; - /* crypto is CRYPTO stream. */ - ngtcp2_strm *crypto; ngtcp2_rst *rst; ngtcp2_cc *cc; ngtcp2_log *log; ngtcp2_qlog *qlog; const ngtcp2_mem *mem; /* largest_acked_tx_pkt_num is the largest packet number - acknowledged by the peer. */ + acknowledged by a remote endpoint. */ int64_t largest_acked_tx_pkt_num; - /* num_ack_eliciting is the number of ACK eliciting entries. */ + /* num_ack_eliciting is the number of ACK eliciting entries in + ents. */ size_t num_ack_eliciting; /* num_retransmittable is the number of packets which contain frames - that must be retransmitted on loss. */ + that must be retransmitted on loss in ents. */ size_t num_retransmittable; /* num_pto_eliciting is the number of packets that elicit PTO probe - packets. */ + packets in ents. */ size_t num_pto_eliciting; /* probe_pkt_left is the number of probe packet to send */ size_t probe_pkt_left; - /* pktns_id is the identifier of packet number space. */ - ngtcp2_pktns_id pktns_id; /* cc_pkt_num is the smallest packet number that is contributed to ngtcp2_conn_stat.bytes_in_flight. */ int64_t cc_pkt_num; /* cc_bytes_in_flight is the number of in-flight bytes that is contributed to ngtcp2_conn_stat.bytes_in_flight. It only - includes the bytes after congestion state is reset. */ + includes the bytes after congestion state is reset, that is only + count a packet whose packet number is greater than or equals to + cc_pkt_num. */ uint64_t cc_bytes_in_flight; /* persistent_congestion_start_ts is the time when persistent congestion evaluation is started. It happens roughly after @@ -199,8 +200,7 @@ typedef struct ngtcp2_rtb { /* * ngtcp2_rtb_init initializes |rtb|. 
*/ -void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, - ngtcp2_strm *crypto, ngtcp2_rst *rst, ngtcp2_cc *cc, +void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t cc_pkt_num, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem); @@ -227,13 +227,13 @@ int ngtcp2_rtb_add(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, * which has the largest packet number. If there is no entry, * returned value satisfies ngtcp2_ksl_it_end(&it) != 0. */ -ngtcp2_ksl_it ngtcp2_rtb_head(ngtcp2_rtb *rtb); +ngtcp2_ksl_it ngtcp2_rtb_head(const ngtcp2_rtb *rtb); /* - * ngtcp2_rtb_recv_ack removes acked ngtcp2_rtb_entry from |rtb|. - * |pkt_num| is a packet number which includes |fr|. |pkt_ts| is the - * timestamp when packet is received. |ts| should be the current - * time. Usually they are the same, but for buffered packets, + * ngtcp2_rtb_recv_ack removes an acknowledged ngtcp2_rtb_entry from + * |rtb|. |pkt_num| is a packet number which includes |fr|. |pkt_ts| + * is the timestamp when packet is received. |ts| should be the + * current time. Usually they are the same, but for buffered packets, * |pkt_ts| would be earlier than |ts|. * * This function returns the number of newly acknowledged packets if @@ -252,7 +252,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, /* * ngtcp2_rtb_detect_lost_pkt detects lost packets and prepends the * frames contained them to |*pfrc|. Even when this function fails, - * some frames might be prepended to |*pfrc| and the caller should + * some frames might be prepended to |*pfrc|, and the caller should * handle them. */ int ngtcp2_rtb_detect_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_conn *conn, @@ -266,16 +266,16 @@ void ngtcp2_rtb_remove_expired_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_duration pto, ngtcp2_tstamp ts); /* - * ngtcp2_rtb_lost_pkt_ts returns the earliest time when the still - * retained packet was lost. It returns UINT64_MAX if no such packet - * exists. + * ngtcp2_rtb_lost_pkt_ts returns the timestamp when an oldest lost + * packet tracked by |rtb| was declared lost. It returns UINT64_MAX + * if no such packet exists. */ -ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(ngtcp2_rtb *rtb); +ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(const ngtcp2_rtb *rtb); /* - * ngtcp2_rtb_remove_all removes all packets from |rtb| and prepends + * ngtcp2_rtb_remove_all removes all packets from |rtb|, and prepends * all frames to |*pfrc|. Even when this function fails, some frames - * might be prepended to |*pfrc| and the caller should handle them. + * might be prepended to |*pfrc|, and the caller should handle them. */ int ngtcp2_rtb_remove_all(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_conn_stat *cstat); @@ -286,9 +286,9 @@ int ngtcp2_rtb_remove_all(ngtcp2_rtb *rtb, ngtcp2_conn *conn, void ngtcp2_rtb_remove_early_data(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat); /* - * ngtcp2_rtb_empty returns nonzero if |rtb| have no entry. + * ngtcp2_rtb_empty returns nonzero if |rtb| has no entry. */ -int ngtcp2_rtb_empty(ngtcp2_rtb *rtb); +int ngtcp2_rtb_empty(const ngtcp2_rtb *rtb); /* * ngtcp2_rtb_reset_cc_state resets congestion state in |rtb|. @@ -298,15 +298,15 @@ int ngtcp2_rtb_empty(ngtcp2_rtb *rtb); void ngtcp2_rtb_reset_cc_state(ngtcp2_rtb *rtb, int64_t cc_pkt_num); /* - * ngtcp2_rtb_remove_expired_lost_pkt ensures that the number of lost - * packets at most |n|. + * ngtcp2_rtb_remove_excessive_lost_pkt ensures that the number of + * lost packets is at most |n|. 
*/ void ngtcp2_rtb_remove_excessive_lost_pkt(ngtcp2_rtb *rtb, size_t n); /* * ngtcp2_rtb_reclaim_on_pto reclaims up to |num_pkts| packets which - * are in-flight and not marked lost to send them in PTO probe. The - * reclaimed frames are chained to |*pfrc|. + * are in-flight and not marked lost. The reclaimed frames may be + * sent in a PTO probe packet. * * This function returns the number of packets reclaimed if it * succeeds, or one of the following negative error codes: @@ -317,4 +317,4 @@ void ngtcp2_rtb_remove_excessive_lost_pkt(ngtcp2_rtb *rtb, size_t n); ngtcp2_ssize ngtcp2_rtb_reclaim_on_pto(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_pktns *pktns, size_t num_pkts); -#endif /* NGTCP2_RTB_H */ +#endif /* !defined(NGTCP2_RTB_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c new file mode 100644 index 00000000000000..77a68bd112e3b2 --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c @@ -0,0 +1,91 @@ +/* + * ngtcp2 + * + * Copyright (c) 2024 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ngtcp2_settings.h" + +#include +#include + +#include "ngtcp2_unreachable.h" + +void ngtcp2_settings_default_versioned(int settings_version, + ngtcp2_settings *settings) { + size_t len = ngtcp2_settingslen_version(settings_version); + + memset(settings, 0, len); + + switch (settings_version) { + case NGTCP2_SETTINGS_VERSION: + case NGTCP2_SETTINGS_V1: + settings->cc_algo = NGTCP2_CC_ALGO_CUBIC; + settings->initial_rtt = NGTCP2_DEFAULT_INITIAL_RTT; + settings->ack_thresh = 2; + settings->max_tx_udp_payload_size = 1500 - 48; + settings->handshake_timeout = UINT64_MAX; + + break; + } +} + +static void settings_copy(ngtcp2_settings *dest, const ngtcp2_settings *src, + int settings_version) { + assert(settings_version != NGTCP2_SETTINGS_VERSION); + + memcpy(dest, src, ngtcp2_settingslen_version(settings_version)); +} + +const ngtcp2_settings * +ngtcp2_settings_convert_to_latest(ngtcp2_settings *dest, int settings_version, + const ngtcp2_settings *src) { + if (settings_version == NGTCP2_SETTINGS_VERSION) { + return src; + } + + ngtcp2_settings_default(dest); + + settings_copy(dest, src, settings_version); + + return dest; +} + +void ngtcp2_settings_convert_to_old(int settings_version, ngtcp2_settings *dest, + const ngtcp2_settings *src) { + assert(settings_version != NGTCP2_SETTINGS_VERSION); + + settings_copy(dest, src, settings_version); +} + +size_t ngtcp2_settingslen_version(int settings_version) { + ngtcp2_settings settings; + + switch (settings_version) { + case NGTCP2_SETTINGS_VERSION: + return sizeof(settings); + case NGTCP2_SETTINGS_V1: + return offsetof(ngtcp2_settings, initial_pkt_num) + + sizeof(settings.initial_pkt_num); + default: + ngtcp2_unreachable(); + } +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h new file mode 100644 index 00000000000000..80466d43e47a7a --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h @@ -0,0 +1,73 @@ +/* + * ngtcp2 + * + * Copyright (c) 2024 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_SETTINGS_H +#define NGTCP2_SETTINGS_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* defined(HAVE_CONFIG_H) */ + +#include + +/* + * ngtcp2_settings_convert_to_latest converts |src| of version + * |settings_version| to the latest version NGTCP2_SETTINGS_VERSION. + * + * |dest| must point to the latest version. |src| may be the older + * version, and if so, it may have fewer fields. 
Accessing those + * fields causes undefined behavior. + * + * If |settings_version| == NGTCP2_SETTINGS_VERSION, no conversion is + * made, and |src| is returned. Otherwise, first |dest| is + * initialized via ngtcp2_settings_default, and then all valid fields + * in |src| are copied into |dest|. Finally, |dest| is returned. + */ +const ngtcp2_settings * +ngtcp2_settings_convert_to_latest(ngtcp2_settings *dest, int settings_version, + const ngtcp2_settings *src); + +/* + * ngtcp2_settings_convert_to_old converts |src| of the latest version + * to |dest| of version |settings_version|. + * + * |settings_version| must not be the latest version + * NGTCP2_SETTINGS_VERSION. + * + * |dest| points to the older version, and it may have fewer fields. + * Accessing those fields causes undefined behavior. + * + * This function copies all valid fields in version |settings_version| + * from |src| to |dest|. + */ +void ngtcp2_settings_convert_to_old(int settings_version, ngtcp2_settings *dest, + const ngtcp2_settings *src); + +/* + * ngtcp2_settingslen_version returns the effective length of + * ngtcp2_settings at the version |settings_version|. + */ +size_t ngtcp2_settingslen_version(int settings_version); + +#endif /* !defined(NGTCP2_SETTINGS_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h index deb75e356d70d4..f970c153e805a8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -91,4 +91,4 @@ char *ngtcp2_encode_printable_ascii(char *dest, const uint8_t *data, */ int ngtcp2_cmemeq(const uint8_t *a, const uint8_t *b, size_t n); -#endif /* NGTCP2_STR_H */ +#endif /* !defined(NGTCP2_STR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c index c00e86fa8c1afa..8ea969c4addbdc 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c @@ -32,16 +32,13 @@ #include "ngtcp2_vec.h" #include "ngtcp2_frame_chain.h" -static int offset_less(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs < *(int64_t *)rhs; -} - void ngtcp2_strm_init(ngtcp2_strm *strm, int64_t stream_id, uint32_t flags, uint64_t max_rx_offset, uint64_t max_tx_offset, void *stream_user_data, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { - strm->frc_objalloc = frc_objalloc; + strm->pe.index = NGTCP2_PQ_BAD_INDEX; strm->cycle = 0; + strm->frc_objalloc = frc_objalloc; strm->tx.acked_offset = NULL; strm->tx.cont_acked_offset = 0; strm->tx.streamfrq = NULL; @@ -56,13 +53,12 @@ void ngtcp2_strm_init(ngtcp2_strm *strm, int64_t stream_id, uint32_t flags, strm->rx.rob = NULL; strm->rx.cont_offset = 0; strm->rx.last_offset = 0; + strm->rx.max_offset = strm->rx.unsent_max_offset = strm->rx.window = + max_rx_offset; + strm->mem = mem; strm->stream_id = stream_id; - strm->flags = flags; strm->stream_user_data = stream_user_data; - strm->rx.window = strm->rx.max_offset = strm->rx.unsent_max_offset = - max_rx_offset; - strm->pe.index = NGTCP2_PQ_BAD_INDEX; - strm->mem = mem; + strm->flags = flags; strm->app_error_code = 0; } @@ -114,7 +110,7 @@ static int strm_rob_init(ngtcp2_strm *strm) { return 0; } -uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm) { +uint64_t ngtcp2_strm_rx_offset(const ngtcp2_strm *strm) { if (strm->rx.rob == NULL) { return strm->rx.cont_offset; } @@ -123,7 +119,7 @@ uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm) { /* 
strm_rob_heavily_fragmented returns nonzero if the number of gaps in |rob| exceeds the limit. */ -static int strm_rob_heavily_fragmented(ngtcp2_rob *rob) { +static int strm_rob_heavily_fragmented(const ngtcp2_rob *rob) { return ngtcp2_ksl_len(&rob->gapksl) >= 5000; } @@ -180,7 +176,8 @@ static int strm_streamfrq_init(ngtcp2_strm *strm) { return NGTCP2_ERR_NOMEM; } - ngtcp2_ksl_init(streamfrq, offset_less, sizeof(uint64_t), strm->mem); + ngtcp2_ksl_init(streamfrq, ngtcp2_ksl_uint64_less, + ngtcp2_ksl_uint64_less_search, sizeof(uint64_t), strm->mem); strm->tx.streamfrq = streamfrq; @@ -271,6 +268,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, } ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + continue; } @@ -306,11 +304,12 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, fr->data[0].len -= (size_t)base_offset; *pfrc = frc; + return 0; } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, fr->datacnt - end_idx, strm->frc_objalloc, strm->mem); + &nfrc, fr->datacnt - end_idx, strm->frc_objalloc, strm->mem); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); return rv; @@ -336,6 +335,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } @@ -350,14 +350,17 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, fr->fin = 0; fr->offset = offset + base_offset; fr->datacnt = end_idx - idx; + if (end_base_offset) { assert(fr->data[fr->datacnt - 1].len > end_base_offset); fr->data[fr->datacnt - 1].len = (size_t)end_base_offset; } + fr->data[0].base += base_offset; fr->data[0].len -= (size_t)base_offset; *pfrc = frc; + return 0; } @@ -367,7 +370,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, size_t left) { ngtcp2_stream *fr, *nfr; - ngtcp2_frame_chain *frc, *nfrc; + ngtcp2_frame_chain *frc, *nfrc, *sfrc; int rv; size_t nmerged; uint64_t datalen; @@ -385,6 +388,7 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, if (rv != 0) { return rv; } + if (frc == NULL) { *pfrc = NULL; return 0; @@ -401,7 +405,9 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); return rv; } + *pfrc = NULL; + return 0; } @@ -410,13 +416,13 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, acnt = fr->datacnt; bcnt = 0; - ngtcp2_vec_split(a, &acnt, b, &bcnt, left, NGTCP2_MAX_STREAM_DATACNT); + ngtcp2_vec_split(b, &bcnt, a, &acnt, left, NGTCP2_MAX_STREAM_DATACNT); assert(acnt > 0); assert(bcnt > 0); rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, bcnt, strm->frc_objalloc, strm->mem); + &nfrc, bcnt, strm->frc_objalloc, strm->mem); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); @@ -437,11 +443,12 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, acnt, strm->frc_objalloc, strm->mem); + &nfrc, acnt, strm->frc_objalloc, strm->mem); if (rv != 0) { 
assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); @@ -491,7 +498,9 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, break; } - nmerged = ngtcp2_vec_merge(a, &acnt, nfr->data, &nfr->datacnt, left, + bcnt = nfr->datacnt; + + nmerged = ngtcp2_vec_merge(a, &acnt, nfr->data, &bcnt, left, NGTCP2_MAX_STREAM_DATACNT); if (nmerged == 0) { rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &nfr->offset, nfrc); @@ -499,27 +508,56 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } + break; } datalen += nmerged; left -= nmerged; - if (nfr->datacnt == 0) { + if (bcnt == 0) { fr->fin = nfr->fin; ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); continue; } - nfr->offset += nmerged; + if (nfr->datacnt <= NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES || + bcnt > NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES) { + nfr->offset += nmerged; + nfr->datacnt = bcnt; + + rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &nfr->offset, nfrc); + if (rv != 0) { + ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; + } + } else { + rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( + &sfrc, bcnt, strm->frc_objalloc, strm->mem); + if (rv != 0) { + ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; + } + + sfrc->fr.stream = nfrc->fr.stream; + sfrc->fr.stream.offset += nmerged; + sfrc->fr.stream.datacnt = bcnt; + ngtcp2_vec_copy(sfrc->fr.stream.data, nfrc->fr.stream.data, bcnt); - rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &nfr->offset, nfrc); - if (rv != 0) { ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); - ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); - return rv; + + rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &sfrc->fr.stream.offset, + sfrc); + if (rv != 0) { + ngtcp2_frame_chain_objalloc_del(sfrc, strm->frc_objalloc, strm->mem); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; + } } break; @@ -531,13 +569,14 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, } *pfrc = frc; + return 0; } assert(acnt > fr->datacnt); rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, acnt, strm->frc_objalloc, strm->mem); + &nfrc, acnt, strm->frc_objalloc, strm->mem); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); return rv; @@ -555,7 +594,7 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, return 0; } -uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm) { +uint64_t ngtcp2_strm_streamfrq_unacked_offset(const ngtcp2_strm *strm) { ngtcp2_frame_chain *frc; ngtcp2_stream *fr; ngtcp2_range gap; @@ -577,9 +616,11 @@ uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm) { if (gap.begin <= fr->offset) { return fr->offset; } + if (gap.begin < fr->offset + datalen) { return gap.begin; } + if (fr->offset + datalen == gap.begin && fr->fin && !(strm->flags & NGTCP2_STRM_FLAG_FIN_ACKED)) { return fr->offset + datalen; @@ -589,17 +630,18 @@ uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm) { return (uint64_t)-1; } 
-ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(ngtcp2_strm *strm) { +ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(const ngtcp2_strm *strm) { ngtcp2_ksl_it it; assert(strm->tx.streamfrq); assert(ngtcp2_ksl_len(strm->tx.streamfrq)); it = ngtcp2_ksl_begin(strm->tx.streamfrq); + return ngtcp2_ksl_it_get(&it); } -int ngtcp2_strm_streamfrq_empty(ngtcp2_strm *strm) { +int ngtcp2_strm_streamfrq_empty(const ngtcp2_strm *strm) { return strm->tx.streamfrq == NULL || ngtcp2_ksl_len(strm->tx.streamfrq) == 0; } @@ -616,14 +658,15 @@ void ngtcp2_strm_streamfrq_clear(ngtcp2_strm *strm) { frc = ngtcp2_ksl_it_get(&it); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); } + ngtcp2_ksl_clear(strm->tx.streamfrq); } -int ngtcp2_strm_is_tx_queued(ngtcp2_strm *strm) { +int ngtcp2_strm_is_tx_queued(const ngtcp2_strm *strm) { return strm->pe.index != NGTCP2_PQ_BAD_INDEX; } -int ngtcp2_strm_is_all_tx_data_acked(ngtcp2_strm *strm) { +int ngtcp2_strm_is_all_tx_data_acked(const ngtcp2_strm *strm) { if (strm->tx.acked_offset == NULL) { return strm->tx.cont_acked_offset == strm->tx.offset; } @@ -632,12 +675,12 @@ int ngtcp2_strm_is_all_tx_data_acked(ngtcp2_strm *strm) { strm->tx.offset; } -int ngtcp2_strm_is_all_tx_data_fin_acked(ngtcp2_strm *strm) { +int ngtcp2_strm_is_all_tx_data_fin_acked(const ngtcp2_strm *strm) { return (strm->flags & NGTCP2_STRM_FLAG_FIN_ACKED) && ngtcp2_strm_is_all_tx_data_acked(strm); } -ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, +ngtcp2_range ngtcp2_strm_get_unacked_range_after(const ngtcp2_strm *strm, uint64_t offset) { ngtcp2_range gap; @@ -650,7 +693,7 @@ ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, return ngtcp2_gaptr_get_first_gap_after(strm->tx.acked_offset, offset); } -uint64_t ngtcp2_strm_get_acked_offset(ngtcp2_strm *strm) { +uint64_t ngtcp2_strm_get_acked_offset(const ngtcp2_strm *strm) { if (strm->tx.acked_offset == NULL) { return strm->tx.cont_acked_offset; } @@ -660,7 +703,7 @@ uint64_t ngtcp2_strm_get_acked_offset(ngtcp2_strm *strm) { static int strm_acked_offset_init(ngtcp2_strm *strm) { ngtcp2_gaptr *acked_offset = - ngtcp2_mem_malloc(strm->mem, sizeof(*acked_offset)); + ngtcp2_mem_malloc(strm->mem, sizeof(*acked_offset)); if (acked_offset == NULL) { return NGTCP2_ERR_NOMEM; @@ -688,7 +731,7 @@ int ngtcp2_strm_ack_data(ngtcp2_strm *strm, uint64_t offset, uint64_t len) { } rv = - ngtcp2_gaptr_push(strm->tx.acked_offset, 0, strm->tx.cont_acked_offset); + ngtcp2_gaptr_push(strm->tx.acked_offset, 0, strm->tx.cont_acked_offset); if (rv != 0) { return rv; } @@ -709,24 +752,24 @@ void ngtcp2_strm_set_app_error_code(ngtcp2_strm *strm, strm->app_error_code = app_error_code; } -int ngtcp2_strm_require_retransmit_reset_stream(ngtcp2_strm *strm) { +int ngtcp2_strm_require_retransmit_reset_stream(const ngtcp2_strm *strm) { return !ngtcp2_strm_is_all_tx_data_fin_acked(strm); } -int ngtcp2_strm_require_retransmit_stop_sending(ngtcp2_strm *strm) { +int ngtcp2_strm_require_retransmit_stop_sending(const ngtcp2_strm *strm) { return !(strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) || ngtcp2_strm_rx_offset(strm) != strm->rx.last_offset; } -int ngtcp2_strm_require_retransmit_max_stream_data(ngtcp2_strm *strm, - ngtcp2_max_stream_data *fr) { +int ngtcp2_strm_require_retransmit_max_stream_data( + const ngtcp2_strm *strm, const ngtcp2_max_stream_data *fr) { return fr->max_stream_data == strm->rx.max_offset && !(strm->flags & (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_STOP_SENDING)); } int ngtcp2_strm_require_retransmit_stream_data_blocked( - 
ngtcp2_strm *strm, ngtcp2_stream_data_blocked *fr) { + const ngtcp2_strm *strm, const ngtcp2_stream_data_blocked *fr) { return fr->offset == strm->tx.max_offset && !(strm->flags & NGTCP2_STRM_FLAG_SHUT_WR); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h index 385302a5eafa9f..c72f8b9dc89aca 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -86,6 +86,9 @@ typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; received from the remote endpoint. In this case, NGTCP2_STRM_FLAG_SHUT_WR is also set. */ #define NGTCP2_STRM_FLAG_STOP_SENDING_RECVED 0x800u +/* NGTCP2_STRM_FLAG_ANY_SENT indicates that any STREAM frame, + including empty one, has been sent. */ +#define NGTCP2_STRM_FLAG_ANY_SENT 0x1000u typedef struct ngtcp2_strm ngtcp2_strm; @@ -105,13 +108,13 @@ struct ngtcp2_strm { acked_offset is used instead. */ uint64_t cont_acked_offset; /* streamfrq contains STREAM or CRYPTO frame for - retransmission. The flow control credits have been paid - when they are transmitted first time. There are no + retransmission. The flow control credits have already been + paid when they are transmitted first time. There are no restriction regarding flow control for retransmission. */ ngtcp2_ksl *streamfrq; - /* offset is the next offset of outgoing data. In other words, it - is the number of bytes sent in this stream without - duplication. */ + /* offset is the next offset of new outgoing data. In other + words, it is the number of bytes sent in this stream + without duplication. */ uint64_t offset; /* max_tx_offset is the maximum offset that local endpoint can send for this stream. */ @@ -200,7 +203,7 @@ void ngtcp2_strm_free(ngtcp2_strm *strm); * ngtcp2_strm_rx_offset returns the minimum offset of stream data * which is not received yet. */ -uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm); +uint64_t ngtcp2_strm_rx_offset(const ngtcp2_strm *strm); /* * ngtcp2_strm_recv_reordering handles reordered data. @@ -215,8 +218,8 @@ int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, size_t datalen, uint64_t offset); /* - * ngtcp2_strm_update_rx_offset tells that data up to offset bytes are - * received in order. + * ngtcp2_strm_update_rx_offset tells that data up to |offset| bytes + * are received in order. */ void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); @@ -227,13 +230,14 @@ void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); void ngtcp2_strm_discard_reordered_data(ngtcp2_strm *strm); /* - * ngtcp2_strm_shutdown shutdowns |strm|. |flags| should be - * NGTCP2_STRM_FLAG_SHUT_RD, and/or NGTCP2_STRM_FLAG_SHUT_WR. + * ngtcp2_strm_shutdown shutdowns |strm|. |flags| should be one of + * NGTCP2_STRM_FLAG_SHUT_RD, NGTCP2_STRM_FLAG_SHUT_WR, and + * NGTCP2_STRM_FLAG_SHUT_RDWR. */ void ngtcp2_strm_shutdown(ngtcp2_strm *strm, uint32_t flags); /* - * ngtcp2_strm_streamfrq_push pushes |frc| to streamfrq for + * ngtcp2_strm_streamfrq_push pushes |frc| to strm->tx.streamfrq for * retransmission. * * This function returns 0 if it succeeds, or one of the following @@ -245,11 +249,12 @@ void ngtcp2_strm_shutdown(ngtcp2_strm *strm, uint32_t flags); int ngtcp2_strm_streamfrq_push(ngtcp2_strm *strm, ngtcp2_frame_chain *frc); /* - * ngtcp2_strm_streamfrq_pop pops the first ngtcp2_frame_chain and - * assigns it to |*pfrc|. 
This function splits into or merges several - * ngtcp2_frame_chain objects so that the returned ngtcp2_frame_chain - * has at most |left| data length. If there is no frames to send, - * this function returns 0 and |*pfrc| is NULL. + * ngtcp2_strm_streamfrq_pop assigns a ngtcp2_frame_chain that only + * contains unacknowledged stream data with smallest offset to |*pfrc| + * for retransmission. The assigned ngtcp2_frame_chain has stream + * data at most |left| bytes. strm->tx.streamfrq is adjusted to + * exclude the portion of data included in it. If there is no stream + * data to send, this function returns 0 and |*pfrc| is NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -264,18 +269,18 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, * ngtcp2_strm_streamfrq_unacked_offset returns the smallest offset of * unacknowledged stream data held in strm->tx.streamfrq. */ -uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm); +uint64_t ngtcp2_strm_streamfrq_unacked_offset(const ngtcp2_strm *strm); /* * ngtcp2_strm_streamfrq_top returns the first ngtcp2_frame_chain. * The queue must not be empty. */ -ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(ngtcp2_strm *strm); +ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(const ngtcp2_strm *strm); /* * ngtcp2_strm_streamfrq_empty returns nonzero if streamfrq is empty. */ -int ngtcp2_strm_streamfrq_empty(ngtcp2_strm *strm); +int ngtcp2_strm_streamfrq_empty(const ngtcp2_strm *strm); /* * ngtcp2_strm_streamfrq_clear removes all frames from streamfrq. @@ -285,26 +290,26 @@ void ngtcp2_strm_streamfrq_clear(ngtcp2_strm *strm); /* * ngtcp2_strm_is_tx_queued returns nonzero if |strm| is queued. */ -int ngtcp2_strm_is_tx_queued(ngtcp2_strm *strm); +int ngtcp2_strm_is_tx_queued(const ngtcp2_strm *strm); /* * ngtcp2_strm_is_all_tx_data_acked returns nonzero if all outgoing * data for |strm| which have sent so far have been acknowledged. */ -int ngtcp2_strm_is_all_tx_data_acked(ngtcp2_strm *strm); +int ngtcp2_strm_is_all_tx_data_acked(const ngtcp2_strm *strm); /* * ngtcp2_strm_is_all_tx_data_fin_acked behaves like * ngtcp2_strm_is_all_tx_data_acked, but it also requires that STREAM * frame with fin bit set is acknowledged. */ -int ngtcp2_strm_is_all_tx_data_fin_acked(ngtcp2_strm *strm); +int ngtcp2_strm_is_all_tx_data_fin_acked(const ngtcp2_strm *strm); /* * ngtcp2_strm_get_unacked_range_after returns the range that is not - * acknowledged yet and intersects or comes after |offset|. + * acknowledged yet and includes or comes after |offset|. */ -ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, +ngtcp2_range ngtcp2_strm_get_unacked_range_after(const ngtcp2_strm *strm, uint64_t offset); /* @@ -312,11 +317,11 @@ ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, * this offset have been acknowledged by a remote endpoint. It * returns 0 if no data is acknowledged. */ -uint64_t ngtcp2_strm_get_acked_offset(ngtcp2_strm *strm); +uint64_t ngtcp2_strm_get_acked_offset(const ngtcp2_strm *strm); /* - * ngtcp2_strm_ack_data tells |strm| that the data [offset, - * offset+len) is acknowledged by a remote endpoint. + * ngtcp2_strm_ack_data tells |strm| that the data [|offset|, |offset| + * + |len|) is acknowledged by a remote endpoint. 
*/ int ngtcp2_strm_ack_data(ngtcp2_strm *strm, uint64_t offset, uint64_t len); @@ -331,26 +336,26 @@ void ngtcp2_strm_set_app_error_code(ngtcp2_strm *strm, uint64_t app_error_code); * ngtcp2_strm_require_retransmit_reset_stream returns nonzero if * RESET_STREAM frame should be retransmitted. */ -int ngtcp2_strm_require_retransmit_reset_stream(ngtcp2_strm *strm); +int ngtcp2_strm_require_retransmit_reset_stream(const ngtcp2_strm *strm); /* * ngtcp2_strm_require_retransmit_stop_sending returns nonzero if * STOP_SENDING frame should be retransmitted. */ -int ngtcp2_strm_require_retransmit_stop_sending(ngtcp2_strm *strm); +int ngtcp2_strm_require_retransmit_stop_sending(const ngtcp2_strm *strm); /* * ngtcp2_strm_require_retransmit_max_stream_data returns nonzero if * MAX_STREAM_DATA frame should be retransmitted. */ -int ngtcp2_strm_require_retransmit_max_stream_data(ngtcp2_strm *strm, - ngtcp2_max_stream_data *fr); +int ngtcp2_strm_require_retransmit_max_stream_data( + const ngtcp2_strm *strm, const ngtcp2_max_stream_data *fr); /* * ngtcp2_strm_require_retransmit_stream_data_blocked returns nonzero * if STREAM_DATA_BLOCKED frame frame should be retransmitted. */ int ngtcp2_strm_require_retransmit_stream_data_blocked( - ngtcp2_strm *strm, ngtcp2_stream_data_blocked *fr); + const ngtcp2_strm *strm, const ngtcp2_stream_data_blocked *fr); -#endif /* NGTCP2_STRM_H */ +#endif /* !defined(NGTCP2_STRM_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c new file mode 100644 index 00000000000000..dda59c48858185 --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c @@ -0,0 +1,886 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ngtcp2_transport_params.h" + +#include +#include + +#include "ngtcp2_conv.h" +#include "ngtcp2_str.h" +#include "ngtcp2_mem.h" +#include "ngtcp2_unreachable.h" + +void ngtcp2_transport_params_default_versioned( + int transport_params_version, ngtcp2_transport_params *params) { + size_t len; + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_VERSION: + len = sizeof(*params); + + break; + default: + ngtcp2_unreachable(); + } + + memset(params, 0, len); + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_VERSION: + params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; + params->active_connection_id_limit = + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; + params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; + params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; + + break; + } +} + +/* + * varint_paramlen returns the length of a single transport parameter + * which has variable integer in its parameter. + */ +static size_t varint_paramlen(ngtcp2_transport_param_id id, uint64_t param) { + size_t valuelen = ngtcp2_put_uvarintlen(param); + return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(valuelen) + valuelen; +} + +/* + * write_varint_param writes parameter |id| of the given |value| in + * varint encoding. It returns p + the number of bytes written. + */ +static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, + uint64_t value) { + p = ngtcp2_put_uvarint(p, id); + p = ngtcp2_put_uvarint(p, ngtcp2_put_uvarintlen(value)); + return ngtcp2_put_uvarint(p, value); +} + +/* + * zero_paramlen returns the length of a single transport parameter + * which has zero length value in its parameter. + */ +static size_t zero_paramlen(ngtcp2_transport_param_id id) { + return ngtcp2_put_uvarintlen(id) + 1; +} + +/* + * write_zero_param writes parameter |id| that has zero length value. + * It returns p + the number of bytes written. + */ +static uint8_t *write_zero_param(uint8_t *p, ngtcp2_transport_param_id id) { + p = ngtcp2_put_uvarint(p, id); + *p++ = 0; + + return p; +} + +/* + * cid_paramlen returns the length of a single transport parameter + * which has |cid| as value. + */ +static size_t cid_paramlen(ngtcp2_transport_param_id id, + const ngtcp2_cid *cid) { + return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(cid->datalen) + + cid->datalen; +} + +/* + * write_cid_param writes parameter |id| of the given |cid|. It + * returns p + the number of bytes written. + */ +static uint8_t *write_cid_param(uint8_t *p, ngtcp2_transport_param_id id, + const ngtcp2_cid *cid) { + assert(cid->datalen == 0 || cid->datalen >= NGTCP2_MIN_CIDLEN); + assert(cid->datalen <= NGTCP2_MAX_CIDLEN); + + p = ngtcp2_put_uvarint(p, id); + p = ngtcp2_put_uvarint(p, cid->datalen); + if (cid->datalen) { + p = ngtcp2_cpymem(p, cid->data, cid->datalen); + } + return p; +} + +static const uint8_t empty_address[16]; + +ngtcp2_ssize ngtcp2_transport_params_encode_versioned( + uint8_t *dest, size_t destlen, int transport_params_version, + const ngtcp2_transport_params *params) { + uint8_t *p; + size_t len = 0; + /* For some reason, gcc 7.3.0 requires this initialization. 
*/ + size_t preferred_addrlen = 0; + size_t version_infolen = 0; + const ngtcp2_sockaddr_in *sa_in; + const ngtcp2_sockaddr_in6 *sa_in6; + ngtcp2_transport_params paramsbuf; + + params = ngtcp2_transport_params_convert_to_latest( + ¶msbuf, transport_params_version, params); + + if (params->original_dcid_present) { + len += + cid_paramlen(NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, + ¶ms->original_dcid); + } + + if (params->stateless_reset_token_present) { + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN) + + ngtcp2_put_uvarintlen(NGTCP2_STATELESS_RESET_TOKENLEN) + + NGTCP2_STATELESS_RESET_TOKENLEN; + } + + if (params->preferred_addr_present) { + assert(params->preferred_addr.cid.datalen >= NGTCP2_MIN_CIDLEN); + assert(params->preferred_addr.cid.datalen <= NGTCP2_MAX_CIDLEN); + preferred_addrlen = 4 /* ipv4Address */ + 2 /* ipv4Port */ + + 16 /* ipv6Address */ + 2 /* ipv6Port */ + + 1 + params->preferred_addr.cid.datalen /* CID */ + + NGTCP2_STATELESS_RESET_TOKENLEN; + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS) + + ngtcp2_put_uvarintlen(preferred_addrlen) + preferred_addrlen; + } + if (params->retry_scid_present) { + len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, + ¶ms->retry_scid); + } + + if (params->initial_scid_present) { + len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, + ¶ms->initial_scid); + } + + if (params->initial_max_stream_data_bidi_local) { + len += + varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, + params->initial_max_stream_data_bidi_local); + } + if (params->initial_max_stream_data_bidi_remote) { + len += varint_paramlen( + NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, + params->initial_max_stream_data_bidi_remote); + } + if (params->initial_max_stream_data_uni) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, + params->initial_max_stream_data_uni); + } + if (params->initial_max_data) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, + params->initial_max_data); + } + if (params->initial_max_streams_bidi) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, + params->initial_max_streams_bidi); + } + if (params->initial_max_streams_uni) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, + params->initial_max_streams_uni); + } + if (params->max_udp_payload_size != + NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, + params->max_udp_payload_size); + } + if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, + params->ack_delay_exponent); + } + if (params->disable_active_migration) { + len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); + } + if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, + params->max_ack_delay / NGTCP2_MILLISECONDS); + } + if (params->max_idle_timeout) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, + params->max_idle_timeout / NGTCP2_MILLISECONDS); + } + if (params->active_connection_id_limit && + params->active_connection_id_limit != + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, + params->active_connection_id_limit); + } + if (params->max_datagram_frame_size) { + len += 
varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, + params->max_datagram_frame_size); + } + if (params->grease_quic_bit) { + len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); + } + if (params->version_info_present) { + version_infolen = + sizeof(uint32_t) + params->version_info.available_versionslen; + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION) + + ngtcp2_put_uvarintlen(version_infolen) + version_infolen; + } + + if (dest == NULL && destlen == 0) { + return (ngtcp2_ssize)len; + } + + if (destlen < len) { + return NGTCP2_ERR_NOBUF; + } + + p = dest; + + if (params->original_dcid_present) { + p = write_cid_param( + p, NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, + ¶ms->original_dcid); + } + + if (params->stateless_reset_token_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN); + p = ngtcp2_put_uvarint(p, sizeof(params->stateless_reset_token)); + p = ngtcp2_cpymem(p, params->stateless_reset_token, + sizeof(params->stateless_reset_token)); + } + + if (params->preferred_addr_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS); + p = ngtcp2_put_uvarint(p, preferred_addrlen); + + if (params->preferred_addr.ipv4_present) { + sa_in = ¶ms->preferred_addr.ipv4; + p = ngtcp2_cpymem(p, &sa_in->sin_addr, sizeof(sa_in->sin_addr)); + p = ngtcp2_put_uint16(p, sa_in->sin_port); + } else { + p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in->sin_addr)); + p = ngtcp2_put_uint16(p, 0); + } + + if (params->preferred_addr.ipv6_present) { + sa_in6 = ¶ms->preferred_addr.ipv6; + p = ngtcp2_cpymem(p, &sa_in6->sin6_addr, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_put_uint16(p, sa_in6->sin6_port); + } else { + p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_put_uint16(p, 0); + } + + *p++ = (uint8_t)params->preferred_addr.cid.datalen; + if (params->preferred_addr.cid.datalen) { + p = ngtcp2_cpymem(p, params->preferred_addr.cid.data, + params->preferred_addr.cid.datalen); + } + p = ngtcp2_cpymem(p, params->preferred_addr.stateless_reset_token, + sizeof(params->preferred_addr.stateless_reset_token)); + } + + if (params->retry_scid_present) { + p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, + ¶ms->retry_scid); + } + + if (params->initial_scid_present) { + p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, + ¶ms->initial_scid); + } + + if (params->initial_max_stream_data_bidi_local) { + p = write_varint_param( + p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, + params->initial_max_stream_data_bidi_local); + } + + if (params->initial_max_stream_data_bidi_remote) { + p = write_varint_param( + p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, + params->initial_max_stream_data_bidi_remote); + } + + if (params->initial_max_stream_data_uni) { + p = + write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, + params->initial_max_stream_data_uni); + } + + if (params->initial_max_data) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, + params->initial_max_data); + } + + if (params->initial_max_streams_bidi) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, + params->initial_max_streams_bidi); + } + + if (params->initial_max_streams_uni) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, + params->initial_max_streams_uni); + } + + if (params->max_udp_payload_size != + 
NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, + params->max_udp_payload_size); + } + + if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, + params->ack_delay_exponent); + } + + if (params->disable_active_migration) { + p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); + } + + if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, + params->max_ack_delay / NGTCP2_MILLISECONDS); + } + + if (params->max_idle_timeout) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, + params->max_idle_timeout / NGTCP2_MILLISECONDS); + } + + if (params->active_connection_id_limit && + params->active_connection_id_limit != + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, + params->active_connection_id_limit); + } + + if (params->max_datagram_frame_size) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, + params->max_datagram_frame_size); + } + + if (params->grease_quic_bit) { + p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); + } + + if (params->version_info_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION); + p = ngtcp2_put_uvarint(p, version_infolen); + p = ngtcp2_put_uint32be(p, params->version_info.chosen_version); + if (params->version_info.available_versionslen) { + p = ngtcp2_cpymem(p, params->version_info.available_versions, + params->version_info.available_versionslen); + } + } + + assert((size_t)(p - dest) == len); + + return (ngtcp2_ssize)len; +} + +/* + * decode_varint decodes a single varint from the buffer pointed by + * |*pp| of length |end - *pp|. If it decodes an integer + * successfully, it stores the integer in |*pdest|, increment |*pp| by + * the number of bytes read from |*pp|, and returns 0. Otherwise it + * returns -1. + */ +static int decode_varint(uint64_t *pdest, const uint8_t **pp, + const uint8_t *end) { + const uint8_t *p = *pp; + size_t len; + + if (p == end) { + return -1; + } + + len = ngtcp2_get_uvarintlen(p); + if ((uint64_t)(end - p) < len) { + return -1; + } + + *pp = ngtcp2_get_uvarint(pdest, p); + + return 0; +} + +/* + * decode_varint_param decodes length prefixed value from the buffer + * pointed by |*pp| of length |end - *pp|. The length and value are + * encoded in varint form. If it decodes a value successfully, it + * stores the value in |*pdest|, increment |*pp| by the number of + * bytes read from |*pp|, and returns 0. Otherwise it returns -1. + */ +static int decode_varint_param(uint64_t *pdest, const uint8_t **pp, + const uint8_t *end) { + const uint8_t *p = *pp; + uint64_t valuelen; + + if (decode_varint(&valuelen, &p, end) != 0) { + return -1; + } + + if (p == end) { + return -1; + } + + if ((uint64_t)(end - p) < valuelen) { + return -1; + } + + if (ngtcp2_get_uvarintlen(p) != valuelen) { + return -1; + } + + *pp = ngtcp2_get_uvarint(pdest, p); + + return 0; +} + +/* + * decode_zero_param decodes zero length value from the buffer pointed + * by |*pp| of length |end - *pp|. The length is encoded in varint + * form. If it decodes zero length value successfully, it increments + * |*pp| by 1, and returns 0. Otherwise it returns -1. 
+ */ +static int decode_zero_param(const uint8_t **pp, const uint8_t *end) { + if (*pp == end || **pp != 0) { + return -1; + } + + ++*pp; + + return 0; +} + +/* + * decode_cid_param decodes length prefixed ngtcp2_cid from the buffer + * pointed by |*pp| of length |end - *pp|. The length is encoded in + * varint form. If it decodes a value successfully, it stores the + * value in |*pdest|, increment |*pp| by the number of read from + * |*pp|, and returns the number of bytes read. Otherwise it returns + * the one of the negative error code: + * + * NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM + * Could not decode Connection ID. + */ +static int decode_cid_param(ngtcp2_cid *pdest, const uint8_t **pp, + const uint8_t *end) { + const uint8_t *p = *pp; + uint64_t valuelen; + + if (decode_varint(&valuelen, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + + if ((valuelen != 0 && valuelen < NGTCP2_MIN_CIDLEN) || + valuelen > NGTCP2_MAX_CIDLEN || (size_t)(end - p) < valuelen) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + + ngtcp2_cid_init(pdest, p, (size_t)valuelen); + + p += valuelen; + + *pp = p; + + return 0; +} + +int ngtcp2_transport_params_decode_versioned(int transport_params_version, + ngtcp2_transport_params *dest, + const uint8_t *data, + size_t datalen) { + const uint8_t *p, *end, *lend; + size_t len; + uint64_t param_type; + uint64_t valuelen; + int rv; + ngtcp2_sockaddr_in *sa_in; + ngtcp2_sockaddr_in6 *sa_in6; + uint32_t version; + ngtcp2_transport_params *params, paramsbuf; + + if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { + params = dest; + } else { + params = ¶msbuf; + } + + /* Set default values */ + memset(params, 0, sizeof(*params)); + params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; + params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; + params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; + params->active_connection_id_limit = + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; + + p = data; + end = data + datalen; + + for (; (size_t)(end - p) >= 2;) { + if (decode_varint(¶m_type, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + + switch (param_type) { + case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL: + if (decode_varint_param(¶ms->initial_max_stream_data_bidi_local, &p, + end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE: + if (decode_varint_param(¶ms->initial_max_stream_data_bidi_remote, &p, + end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI: + if (decode_varint_param(¶ms->initial_max_stream_data_uni, &p, end) != + 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA: + if (decode_varint_param(¶ms->initial_max_data, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI: + if (decode_varint_param(¶ms->initial_max_streams_bidi, &p, end) != + 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (params->initial_max_streams_bidi > NGTCP2_MAX_STREAMS) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI: + if (decode_varint_param(¶ms->initial_max_streams_uni, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (params->initial_max_streams_uni > NGTCP2_MAX_STREAMS) { + return 
NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT: + if (decode_varint_param(¶ms->max_idle_timeout, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + params->max_idle_timeout *= NGTCP2_MILLISECONDS; + break; + case NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE: + if (decode_varint_param(¶ms->max_udp_payload_size, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN: + if (decode_varint(&valuelen, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if ((size_t)valuelen != sizeof(params->stateless_reset_token)) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if ((size_t)(end - p) < sizeof(params->stateless_reset_token)) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + + p = ngtcp2_get_bytes(params->stateless_reset_token, p, + sizeof(params->stateless_reset_token)); + params->stateless_reset_token_present = 1; + + break; + case NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT: + if (decode_varint_param(¶ms->ack_delay_exponent, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (params->ack_delay_exponent > 20) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS: + if (decode_varint(&valuelen, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if ((size_t)(end - p) < valuelen) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + len = 4 /* ipv4Address */ + 2 /* ipv4Port */ + 16 /* ipv6Address */ + + 2 /* ipv6Port */ + + 1 /* cid length */ + NGTCP2_STATELESS_RESET_TOKENLEN; + if (valuelen < len) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + + sa_in = ¶ms->preferred_addr.ipv4; + + p = ngtcp2_get_bytes(&sa_in->sin_addr, p, sizeof(sa_in->sin_addr)); + p = ngtcp2_get_uint16(&sa_in->sin_port, p); + + if (sa_in->sin_port || memcmp(empty_address, &sa_in->sin_addr, + sizeof(sa_in->sin_addr)) != 0) { + sa_in->sin_family = NGTCP2_AF_INET; + params->preferred_addr.ipv4_present = 1; + } + + sa_in6 = ¶ms->preferred_addr.ipv6; + + p = ngtcp2_get_bytes(&sa_in6->sin6_addr, p, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_get_uint16(&sa_in6->sin6_port, p); + + if (sa_in6->sin6_port || memcmp(empty_address, &sa_in6->sin6_addr, + sizeof(sa_in6->sin6_addr)) != 0) { + sa_in6->sin6_family = NGTCP2_AF_INET6; + params->preferred_addr.ipv6_present = 1; + } + + /* cid */ + params->preferred_addr.cid.datalen = *p++; + len += params->preferred_addr.cid.datalen; + if (valuelen != len || + params->preferred_addr.cid.datalen > NGTCP2_MAX_CIDLEN || + params->preferred_addr.cid.datalen < NGTCP2_MIN_CIDLEN) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (params->preferred_addr.cid.datalen) { + p = ngtcp2_get_bytes(params->preferred_addr.cid.data, p, + params->preferred_addr.cid.datalen); + } + + /* stateless reset token */ + p = + ngtcp2_get_bytes(params->preferred_addr.stateless_reset_token, p, + sizeof(params->preferred_addr.stateless_reset_token)); + params->preferred_addr_present = 1; + break; + case NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION: + if (decode_zero_param(&p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + params->disable_active_migration = 1; + break; + case NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID: + rv = decode_cid_param(¶ms->original_dcid, &p, end); + if (rv != 0) { + return rv; + } + params->original_dcid_present = 1; + break; + case NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID: + rv = 
decode_cid_param(¶ms->retry_scid, &p, end); + if (rv != 0) { + return rv; + } + params->retry_scid_present = 1; + break; + case NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID: + rv = decode_cid_param(¶ms->initial_scid, &p, end); + if (rv != 0) { + return rv; + } + params->initial_scid_present = 1; + break; + case NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY: + if (decode_varint_param(¶ms->max_ack_delay, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (params->max_ack_delay >= 16384) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + params->max_ack_delay *= NGTCP2_MILLISECONDS; + break; + case NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT: + if (decode_varint_param(¶ms->active_connection_id_limit, &p, end) != + 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE: + if (decode_varint_param(¶ms->max_datagram_frame_size, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + break; + case NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT: + if (decode_zero_param(&p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + params->grease_quic_bit = 1; + break; + case NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION: + if (decode_varint(&valuelen, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if ((size_t)(end - p) < valuelen) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (valuelen < sizeof(uint32_t) || (valuelen & 0x3)) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + p = ngtcp2_get_uint32be(¶ms->version_info.chosen_version, p); + if (params->version_info.chosen_version == 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (valuelen > sizeof(uint32_t)) { + params->version_info.available_versions = (uint8_t *)p; + params->version_info.available_versionslen = + (size_t)valuelen - sizeof(uint32_t); + + for (lend = p + (valuelen - sizeof(uint32_t)); p != lend;) { + p = ngtcp2_get_uint32be(&version, p); + if (version == 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + } + } + params->version_info_present = 1; + break; + default: + /* Ignore unknown parameter */ + if (decode_varint(&valuelen, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if ((size_t)(end - p) < valuelen) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + p += valuelen; + break; + } + } + + if (end - p != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + + if (transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION) { + ngtcp2_transport_params_convert_to_old(transport_params_version, dest, + params); + } + + return 0; +} + +static int transport_params_copy_new(ngtcp2_transport_params **pdest, + const ngtcp2_transport_params *src, + const ngtcp2_mem *mem) { + size_t len = sizeof(**pdest); + ngtcp2_transport_params *dest; + uint8_t *p; + + if (src->version_info_present) { + len += src->version_info.available_versionslen; + } + + dest = ngtcp2_mem_malloc(mem, len); + if (dest == NULL) { + return NGTCP2_ERR_NOMEM; + } + + *dest = *src; + + if (src->version_info_present && src->version_info.available_versionslen) { + p = (uint8_t *)dest + sizeof(*dest); + memcpy(p, src->version_info.available_versions, + src->version_info.available_versionslen); + dest->version_info.available_versions = p; + } + + *pdest = dest; + + return 0; +} + +int ngtcp2_transport_params_decode_new(ngtcp2_transport_params **pparams, + const uint8_t *data, size_t datalen, + const ngtcp2_mem *mem) { + int rv; + ngtcp2_transport_params params; + + rv = 
ngtcp2_transport_params_decode(¶ms, data, datalen); + if (rv < 0) { + return rv; + } + + if (mem == NULL) { + mem = ngtcp2_mem_default(); + } + + return transport_params_copy_new(pparams, ¶ms, mem); +} + +void ngtcp2_transport_params_del(ngtcp2_transport_params *params, + const ngtcp2_mem *mem) { + if (params == NULL) { + return; + } + + if (mem == NULL) { + mem = ngtcp2_mem_default(); + } + + ngtcp2_mem_free(mem, params); +} + +int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, + const ngtcp2_transport_params *src, + const ngtcp2_mem *mem) { + if (src == NULL) { + *pdest = NULL; + return 0; + } + + return transport_params_copy_new(pdest, src, mem); +} + +static void transport_params_copy(ngtcp2_transport_params *dest, + const ngtcp2_transport_params *src, + int transport_params_version) { + assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_V1: + memcpy(dest, src, + offsetof(ngtcp2_transport_params, version_info_present) + + sizeof(src->version_info_present)); + + break; + } +} + +const ngtcp2_transport_params * +ngtcp2_transport_params_convert_to_latest(ngtcp2_transport_params *dest, + int transport_params_version, + const ngtcp2_transport_params *src) { + if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { + return src; + } + + ngtcp2_transport_params_default(dest); + + transport_params_copy(dest, src, transport_params_version); + + return dest; +} + +void ngtcp2_transport_params_convert_to_old( + int transport_params_version, ngtcp2_transport_params *dest, + const ngtcp2_transport_params *src) { + assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); + + transport_params_copy(dest, src, transport_params_version); +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.h similarity index 55% rename from deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h rename to deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.h index 3457a8f2053aba..c077f06a9dd717 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.h @@ -22,15 +22,62 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef NGTCP2_CONVERSION_H -#define NGTCP2_CONVERSION_H +#ifndef NGTCP2_TRANSPORT_PARAMS_H +#define NGTCP2_TRANSPORT_PARAMS_H #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include +/* ngtcp2_transport_param_id is the registry of QUIC transport + parameter ID. 
*/ +typedef uint64_t ngtcp2_transport_param_id; + +#define NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID 0x00 +#define NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT 0x01 +#define NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN 0x02 +#define NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE 0x03 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA 0x04 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 0x05 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 0x06 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI 0x07 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI 0x08 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI 0x09 +#define NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT 0x0a +#define NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY 0x0b +#define NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION 0x0c +#define NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS 0x0d +#define NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT 0x0e +#define NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID 0x0f +#define NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID 0x10 +/* https://datatracker.ietf.org/doc/html/rfc9221 */ +#define NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE 0x20 +#define NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT 0x2ab2 +/* https://datatracker.ietf.org/doc/html/rfc9368 */ +#define NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION 0x11 + +/* NGTCP2_MAX_STREAMS is the maximum number of streams. */ +#define NGTCP2_MAX_STREAMS (1LL << 60) + +/* + * ngtcp2_transport_params_copy_new makes a copy of |src|, and assigns + * it to |*pdest|. If |src| is NULL, NULL is assigned to |*pdest|. + * + * Caller is responsible to call ngtcp2_transport_params_del to free + * the memory assigned to |*pdest|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_NOMEM + * Out of memory. 
+ */ +int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, + const ngtcp2_transport_params *src, + const ngtcp2_mem *mem); + /* * ngtcp2_transport_params_convert_to_latest converts |src| of version * |transport_params_version| to the latest version @@ -68,4 +115,4 @@ void ngtcp2_transport_params_convert_to_old(int transport_params_version, ngtcp2_transport_params *dest, const ngtcp2_transport_params *src); -#endif /* NGTCP2_CONVERSION_H */ +#endif /* !defined(NGTCP2_TRANSPORT_PARAMS_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h index 9a210a320dc1ca..2b1bb51d87edc6 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -65,4 +65,4 @@ static inline int ngtcp2_tstamp_not_elapsed(ngtcp2_tstamp base, return base != UINT64_MAX && (base >= UINT64_MAX - d || base + d > ts); } -#endif /* NGTCP2_TSTAMP_H */ +#endif /* !defined(NGTCP2_TSTAMP_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c index 7c7d9ae78e914d..5ab1db72cc5384 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c @@ -26,20 +26,22 @@ #include #include +#include #ifdef HAVE_UNISTD_H +# define NGTCP2_UNREACHABLE_LOG # include -#endif /* HAVE_UNISTD_H */ -#include -#ifdef WIN32 +#elif defined(WIN32) +# define NGTCP2_UNREACHABLE_LOG # include -#endif /* WIN32 */ +#endif /* defined(WIN32) */ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) { +#ifdef NGTCP2_UNREACHABLE_LOG char *buf; size_t buflen; int rv; -#define NGTCP2_UNREACHABLE_TEMPLATE "%s:%d %s: Unreachable.\n" +# define NGTCP2_UNREACHABLE_TEMPLATE "%s:%d %s: Unreachable.\n" rv = snprintf(NULL, 0, NGTCP2_UNREACHABLE_TEMPLATE, file, line, func); if (rv < 0) { @@ -58,14 +60,15 @@ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) { abort(); } -#ifndef WIN32 +# ifndef WIN32 while (write(STDERR_FILENO, buf, (size_t)rv) == -1 && errno == EINTR) ; -#else /* WIN32 */ +# else /* defined(WIN32) */ _write(_fileno(stderr), buf, (unsigned int)rv); -#endif /* WIN32 */ +# endif /* defined(WIN32) */ free(buf); +#endif /* defined(NGTCP2_UNREACHABLE_LOG) */ abort(); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h index a5276fd505463f..ca7123865279f9 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h @@ -27,26 +27,26 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #ifdef __FILE_NAME__ # define NGTCP2_FILE_NAME __FILE_NAME__ -#else /* !__FILE_NAME__ */ +#else /* !defined(__FILE_NAME__) */ # define NGTCP2_FILE_NAME "(file)" -#endif /* !__FILE_NAME__ */ +#endif /* !defined(__FILE_NAME__) */ #define ngtcp2_unreachable() \ ngtcp2_unreachable_fail(NGTCP2_FILE_NAME, __LINE__, __func__) #ifdef _MSC_VER __declspec(noreturn) -#endif /* _MSC_VER */ +#endif /* defined(_MSC_VER) */ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) #ifndef _MSC_VER __attribute__((noreturn)) -#endif /* !_MSC_VER */ +#endif /* !defined(_MSC_VER) */ ; -#endif /* NGTCP2_UNREACHABLE_H */ +#endif /* !defined(NGTCP2_UNREACHABLE_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c index 
dbc7b668042695..0b9c92d47d7d88 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c @@ -50,6 +50,7 @@ int ngtcp2_vec_new(ngtcp2_vec **pvec, const uint8_t *data, size_t datalen, p = (uint8_t *)(*pvec) + sizeof(ngtcp2_vec); (*pvec)->base = p; (*pvec)->len = datalen; + if (datalen) { /* p = */ ngtcp2_cpymem(p, data, datalen); } @@ -89,8 +90,8 @@ int64_t ngtcp2_vec_len_varint(const ngtcp2_vec *vec, size_t n) { return (int64_t)res; } -ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *src, size_t *psrccnt, ngtcp2_vec *dst, - size_t *pdstcnt, size_t left, size_t maxcnt) { +ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, + size_t *psrccnt, size_t left, size_t maxcnt) { size_t i; size_t srccnt = *psrccnt; size_t nmove; @@ -203,6 +204,7 @@ size_t ngtcp2_vec_merge(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, } else { dst[(*pdstcnt)++] = *b; } + left -= b->len; } @@ -222,11 +224,14 @@ size_t ngtcp2_vec_copy_at_most(ngtcp2_vec *dst, size_t dstcnt, ++i; continue; } + dst[j] = src[i]; + if (dst[j].len > left) { dst[j].len = left; return j + 1; } + left -= dst[j].len; ++i; ++j; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h index a39c4392fd2627..f7611efcb7d6ce 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -74,19 +74,21 @@ uint64_t ngtcp2_vec_len(const ngtcp2_vec *vec, size_t n); int64_t ngtcp2_vec_len_varint(const ngtcp2_vec *vec, size_t n); /* - * ngtcp2_vec_split splits |src| to |dst| so that the sum of the - * length in |src| does not exceed |left| bytes. The |maxcnt| is the - * maximum number of elements which |dst| array can contain. The - * caller must set |*psrccnt| to the number of elements of |src|. - * Similarly, the caller must set |*pdstcnt| to the number of elements - * of |dst|. The split does not necessarily occur at the boundary of - * ngtcp2_vec object. After split has done, this function updates - * |*psrccnt| and |*pdstcnt|. This function returns the number of - * bytes moved from |src| to |dst|. If split cannot be made because - * doing so exceeds |maxcnt|, this function returns -1. + * ngtcp2_vec_split splits |src| at the data position where + * ngtcp2_vec_len(|src|) does not exceed |left| bytes. The removed + * vectors are moved to |dst|. The existing elements in |dst| are + * moved forward to make up a space. The |maxcnt| is the maximum + * number of elements which |dst| array can contain. The caller must + * set |*psrccnt| to the number of elements of |src|. Similarly, the + * caller must set |*pdstcnt| to the number of elements of |dst|. The + * split does not necessarily occur at the boundary of ngtcp2_vec + * object. After split has done, this function updates |*psrccnt| and + * |*pdstcnt|. This function returns the number of bytes moved from + * |src| to |dst|. If split cannot be made because doing so exceeds + * |maxcnt|, this function returns -1. 
*/ -ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *src, size_t *psrccnt, ngtcp2_vec *dst, - size_t *pdstcnt, size_t left, size_t maxcnt); +ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, + size_t *psrccnt, size_t left, size_t maxcnt); /* * ngtcp2_vec_merge merges |src| into |dst| by moving at most |left| @@ -117,4 +119,4 @@ size_t ngtcp2_vec_copy_at_most(ngtcp2_vec *dst, size_t dstcnt, */ void ngtcp2_vec_copy(ngtcp2_vec *dst, const ngtcp2_vec *src, size_t cnt); -#endif /* NGTCP2_VEC_H */ +#endif /* !defined(NGTCP2_VEC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c index b31162c3ebe0d7..d2557e9ef8a580 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h index 50415f10b8c37b..c90a9fdb9078da 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h @@ -37,7 +37,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -62,4 +62,4 @@ void ngtcp2_window_filter_reset(ngtcp2_window_filter *wf, uint64_t new_sample, uint64_t ngtcp2_window_filter_get_best(ngtcp2_window_filter *wf); -#endif /* NGTCP2_WINDOW_FILTER_H */ +#endif /* !defined(NGTCP2_WINDOW_FILTER_H) */ From f99f95f62f3266bfc6f1b9eb0c3b79dc7aeb706a Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 25 Nov 2024 20:02:35 -0500 Subject: [PATCH 49/53] deps: update corepack to 0.30.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/55977 Reviewed-By: Michaël Zasso Reviewed-By: Trivikram Kamat Reviewed-By: Antoine du Hamel --- deps/corepack/CHANGELOG.md | 12 ++++++++++++ deps/corepack/dist/corepack.js | 1 + deps/corepack/dist/lib/corepack.cjs | 20 +++++++++++++------- deps/corepack/dist/npm.js | 1 + deps/corepack/dist/npx.js | 1 + deps/corepack/dist/pnpm.js | 1 + deps/corepack/dist/pnpx.js | 1 + deps/corepack/dist/yarn.js | 1 + deps/corepack/dist/yarnpkg.js | 1 + deps/corepack/package.json | 2 +- 10 files changed, 33 insertions(+), 8 deletions(-) diff --git a/deps/corepack/CHANGELOG.md b/deps/corepack/CHANGELOG.md index 7de934c0d2c0db..941d0b6b7e5e25 100644 --- a/deps/corepack/CHANGELOG.md +++ b/deps/corepack/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## [0.30.0](https://github.com/nodejs/corepack/compare/v0.29.4...v0.30.0) (2024-11-23) + + +### Features + +* update package manager versions ([#578](https://github.com/nodejs/corepack/issues/578)) ([a286c8f](https://github.com/nodejs/corepack/commit/a286c8f5537ea9ecf9b6ff53c7bc3e8da4e3c8bb)) + + +### Performance Improvements + +* prefer `module.enableCompileCache` over `v8-compile-cache` ([#574](https://github.com/nodejs/corepack/issues/574)) ([cba6905](https://github.com/nodejs/corepack/commit/cba690575bd606faeee54bd512ccb8797d49055f)) + ## [0.29.4](https://github.com/nodejs/corepack/compare/v0.29.3...v0.29.4) (2024-09-07) diff --git a/deps/corepack/dist/corepack.js b/deps/corepack/dist/corepack.js index b1b22662466f86..6179b11c083cb5 100755 --- a/deps/corepack/dist/corepack.js +++ b/deps/corepack/dist/corepack.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='0'; 
+require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(process.argv.slice(2)); \ No newline at end of file diff --git a/deps/corepack/dist/lib/corepack.cjs b/deps/corepack/dist/lib/corepack.cjs index 2978fc336232e0..e1919339dc38bd 100644 --- a/deps/corepack/dist/lib/corepack.cjs +++ b/deps/corepack/dist/lib/corepack.cjs @@ -21260,7 +21260,7 @@ function String2(descriptor, ...args) { } // package.json -var version = "0.29.4"; +var version = "0.30.0"; // sources/Engine.ts var import_fs9 = __toESM(require("fs")); @@ -21274,7 +21274,7 @@ var import_valid3 = __toESM(require_valid2()); var config_default = { definitions: { npm: { - default: "10.8.3+sha1.e6085b2864fcfd9b1aad7b602601b5a2fc116699", + default: "10.9.1+sha1.ab141c1229765c11c8c59060fc9cf450a2207bd6", fetchLatestFrom: { type: "npm", package: "npm" @@ -21311,7 +21311,7 @@ var config_default = { } }, pnpm: { - default: "9.9.0+sha1.3edbe440f4e570aa8f049adbd06b9483d55cc2d2", + default: "9.14.2+sha1.5202b50ab92394b3c922d2e293f196e2df6d441b", fetchLatestFrom: { type: "npm", package: "pnpm" @@ -21375,7 +21375,7 @@ var config_default = { package: "yarn" }, transparent: { - default: "4.4.1+sha224.fd21d9eb5fba020083811af1d4953acc21eeb9f6ff97efd1b3f9d4de", + default: "4.5.2+sha224.c2e2e9ed3cdadd6ec250589b3393f71ae56d5ec297af11cec1eba3b4", commands: [ [ "yarn", @@ -21965,8 +21965,11 @@ async function runVersion(locator, installSpec, binName, args) { } if (!binPath) throw new Error(`Assertion failed: Unable to locate path for bin '${binName}'`); - if (locator.name !== `npm` || (0, import_lt.default)(locator.reference, `9.7.0`)) - await Promise.resolve().then(() => __toESM(require_v8_compile_cache())); + if (!import_module.default.enableCompileCache) { + if (locator.name !== `npm` || (0, import_lt.default)(locator.reference, `9.7.0`)) { + await Promise.resolve().then(() => __toESM(require_v8_compile_cache())); + } + } process.env.COREPACK_ROOT = import_path7.default.dirname(require.resolve("corepack/package.json")); process.argv = [ process.execPath, @@ -21976,6 +21979,9 @@ async function runVersion(locator, installSpec, binName, args) { process.execArgv = []; process.mainModule = void 0; process.nextTick(import_module.default.runMain, binPath); + if (import_module.default.flushCompileCache) { + setImmediate(import_module.default.flushCompileCache); + } } function shouldSkipIntegrityCheck() { return process.env.COREPACK_INTEGRITY_KEYS === `` || process.env.COREPACK_INTEGRITY_KEYS === `0`; @@ -22553,7 +22559,7 @@ var EnableCommand = class extends Command { [`enable`] ]; static usage = Command.Usage({ - description: `Add the Corepack shims to the install directories`, + description: `Add the Corepack shims to the install directory`, details: ` When run, this command will check whether the shims for the specified package managers can be found with the correct values inside the install directory. If not, or if they don't exist, they will be created. 
diff --git a/deps/corepack/dist/npm.js b/deps/corepack/dist/npm.js index 7d10ba5bdf36b2..75f68b058f2dd6 100755 --- a/deps/corepack/dist/npm.js +++ b/deps/corepack/dist/npm.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['npm', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/npx.js b/deps/corepack/dist/npx.js index a8bd3e69014313..b1138bb48e1a82 100755 --- a/deps/corepack/dist/npx.js +++ b/deps/corepack/dist/npx.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['npx', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/pnpm.js b/deps/corepack/dist/pnpm.js index a0a87263435562..56ba509405033d 100755 --- a/deps/corepack/dist/pnpm.js +++ b/deps/corepack/dist/pnpm.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['pnpm', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/pnpx.js b/deps/corepack/dist/pnpx.js index 57ad4842631cd7..ee36be2e99c686 100755 --- a/deps/corepack/dist/pnpx.js +++ b/deps/corepack/dist/pnpx.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['pnpx', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/yarn.js b/deps/corepack/dist/yarn.js index eaed8596eabaa3..ce628c82b6a782 100755 --- a/deps/corepack/dist/yarn.js +++ b/deps/corepack/dist/yarn.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['yarn', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/yarnpkg.js b/deps/corepack/dist/yarnpkg.js index aada6032fa67ff..9541ed726aaa3b 100755 --- a/deps/corepack/dist/yarnpkg.js +++ b/deps/corepack/dist/yarnpkg.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['yarnpkg', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/package.json b/deps/corepack/package.json index 571c359407e07a..c9c6662e99e6c9 100644 --- a/deps/corepack/package.json +++ b/deps/corepack/package.json @@ -1,6 +1,6 @@ { "name": "corepack", - "version": "0.29.4", + "version": "0.30.0", "homepage": "https://github.com/nodejs/corepack#readme", "bugs": { "url": "https://github.com/nodejs/corepack/issues" From 9289374248deeaf761bbbf560fd536902629ecec Mon Sep 17 00:00:00 2001 From: ywave620 <60539365+ywave620@users.noreply.github.com> Date: Tue, 26 Nov 2024 11:25:22 +0800 Subject: [PATCH 50/53] http2: fix memory leak caused by premature listener removing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Http2Session should always call ondone into JS to detach the handle. In some case, ondone is defered to be called by the StreamListener through WriteWrap, we should be careful of this before getting rid of the StreamListener. 
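For illustration, a stripped-down sketch of the scenario the regression test below exercises; the TLS key and certificate paths are placeholders and error handling is omitted:

// Server keeps a write in flight, then destroys the session once the
// client side has ended the socket.
const http2 = require('http2');
const fs = require('fs');

const server = http2.createSecureServer({
  key: fs.readFileSync('key.pem'),    // placeholder path
  cert: fs.readFileSync('cert.pem'),  // placeholder path
});

let lastStream;

server.on('stream', (stream) => {
  lastStream = stream;
  stream.write('a'.repeat(1024));     // leaves a write in progress
});

server.on('secureConnection', (socket) => {
  socket.on('end', () => {
    socket.destroy();
    // The session must still run its done callback here so the native
    // handle can be detached; otherwise the session object is leaked.
    lastStream.session.destroy();
  });
});

server.listen(0);

The regression test added below goes further and uses a FinalizationRegistry together with forced GC to assert that the destroyed session is actually collected.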
PR-URL: https://github.com/nodejs/node/pull/55966 Reviewed-By: Matteo Collina Reviewed-By: James M Snell Reviewed-By: Juan José Arboleda --- src/node_http2.cc | 4 +- ...-h2leak-destroy-session-on-socket-ended.js | 80 +++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 test/parallel/test-h2leak-destroy-session-on-socket-ended.js diff --git a/src/node_http2.cc b/src/node_http2.cc index 593c8b5f07a2a4..888f70bd4df8a3 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -803,13 +803,15 @@ void Http2Session::Close(uint32_t code, bool socket_closed) { CHECK_EQ(nghttp2_session_terminate_session(session_.get(), code), 0); SendPendingData(); } else if (stream_ != nullptr) { + // so that the previous listener of the socket, typically, JS code of a + // (tls) socket will be notified of any activity later stream_->RemoveStreamListener(this); } set_destroyed(); // If we are writing we will get to make the callback in OnStreamAfterWrite. - if (!is_write_in_progress()) { + if (!is_write_in_progress() || !stream_) { Debug(this, "make done session callback"); HandleScope scope(env()->isolate()); MakeCallback(env()->ondone_string(), 0, nullptr); diff --git a/test/parallel/test-h2leak-destroy-session-on-socket-ended.js b/test/parallel/test-h2leak-destroy-session-on-socket-ended.js new file mode 100644 index 00000000000000..3f0fe3e69d924d --- /dev/null +++ b/test/parallel/test-h2leak-destroy-session-on-socket-ended.js @@ -0,0 +1,80 @@ +'use strict'; +// Flags: --expose-gc + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const http2 = require('http2'); +const tls = require('tls'); +const fixtures = require('../common/fixtures'); +const assert = require('assert'); + +const registry = new FinalizationRegistry(common.mustCall((name) => { + assert(name, 'session'); +})); + +const server = http2.createSecureServer({ + key: fixtures.readKey('agent1-key.pem'), + cert: fixtures.readKey('agent1-cert.pem'), +}); + +let firstServerStream; + + +server.on('secureConnection', (s) => { + console.log('secureConnection'); + s.on('end', () => { + console.log(s.destroyed); // false !! 
+ s.destroy(); + firstServerStream.session.destroy(); + + firstServerStream = null; + + setImmediate(() => { + global.gc(); + global.gc(); + + server.close(); + }); + }); +}); + +server.on('session', (s) => { + registry.register(s, 'session'); +}); + +server.on('stream', (stream) => { + console.log('stream...'); + stream.write('a'.repeat(1024)); + firstServerStream = stream; + setImmediate(() => console.log('Draining setImmediate after writing')); +}); + + +server.listen(() => { + client(); +}); + + +const h2fstStream = [ + 'UFJJICogSFRUUC8yLjANCg0KU00NCg0K', + // http message (1st stream:) + 'AAAABAAAAAAA', + 'AAAPAQUAAAABhIJBiqDkHROdCbjwHgeG', +]; +function client() { + const client = tls.connect({ + port: server.address().port, + host: 'localhost', + rejectUnauthorized: false, + ALPNProtocols: ['h2'] + }, () => { + client.end(Buffer.concat(h2fstStream.map((s) => Buffer.from(s, 'base64'))), (err) => { + assert.ifError(err); + }); + }); + + client.on('error', (error) => { + console.error('Connection error:', error); + }); +} From ce53f1689f678ff04aa8460f642f176830163cf9 Mon Sep 17 00:00:00 2001 From: Shelley Vohr Date: Tue, 26 Nov 2024 11:01:09 +0100 Subject: [PATCH 51/53] build: set node_arch to target_cpu in GN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/55967 Reviewed-By: James M Snell Reviewed-By: Luigi Pinca Reviewed-By: Juan José Arboleda --- unofficial.gni | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unofficial.gni b/unofficial.gni index e2d370329c0e8c..7c635322b770fa 100644 --- a/unofficial.gni +++ b/unofficial.gni @@ -91,7 +91,7 @@ template("node_gn_build") { if (current_cpu == "x86") { node_arch = "ia32" } else { - node_arch = current_cpu + node_arch = target_cpu } if (target_os == "win") { node_platform = "win32" From 7133c0459f0c1551b64ffd3deaf5ce917bc39bd2 Mon Sep 17 00:00:00 2001 From: Stefan Stojanovic Date: Tue, 26 Nov 2024 11:12:34 +0100 Subject: [PATCH 52/53] build: avoid compiling with VS v17.12 Refs: https://github.com/nodejs/build/issues/3963 PR-URL: https://github.com/nodejs/node/pull/55930 Refs: https://github.com/nodejs/node/pull/53863 Reviewed-By: Richard Lau Reviewed-By: Luigi Pinca Reviewed-By: Jake Yuesong Li Reviewed-By: James M Snell --- vcbuild.bat | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/vcbuild.bat b/vcbuild.bat index f93998d2b14901..59197c1ccc36e3 100644 --- a/vcbuild.bat +++ b/vcbuild.bat @@ -292,10 +292,19 @@ goto exit @rem Visual Studio v17.10 has a bug that causes the build to fail. @rem Check if the version is v17.10 and exit if it is. echo %VSCMD_VER% | findstr /b /c:"17.10" >nul -if %errorlevel% neq 1 ( +if %errorlevel% neq 1 ( echo Node.js doesn't compile with Visual Studio 17.10 Please use a different version. goto exit ) +@rem Same applies to v17.12 for MSVC. +echo %VSCMD_VER% | findstr /b /c:"17.12" >nul +if %errorlevel% neq 1 ( + @rem Clang 18.1.8 Provided with VS 17.12 works fine. + if not defined clang_cl ( + echo Node.js doesn't compile with Visual Studio 17.12 Please use a different version. 
+ goto exit + ) +) @rem check if the clang-cl build is requested if not defined clang_cl goto clang-skip From 971f5f54df43b301979b5848d56442fafd1c1bbf Mon Sep 17 00:00:00 2001 From: Shima Ryuhei <65934663+islandryu@users.noreply.github.com> Date: Tue, 26 Nov 2024 19:12:50 +0900 Subject: [PATCH 53/53] src: safely remove the last line from dotenv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refs: https://github.com/nodejs/node/issues/55925 PR-URL: https://github.com/nodejs/node/pull/55982 Reviewed-By: Yagiz Nizipli Reviewed-By: Richard Lau Reviewed-By: Juan José Arboleda --- src/node_dotenv.cc | 10 +++++++-- .../dotenv/no-final-newline-single-quotes.env | 1 + test/fixtures/dotenv/no-final-newline.env | 1 + test/parallel/test-dotenv-edge-cases.js | 22 +++++++++++++++++++ 4 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 test/fixtures/dotenv/no-final-newline-single-quotes.env create mode 100644 test/fixtures/dotenv/no-final-newline.env diff --git a/src/node_dotenv.cc b/src/node_dotenv.cc index f594df875d7a0c..049f5cfcb77b9c 100644 --- a/src/node_dotenv.cc +++ b/src/node_dotenv.cc @@ -182,7 +182,10 @@ void Dotenv::ParseContent(const std::string_view input) { } store_.insert_or_assign(std::string(key), multi_line_value); - content.remove_prefix(content.find('\n', closing_quote + 1)); + auto newline = content.find('\n', closing_quote + 1); + if (newline != std::string_view::npos) { + content.remove_prefix(newline); + } continue; } } @@ -210,7 +213,10 @@ void Dotenv::ParseContent(const std::string_view input) { store_.insert_or_assign(std::string(key), value); // Select the first newline after the closing quotation mark // since there could be newline characters inside the value. - content.remove_prefix(content.find('\n', closing_quote + 1)); + auto newline = content.find('\n', closing_quote + 1); + if (newline != std::string_view::npos) { + content.remove_prefix(newline); + } } } else { // Regular key value pair. 
diff --git a/test/fixtures/dotenv/no-final-newline-single-quotes.env b/test/fixtures/dotenv/no-final-newline-single-quotes.env new file mode 100644 index 00000000000000..4f1b37d7741e29 --- /dev/null +++ b/test/fixtures/dotenv/no-final-newline-single-quotes.env @@ -0,0 +1 @@ +BASIC='basic' \ No newline at end of file diff --git a/test/fixtures/dotenv/no-final-newline.env b/test/fixtures/dotenv/no-final-newline.env new file mode 100644 index 00000000000000..ef996552bd9c90 --- /dev/null +++ b/test/fixtures/dotenv/no-final-newline.env @@ -0,0 +1 @@ +BASIC="basic" \ No newline at end of file diff --git a/test/parallel/test-dotenv-edge-cases.js b/test/parallel/test-dotenv-edge-cases.js index 30fd9d20618ba0..769d33a13b8ce9 100644 --- a/test/parallel/test-dotenv-edge-cases.js +++ b/test/parallel/test-dotenv-edge-cases.js @@ -8,6 +8,8 @@ const fixtures = require('../common/fixtures'); const validEnvFilePath = '../fixtures/dotenv/valid.env'; const nodeOptionsEnvFilePath = '../fixtures/dotenv/node-options.env'; +const noFinalNewlineEnvFilePath = '../fixtures/dotenv/no-final-newline.env'; +const noFinalNewlineSingleQuotesEnvFilePath = '../fixtures/dotenv/no-final-newline-single-quotes.env'; describe('.env supports edge cases', () => { it('supports multiple declarations, including optional ones', async () => { @@ -148,4 +150,24 @@ describe('.env supports edge cases', () => { assert.strictEqual(child.stderr, ''); assert.strictEqual(child.code, 0); }); + + it('should handle file without a final newline', async () => { + const code = ` + require('assert').strictEqual(process.env.BASIC, 'basic'); + `.trim(); + const child = await common.spawnPromisified( + process.execPath, + [ `--env-file=${path.resolve(__dirname, noFinalNewlineEnvFilePath)}`, '--eval', code ], + ); + + const SingleQuotesChild = await common.spawnPromisified( + process.execPath, + [ `--env-file=${path.resolve(__dirname, noFinalNewlineSingleQuotesEnvFilePath)}`, '--eval', code ], + ); + + assert.strictEqual(child.stderr, ''); + assert.strictEqual(child.code, 0); + assert.strictEqual(SingleQuotesChild.stderr, ''); + assert.strictEqual(SingleQuotesChild.code, 0); + }); });
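As a quick way to exercise the behaviour covered by this last patch outside the test suite (the file name and value below are arbitrary), a quoted assignment on a final line without a trailing newline should still be applied:

// Write an .env file whose last line has no trailing newline, then load it
// back through --env-file in a child process.
const { writeFileSync } = require('fs');
const { spawnSync } = require('child_process');

writeFileSync('no-final-newline.env', 'BASIC="basic"');  // note: no trailing \n

const result = spawnSync(process.execPath, [
  '--env-file=no-final-newline.env',
  '-e', 'require("assert").strictEqual(process.env.BASIC, "basic");',
]);

console.log(result.status === 0 ? 'BASIC loaded as expected'
                                : result.stderr.toString());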