From 0c6b69251de792091ea0aafdbccb28634408c4b3 Mon Sep 17 00:00:00 2001 From: Jean-Christophe Fillion-Robin Date: Thu, 5 Sep 2024 18:36:49 -0400 Subject: [PATCH 1/2] fix: Update ncc version to fix generation of distribution This commit addresses the following warning: > npm warn deprecated @zeit/ncc@0.22.3: @zeit/ncc is no longer maintained. Please use @vercel/ncc instead. and fixes the following error: ``` ncc: Compiling file index.js Error: error:0308010C:digital envelope routines::unsupported at new Hash (node:internal/crypto/hash:79:19) at Object.createHash (node:crypto:139:10) at hashOf (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/index.js.cache.js:3:58216) at module.exports (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/index.js.cache.js:3:60642) at runCmd (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/cli.js.cache.js:1:47355) at 819 (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/cli.js.cache.js:1:44227) at __webpack_require__ (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/cli.js.cache.js:1:169) at startup (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/cli.js.cache.js:1:339) at module.exports.8 (/home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/cli.js.cache.js:1:371) at /home/runner/work/git-publish-subdir-action/git-publish-subdir-action/action/node_modules/@zeit/ncc/dist/ncc/cli.js.cache.js:1:381 { opensslErrorStack: [ 'error:03000086:digital envelope routines::initialization error', 'error:0308010C:digital envelope routines::unsupported' ], library: 'digital envelope routines', reason: 'unsupported', code: 'ERR_OSSL_EVP_UNSUPPORTED' } ``` --- action/dist/index.js | 53701 +++++++++++++++++++------------------ action/package-lock.json | 20 +- action/package.json | 2 +- 3 files changed, 26862 insertions(+), 26861 deletions(-) diff --git a/action/dist/index.js b/action/dist/index.js index 964bf285..d00ab4dd 100644 --- a/action/dist/index.js +++ b/action/dist/index.js @@ -1,56 +1,8 @@ -module.exports = -/******/ (function(modules, runtime) { // webpackBootstrap -/******/ "use strict"; -/******/ // The module cache -/******/ var installedModules = {}; -/******/ -/******/ // The require function -/******/ function __webpack_require__(moduleId) { -/******/ -/******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) { -/******/ return installedModules[moduleId].exports; -/******/ } -/******/ // Create a new module (and put it into the cache) -/******/ var module = installedModules[moduleId] = { -/******/ i: moduleId, -/******/ l: false, -/******/ exports: {} -/******/ }; -/******/ -/******/ // Execute the module function -/******/ var threw = true; -/******/ try { -/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); -/******/ threw = false; -/******/ } finally { -/******/ if(threw) delete installedModules[moduleId]; -/******/ } -/******/ -/******/ // Flag the module as loaded -/******/ module.l = true; -/******/ -/******/ // Return the exports of the module -/******/ return module.exports; -/******/ } -/******/ -/******/ -/******/ __webpack_require__.ab = __dirname + "/"; -/******/ -/******/ // 
the startup function -/******/ function startup() { -/******/ // Load entry module and return exports -/******/ return __webpack_require__(81); -/******/ }; -/******/ -/******/ // run startup -/******/ return startup(); -/******/ }) -/************************************************************************/ -/******/ ({ +/******/ (() => { // webpackBootstrap +/******/ var __webpack_modules__ = ({ -/***/ 1: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 9726: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { "use strict"; @@ -69,3460 +21,3029 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? ( var __importStar = (this && this.__importStar) || function (mod) { if (mod && mod.__esModule) return mod; var result = {}; - if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); __setModuleDefault(result, mod); return result; }; -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; }; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.findInPath = exports.which = exports.mkdirP = exports.rmRF = exports.mv = exports.cp = void 0; -const assert_1 = __webpack_require__(59); -const childProcess = __importStar(__webpack_require__(129)); -const path = __importStar(__webpack_require__(622)); -const util_1 = __webpack_require__(669); -const ioUtil = __importStar(__webpack_require__(672)); -const exec = util_1.promisify(childProcess.exec); -const execFile = util_1.promisify(childProcess.execFile); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.main = exports.exec = void 0; +const child_process = __importStar(__nccwpck_require__(2081)); +const fast_glob_1 = __nccwpck_require__(3664); +const fs_1 = __importStar(__nccwpck_require__(7147)); +const git_url_parse_1 = __importDefault(__nccwpck_require__(8244)); +const os_1 = __nccwpck_require__(2037); +const path = __importStar(__nccwpck_require__(1017)); +const isomorphic_git_1 = __importDefault(__nccwpck_require__(5114)); +const io_1 = __nccwpck_require__(7351); /** - * Copies a file or folder. - * Based off of shelljs - https://github.com/shelljs/shelljs/blob/9237f66c52e5daa40458f94f9565e18e8132f5a6/src/cp.js - * - * @param source source path - * @param dest destination path - * @param options optional. See CopyOptions. + * Custom wrapper around the child_process module */ -function cp(source, dest, options = {}) { - return __awaiter(this, void 0, void 0, function* () { - const { force, recursive, copySourceDirectory } = readCopyOptions(options); - const destStat = (yield ioUtil.exists(dest)) ? 
yield ioUtil.stat(dest) : null; - // Dest is an existing file, but not forcing - if (destStat && destStat.isFile() && !force) { - return; +const exec = async (cmd, opts) => { + const { log } = opts; + const env = (opts === null || opts === void 0 ? void 0 : opts.env) || {}; + const ps = child_process.spawn('bash', ['-c', cmd], { + env: { + HOME: process.env.HOME, + ...env, + }, + cwd: opts.cwd, + stdio: ['pipe', 'pipe', 'pipe'], + }); + const output = { + stderr: '', + stdout: '', + }; + // We won't be providing any input to command + ps.stdin.end(); + ps.stdout.on('data', (data) => { + output.stdout += data; + log.log(`data`, data.toString()); + }); + ps.stderr.on('data', (data) => { + output.stderr += data; + log.error(data.toString()); + }); + return new Promise((resolve, reject) => ps.on('close', (code) => { + if (code !== 0) { + reject(new Error('Process exited with code: ' + code + ':\n' + output.stderr)); } - // If dest is an existing directory, should copy inside. - const newDest = destStat && destStat.isDirectory() && copySourceDirectory - ? path.join(dest, path.basename(source)) - : dest; - if (!(yield ioUtil.exists(source))) { - throw new Error(`no such file or directory: ${source}`); + else { + resolve(output); } - const sourceStat = yield ioUtil.stat(source); - if (sourceStat.isDirectory()) { - if (!recursive) { - throw new Error(`Failed to copy. ${source} is a directory, but tried to copy without recursive flag.`); - } - else { - yield cpDirRecursive(source, newDest, 0, force); - } + })); +}; +exports.exec = exec; +const DEFAULT_MESSAGE = 'Update {target-branch} to output generated at {sha}'; +// Error messages +const KNOWN_HOSTS_WARNING = ` +##[warning] KNOWN_HOSTS_FILE not set +This will probably mean that host verification will fail later on +`; +const KNOWN_HOSTS_ERROR = (host) => ` +##[error] Host key verification failed! 
+This is probably because you forgot to supply a value for KNOWN_HOSTS_FILE +or the file is invalid or doesn't correctly verify the host ${host} +`; +const SSH_KEY_ERROR = ` +##[error] Permission denied (publickey) +Make sure that the ssh private key is set correctly, and +that the public key has been added to the target repo +`; +const INVALID_KEY_ERROR = (/* unused pure expression or super */ null && (` +##[error] Error loading key: invalid format +Please check that you're setting the environment variable +SSH_PRIVATE_KEY correctly +`)); +// Paths +const REPO_SELF = 'self'; +const RESOURCES = path.join(path.dirname(__dirname), 'resources'); +const KNOWN_HOSTS_GITHUB = path.join(RESOURCES, 'known_hosts_github.com'); +const SSH_FOLDER = path.join((0, os_1.homedir)(), '.ssh'); +const KNOWN_HOSTS_TARGET = path.join(SSH_FOLDER, 'known_hosts'); +const SSH_AGENT_PID_EXTRACT = /SSH_AGENT_PID=([0-9]+);/; +const genConfig = (env = process.env) => { + if (!env.REPO) + throw new Error('REPO must be specified'); + if (!env.BRANCH) + throw new Error('BRANCH must be specified'); + if (!env.FOLDER) + throw new Error('FOLDER must be specified'); + const repo = env.REPO; + const branch = env.BRANCH; + const folder = env.FOLDER; + const squashHistory = env.SQUASH_HISTORY === 'true'; + const skipEmptyCommits = env.SKIP_EMPTY_COMMITS === 'true'; + const message = env.MESSAGE || DEFAULT_MESSAGE; + const tag = env.TAG; + // Determine the type of URL + if (repo === REPO_SELF) { + if (!env.GITHUB_TOKEN) + throw new Error('GITHUB_TOKEN must be specified when REPO == self'); + if (!env.GITHUB_REPOSITORY) + throw new Error('GITHUB_REPOSITORY must be specified when REPO == self'); + const url = `https://x-access-token:${env.GITHUB_TOKEN}@github.com/${env.GITHUB_REPOSITORY}.git`; + const config = { + repo: url, + branch, + folder, + squashHistory, + skipEmptyCommits, + mode: 'self', + message, + tag, + }; + return config; + } + const parsedUrl = (0, git_url_parse_1.default)(repo); + if (parsedUrl.protocol === 'ssh') { + if (!env.SSH_PRIVATE_KEY) + throw new Error('SSH_PRIVATE_KEY must be specified when REPO uses ssh'); + const config = { + repo, + branch, + folder, + squashHistory, + skipEmptyCommits, + mode: 'ssh', + parsedUrl, + privateKey: env.SSH_PRIVATE_KEY, + knownHostsFile: env.KNOWN_HOSTS_FILE, + message, + tag, + }; + return config; + } + throw new Error('Unsupported REPO URL'); +}; +const writeToProcess = (command, args, opts) => new Promise((resolve, reject) => { + const child = child_process.spawn(command, args, { + env: opts.env, + stdio: 'pipe', + }); + child.stdin.setDefaultEncoding('utf-8'); + child.stdin.write(opts.data); + child.stdin.end(); + child.on('error', reject); + let stderr = ''; + child.stdout.on('data', (data) => { + /* istanbul ignore next */ + opts.log.log(data.toString()); + }); + child.stderr.on('data', (data) => { + stderr += data; + opts.log.error(data.toString()); + }); + child.on('close', (code) => { + /* istanbul ignore else */ + if (code === 0) { + resolve(); } else { - if (path.relative(source, newDest) === '') { - // a file cannot be copied to itself - throw new Error(`'${newDest}' and '${source}' are the same file`); - } - yield copyFile(source, newDest, force); + reject(new Error(stderr)); } }); -} -exports.cp = cp; -/** - * Moves a path. - * - * @param source source path - * @param dest destination path - * @param options optional. See MoveOptions. 
- */ -function mv(source, dest, options = {}) { - return __awaiter(this, void 0, void 0, function* () { - if (yield ioUtil.exists(dest)) { - let destExists = true; - if (yield ioUtil.isDirectory(dest)) { - // If dest is directory copy src into dest - dest = path.join(dest, path.basename(source)); - destExists = yield ioUtil.exists(dest); +}); +const main = async ({ env = process.env, log, }) => { + var _a, _b; + const config = genConfig(env); + // Calculate paths that use temp diractory + const TMP_PATH = await fs_1.promises.mkdtemp(path.join((0, os_1.tmpdir)(), 'git-publish-subdir-action-')); + const REPO_TEMP = path.join(TMP_PATH, 'repo'); + const SSH_AUTH_SOCK = path.join(TMP_PATH, 'ssh_agent.sock'); + if (!env.GITHUB_EVENT_PATH) + throw new Error('Expected GITHUB_EVENT_PATH'); + const event = JSON.parse((await fs_1.promises.readFile(env.GITHUB_EVENT_PATH)).toString()); + const name = env.COMMIT_NAME || + ((_a = event.pusher) === null || _a === void 0 ? void 0 : _a.name) || + env.GITHUB_ACTOR || + 'Git Publish Subdirectory'; + const email = env.COMMIT_EMAIL || + ((_b = event.pusher) === null || _b === void 0 ? void 0 : _b.email) || + (env.GITHUB_ACTOR + ? `${env.GITHUB_ACTOR}@users.noreply.github.com` + : 'nobody@nowhere'); + const tag = env.TAG; + // Set Git Config + await (0, exports.exec)(`git config --global user.name "${name}"`, { log }); + await (0, exports.exec)(`git config --global user.email "${email}"`, { log }); + /** + * Get information about the current git repository + */ + const getGitInformation = async () => { + // Get the root git directory + let dir = process.cwd(); + while (true) { + const isGitRepo = await fs_1.promises + .stat(path.join(dir, '.git')) + .then((s) => s.isDirectory()) + .catch(() => false); + if (isGitRepo) { + break; } - if (destExists) { - if (options.force == null || options.force) { - yield rmRF(dest); - } - else { - throw new Error('Destination already exists'); - } - } - } - yield mkdirP(path.dirname(dest)); - yield ioUtil.rename(source, dest); - }); -} -exports.mv = mv; -/** - * Remove a path recursively with force - * - * @param inputPath path to remove - */ -function rmRF(inputPath) { - return __awaiter(this, void 0, void 0, function* () { - if (ioUtil.IS_WINDOWS) { - // Node doesn't provide a delete operation, only an unlink function. This means that if the file is being used by another - // program (e.g. antivirus), it won't be deleted. To address this, we shell out the work to rd/del. 
- // Check for invalid characters - // https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file - if (/[*"<>|]/.test(inputPath)) { - throw new Error('File path must not contain `*`, `"`, `<`, `>` or `|` on Windows'); - } - try { - const cmdPath = ioUtil.getCmdPath(); - if (yield ioUtil.isDirectory(inputPath, true)) { - yield exec(`${cmdPath} /s /c "rd /s /q "%inputPath%""`, { - env: { inputPath } - }); - } - else { - yield exec(`${cmdPath} /s /c "del /f /a "%inputPath%""`, { - env: { inputPath } - }); - } - } - catch (err) { - // if you try to delete a file that doesn't exist, desired result is achieved - // other errors are valid - if (err.code !== 'ENOENT') - throw err; - } - // Shelling out fails to remove a symlink folder with missing source, this unlink catches that - try { - yield ioUtil.unlink(inputPath); - } - catch (err) { - // if you try to delete a file that doesn't exist, desired result is achieved - // other errors are valid - if (err.code !== 'ENOENT') - throw err; - } - } - else { - let isDir = false; - try { - isDir = yield ioUtil.isDirectory(inputPath); - } - catch (err) { - // if you try to delete a file that doesn't exist, desired result is achieved - // other errors are valid - if (err.code !== 'ENOENT') - throw err; - return; - } - if (isDir) { - yield execFile(`rm`, [`-rf`, `${inputPath}`]); + // We need to traverse up one + const next = path.dirname(dir); + if (next === dir) { + log.log(`##[info] Not running in git directory, unable to get information about source commit`); + return { + commitMessage: '', + sha: '', + }; } else { - yield ioUtil.unlink(inputPath); - } - } - }); -} -exports.rmRF = rmRF; -/** - * Make a directory. Creates the full path with folders in between - * Will throw if it fails - * - * @param fsPath path to create - * @returns Promise - */ -function mkdirP(fsPath) { - return __awaiter(this, void 0, void 0, function* () { - assert_1.ok(fsPath, 'a path argument must be provided'); - yield ioUtil.mkdir(fsPath, { recursive: true }); - }); -} -exports.mkdirP = mkdirP; -/** - * Returns path of a tool had the tool actually been invoked. Resolves via paths. - * If you check and the tool does not exist, it will throw. - * - * @param tool name of the tool - * @param check whether to check if tool exists - * @returns Promise path to tool - */ -function which(tool, check) { - return __awaiter(this, void 0, void 0, function* () { - if (!tool) { - throw new Error("parameter 'tool' is required"); - } - // recursive when check=true - if (check) { - const result = yield which(tool, false); - if (!result) { - if (ioUtil.IS_WINDOWS) { - throw new Error(`Unable to locate executable file: ${tool}. Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also verify the file has a valid extension for an executable file.`); - } - else { - throw new Error(`Unable to locate executable file: ${tool}. Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also check the file mode to verify the file is executable.`); - } + dir = next; } - return result; } - const matches = yield findInPath(tool); - if (matches && matches.length > 0) { - return matches[0]; + // Get current sha of repo to use in commit message + const gitLog = await isomorphic_git_1.default.log({ + fs: fs_1.default, + depth: 1, + dir, + }); + const commit = gitLog.length > 0 ? 
gitLog[0] : undefined; + if (!commit) { + log.log(`##[info] Unable to get information about HEAD commit`); + return { + commitMessage: '', + sha: '', + }; } - return ''; + return { + // Use trim to remove the trailing newline + commitMessage: commit.commit.message.trim(), + sha: commit.oid, + }; + }; + const gitInfo = await getGitInformation(); + // Environment to pass to children + const childEnv = Object.assign({}, process.env, { + SSH_AUTH_SOCK, }); -} -exports.which = which; -/** - * Returns a list of all occurrences of the given tool on the system path. - * - * @returns Promise the paths of the tool - */ -function findInPath(tool) { - return __awaiter(this, void 0, void 0, function* () { - if (!tool) { - throw new Error("parameter 'tool' is required"); - } - // build the list of extensions to try - const extensions = []; - if (ioUtil.IS_WINDOWS && process.env['PATHEXT']) { - for (const extension of process.env['PATHEXT'].split(path.delimiter)) { - if (extension) { - extensions.push(extension); - } - } + if (config.mode === 'ssh') { + // Copy over the known_hosts file if set + let known_hosts = config.knownHostsFile; + // Use well-known known_hosts for certain domains + if (!known_hosts && config.parsedUrl.resource === 'github.com') { + known_hosts = KNOWN_HOSTS_GITHUB; } - // if it's rooted, return it if exists. otherwise return empty. - if (ioUtil.isRooted(tool)) { - const filePath = yield ioUtil.tryGetExecutablePath(tool, extensions); - if (filePath) { - return [filePath]; - } - return []; + if (!known_hosts) { + log.warn(KNOWN_HOSTS_WARNING); } - // if any path separators, return empty - if (tool.includes(path.sep)) { - return []; + else { + await (0, io_1.mkdirP)(SSH_FOLDER); + await fs_1.promises.copyFile(known_hosts, KNOWN_HOSTS_TARGET); } - // build the list of directories - // - // Note, technically "where" checks the current directory on Windows. From a toolkit perspective, - // it feels like we should not do this. Checking the current directory seems like more of a use - // case of a shell, and the which() function exposed by the toolkit should strive for consistency - // across platforms. 
- const directories = []; - if (process.env.PATH) { - for (const p of process.env.PATH.split(path.delimiter)) { - if (p) { - directories.push(p); - } + // Setup ssh-agent with private key + log.log(`Setting up ssh-agent on ${SSH_AUTH_SOCK}`); + const sshAgentMatch = SSH_AGENT_PID_EXTRACT.exec((await (0, exports.exec)(`ssh-agent -a ${SSH_AUTH_SOCK}`, { log, env: childEnv })) + .stdout); + /* istanbul ignore if */ + if (!sshAgentMatch) + throw new Error('Unexpected output from ssh-agent'); + childEnv.SSH_AGENT_PID = sshAgentMatch[1]; + log.log(`Adding private key to ssh-agent at ${SSH_AUTH_SOCK}`); + await writeToProcess('ssh-add', ['-'], { + data: config.privateKey + '\n', + env: childEnv, + log, + }); + log.log(`Private key added`); + } + // Clone the target repo + await (0, exports.exec)(`git clone "${config.repo}" "${REPO_TEMP}"`, { + log, + env: childEnv, + }).catch((err) => { + const s = err.toString(); + /* istanbul ignore else */ + if (config.mode === 'ssh') { + /* istanbul ignore else */ + if (s.indexOf('Host key verification failed') !== -1) { + log.error(KNOWN_HOSTS_ERROR(config.parsedUrl.resource)); } - } - // find all matches - const matches = []; - for (const directory of directories) { - const filePath = yield ioUtil.tryGetExecutablePath(path.join(directory, tool), extensions); - if (filePath) { - matches.push(filePath); + else if (s.indexOf('Permission denied (publickey') !== -1) { + log.error(SSH_KEY_ERROR); } } - return matches; + throw err; }); -} -exports.findInPath = findInPath; -function readCopyOptions(options) { - const force = options.force == null ? true : options.force; - const recursive = Boolean(options.recursive); - const copySourceDirectory = options.copySourceDirectory == null - ? true - : Boolean(options.copySourceDirectory); - return { force, recursive, copySourceDirectory }; -} -function cpDirRecursive(sourceDir, destDir, currentDepth, force) { - return __awaiter(this, void 0, void 0, function* () { - // Ensure there is not a run away recursive copy - if (currentDepth >= 255) - return; - currentDepth++; - yield mkdirP(destDir); - const files = yield ioUtil.readdir(sourceDir); - for (const fileName of files) { - const srcFile = `${sourceDir}/${fileName}`; - const destFile = `${destDir}/${fileName}`; - const srcFileStat = yield ioUtil.lstat(srcFile); - if (srcFileStat.isDirectory()) { - // Recurse - yield cpDirRecursive(srcFile, destFile, currentDepth, force); - } - else { - yield copyFile(srcFile, destFile, force); + if (!config.squashHistory) { + // Fetch branch if it exists + await (0, exports.exec)(`git fetch -u origin ${config.branch}:${config.branch}`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }).catch((err) => { + const s = err.toString(); + /* istanbul ignore if */ + if (s.indexOf("Couldn't find remote ref") === -1) { + log.error("##[warning] Failed to fetch target branch, probably doesn't exist"); + log.error(err); } + }); + // Check if branch already exists + log.log(`##[info] Checking if branch ${config.branch} exists already`); + const branchCheck = await (0, exports.exec)(`git branch --list "${config.branch}"`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }); + if (branchCheck.stdout.trim() === '') { + // Branch does not exist yet, let's check it out as an orphan + log.log(`##[info] ${config.branch} does not exist, creating as orphan`); + await (0, exports.exec)(`git checkout --orphan "${config.branch}"`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }); } - // Change the mode for the newly created directory - yield ioUtil.chmod(destDir, 
(yield ioUtil.stat(sourceDir)).mode); - }); -} -// Buffered file copy -function copyFile(srcFile, destFile, force) { - return __awaiter(this, void 0, void 0, function* () { - if ((yield ioUtil.lstat(srcFile)).isSymbolicLink()) { - // unlink/re-link it - try { - yield ioUtil.lstat(destFile); - yield ioUtil.unlink(destFile); - } - catch (e) { - // Try to override file permission - if (e.code === 'EPERM') { - yield ioUtil.chmod(destFile, '0666'); - yield ioUtil.unlink(destFile); - } - // other errors = it doesn't exist, no work to do - } - // Copy over symlink - const symlinkFull = yield ioUtil.readlink(srcFile); - yield ioUtil.symlink(symlinkFull, destFile, ioUtil.IS_WINDOWS ? 'junction' : null); + else { + await (0, exports.exec)(`git checkout "${config.branch}"`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }); } - else if (!(yield ioUtil.exists(destFile)) || force) { - yield ioUtil.copyFile(srcFile, destFile); + } + else { + // Checkout a random branch so we can delete the target branch if it exists + log.log('Checking out temp branch'); + await (0, exports.exec)(`git checkout -b "${Math.random().toString(36).substring(2)}"`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }); + // Delete the target branch if it exists + await (0, exports.exec)(`git branch -D "${config.branch}"`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }).catch((err) => { }); + // Checkout target branch as an orphan + await (0, exports.exec)(`git checkout --orphan "${config.branch}"`, { + log, + env: childEnv, + cwd: REPO_TEMP, + }); + log.log('Checked out orphan'); + } + // // Update contents of branch + log.log(`##[info] Updating branch ${config.branch}`); + /** + * The list of globs we'll use for clearing + */ + const globs = await (async () => { + if (env.CLEAR_GLOBS_FILE) { + // We need to use a custom mechanism to clear the files + log.log(`##[info] Using custom glob file to clear target branch ${env.CLEAR_GLOBS_FILE}`); + const globList = (await fs_1.promises.readFile(env.CLEAR_GLOBS_FILE)) + .toString() + .split('\n') + .map((s) => s.trim()) + .filter((s) => s !== ''); + return globList; + } + else if (env.TARGET_DIR) { + log.log(`##[info] Removing all files from target dir ${env.TARGET_DIR} on target branch`); + return [`${env.TARGET_DIR}/**/*`, '!.git']; + } + else { + // Remove all files + log.log(`##[info] Removing all files from target branch`); + return ['**/*', '!.git']; } + })(); + const filesToDelete = (0, fast_glob_1.stream)(globs, { + absolute: true, + dot: true, + followSymbolicLinks: false, + cwd: REPO_TEMP, }); -} -//# sourceMappingURL=io.js.map - -/***/ }), - -/***/ 2: -/***/ (function(__unusedmodule, exports, __webpack_require__) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -const path = __webpack_require__(622); -const deep_1 = __webpack_require__(887); -const entry_1 = __webpack_require__(703); -const error_1 = __webpack_require__(375); -const entry_2 = __webpack_require__(317); -class Provider { - constructor(_settings) { - this._settings = _settings; - this.errorFilter = new error_1.default(this._settings); - this.entryFilter = new entry_1.default(this._settings, this._getMicromatchOptions()); - this.deepFilter = new deep_1.default(this._settings, this._getMicromatchOptions()); - this.entryTransformer = new entry_2.default(this._settings); + // Delete all files from the filestream + for await (const entry of filesToDelete) { + await fs_1.promises.unlink(entry); } - _getRootDirectory(task) { - return path.resolve(this._settings.cwd, task.base); + 
const folder = path.resolve(process.cwd(), config.folder); + const destinationFolder = env.TARGET_DIR ? env.TARGET_DIR : './'; + // Make sure the destination folder exists + await (0, io_1.mkdirP)(path.resolve(REPO_TEMP, destinationFolder)); + log.log(`##[info] Copying all files from ${folder}`); + await (0, io_1.cp)(`${folder}/`, `${REPO_TEMP}/${destinationFolder}/`, { + recursive: true, + copySourceDirectory: false, + }); + await (0, exports.exec)(`git add -A .`, { log, env: childEnv, cwd: REPO_TEMP }); + const message = config.message + .replace(/\{target\-branch\}/g, config.branch) + .replace(/\{sha\}/g, gitInfo.sha.substr(0, 7)) + .replace(/\{long\-sha\}/g, gitInfo.sha) + .replace(/\{msg\}/g, gitInfo.commitMessage); + await isomorphic_git_1.default.commit({ + fs: fs_1.default, + dir: REPO_TEMP, + message, + author: { email, name }, + }); + if (tag) { + log.log(`##[info] Tagging commit with ${tag}`); + await isomorphic_git_1.default.tag({ + fs: fs_1.default, + dir: REPO_TEMP, + ref: tag, + force: true, + }); } - _getReaderOptions(task) { - const basePath = task.base === '.' ? '' : task.base; - return { - basePath, - pathSegmentSeparator: '/', - concurrency: this._settings.concurrency, - deepFilter: this.deepFilter.getFilter(basePath, task.positive, task.negative), - entryFilter: this.entryFilter.getFilter(task.positive, task.negative), - errorFilter: this.errorFilter.getFilter(), - followSymbolicLinks: this._settings.followSymbolicLinks, - fs: this._settings.fs, - stats: this._settings.stats, - throwErrorOnBrokenSymbolicLink: this._settings.throwErrorOnBrokenSymbolicLink, - transform: this.entryTransformer.getTransformer() - }; + if (config.skipEmptyCommits) { + log.log(`##[info] Checking whether contents have changed before pushing`); + // Before we push, check whether it changed the tree, + // and avoid pushing if not + const head = await isomorphic_git_1.default.resolveRef({ + fs: fs_1.default, + dir: REPO_TEMP, + ref: 'HEAD', + }); + const currentCommit = await isomorphic_git_1.default.readCommit({ + fs: fs_1.default, + dir: REPO_TEMP, + oid: head, + }); + if (currentCommit.commit.parent.length === 1) { + const previousCommit = await isomorphic_git_1.default.readCommit({ + fs: fs_1.default, + dir: REPO_TEMP, + oid: currentCommit.commit.parent[0], + }); + if (currentCommit.commit.tree === previousCommit.commit.tree) { + log.log(`##[info] Contents of target repo unchanged, exiting.`); + return; + } + } } - _getMicromatchOptions() { - return { - dot: this._settings.dot, - matchBase: this._settings.baseNameMatch, - nobrace: !this._settings.braceExpansion, - nocase: !this._settings.caseSensitiveMatch, - noext: !this._settings.extglob, - noglobstar: !this._settings.globstar, - posix: true, - strictSlashes: false - }; + log.log(`##[info] Pushing`); + const forceArg = config.squashHistory ? '-f' : ''; + const tagsArg = tag ? 
'--tags' : ''; + const push = await (0, exports.exec)(`git push ${forceArg} origin "${config.branch}" ${tagsArg}`, { log, env: childEnv, cwd: REPO_TEMP }); + log.log(push.stdout); + log.log(`##[info] Deployment Successful`); + if (config.mode === 'ssh') { + log.log(`##[info] Killing ssh-agent`); + await (0, exports.exec)(`ssh-agent -k`, { log, env: childEnv }); } -} -exports.default = Provider; +}; +exports.main = main; /***/ }), -/***/ 36: -/***/ (function(__unusedmodule, exports) { +/***/ 1962: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.removeDuplicateSlashes = exports.transform = void 0; -/** - * Matches a sequence of two or more consecutive slashes, excluding the first two slashes at the beginning of the string. - * The latter is due to the presence of the device path at the beginning of the UNC path. - * @todo rewrite to negative lookbehind with the next major release. - */ -const DOUBLE_SLASH_RE = /(?!^)\/{2,}/g; -function transform(patterns) { - return patterns.map((pattern) => removeDuplicateSlashes(pattern)); -} -exports.transform = transform; -/** - * This package only works with forward slashes as a path separator. - * Because of this, we cannot use the standard `path.normalize` method, because on Windows platform it will use of backslashes. - */ -function removeDuplicateSlashes(pattern) { - return pattern.replace(DOUBLE_SLASH_RE, '/'); +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var _a; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.getCmdPath = exports.tryGetExecutablePath = exports.isRooted = exports.isDirectory = exports.exists = exports.IS_WINDOWS = exports.unlink = exports.symlink = exports.stat = exports.rmdir = exports.rename = exports.readlink = exports.readdir = exports.mkdir = exports.lstat = exports.copyFile = exports.chmod = void 0; +const fs = __importStar(__nccwpck_require__(7147)); +const path = __importStar(__nccwpck_require__(1017)); +_a = fs.promises, exports.chmod = _a.chmod, exports.copyFile = _a.copyFile, exports.lstat = _a.lstat, exports.mkdir = _a.mkdir, exports.readdir = _a.readdir, exports.readlink = _a.readlink, exports.rename = _a.rename, exports.rmdir = _a.rmdir, exports.stat = _a.stat, exports.symlink = _a.symlink, exports.unlink = _a.unlink; +exports.IS_WINDOWS = process.platform === 'win32'; +function exists(fsPath) { + return __awaiter(this, void 0, void 0, function* () { + try { + yield exports.stat(fsPath); + } + catch (err) { + if (err.code === 'ENOENT') { + return false; + } + throw err; + } + return true; + }); } -exports.removeDuplicateSlashes = removeDuplicateSlashes; - - -/***/ }), - -/***/ 42: -/***/ (function(__unusedmodule, exports, __webpack_require__) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -exports.merge = void 0; -const merge2 = __webpack_require__(538); -function merge(streams) { - const mergedStream = merge2(streams); - streams.forEach((stream) => { - stream.once('error', (error) => mergedStream.emit('error', error)); +exports.exists = exists; +function isDirectory(fsPath, useStat = false) { + return __awaiter(this, void 0, void 0, function* () { + const stats = useStat ? yield exports.stat(fsPath) : yield exports.lstat(fsPath); + return stats.isDirectory(); }); - mergedStream.once('close', () => propagateCloseEventToSources(streams)); - mergedStream.once('end', () => propagateCloseEventToSources(streams)); - return mergedStream; } -exports.merge = merge; -function propagateCloseEventToSources(streams) { - streams.forEach((stream) => stream.emit('close')); +exports.isDirectory = isDirectory; +/** + * On OSX/Linux, true if path starts with '/'. On Windows, true for paths like: + * \, \hello, \\hello\share, C:, and C:\hello (and corresponding alternate separator cases). + */ +function isRooted(p) { + p = normalizeSeparators(p); + if (!p) { + throw new Error('isRooted() parameter "p" cannot be empty'); + } + if (exports.IS_WINDOWS) { + return (p.startsWith('\\') || /^[A-Z]:/i.test(p) // e.g. \ or \hello or \\hello + ); // e.g. 
C: or C:\hello + } + return p.startsWith('/'); } - - -/***/ }), - -/***/ 43: -/***/ (function(__unusedmodule, exports) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -exports.createDirentFromStats = void 0; -class DirentFromStats { - constructor(name, stats) { - this.name = name; - this.isBlockDevice = stats.isBlockDevice.bind(stats); - this.isCharacterDevice = stats.isCharacterDevice.bind(stats); - this.isDirectory = stats.isDirectory.bind(stats); - this.isFIFO = stats.isFIFO.bind(stats); - this.isFile = stats.isFile.bind(stats); - this.isSocket = stats.isSocket.bind(stats); - this.isSymbolicLink = stats.isSymbolicLink.bind(stats); +exports.isRooted = isRooted; +/** + * Best effort attempt to determine whether a file exists and is executable. + * @param filePath file path to check + * @param extensions additional file extensions to try + * @return if file exists and is executable, returns the file path. otherwise empty string. + */ +function tryGetExecutablePath(filePath, extensions) { + return __awaiter(this, void 0, void 0, function* () { + let stats = undefined; + try { + // test file exists + stats = yield exports.stat(filePath); + } + catch (err) { + if (err.code !== 'ENOENT') { + // eslint-disable-next-line no-console + console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`); + } + } + if (stats && stats.isFile()) { + if (exports.IS_WINDOWS) { + // on Windows, test for valid extension + const upperExt = path.extname(filePath).toUpperCase(); + if (extensions.some(validExt => validExt.toUpperCase() === upperExt)) { + return filePath; + } + } + else { + if (isUnixExecutable(stats)) { + return filePath; + } + } + } + // try each extension + const originalFilePath = filePath; + for (const extension of extensions) { + filePath = originalFilePath + extension; + stats = undefined; + try { + stats = yield exports.stat(filePath); + } + catch (err) { + if (err.code !== 'ENOENT') { + // eslint-disable-next-line no-console + console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`); + } + } + if (stats && stats.isFile()) { + if (exports.IS_WINDOWS) { + // preserve the case of the actual file (since an extension was appended) + try { + const directory = path.dirname(filePath); + const upperName = path.basename(filePath).toUpperCase(); + for (const actualName of yield exports.readdir(directory)) { + if (upperName === actualName.toUpperCase()) { + filePath = path.join(directory, actualName); + break; + } + } + } + catch (err) { + // eslint-disable-next-line no-console + console.log(`Unexpected error attempting to determine the actual case of the file '${filePath}': ${err}`); + } + return filePath; + } + else { + if (isUnixExecutable(stats)) { + return filePath; + } + } + } + } + return ''; + }); +} +exports.tryGetExecutablePath = tryGetExecutablePath; +function normalizeSeparators(p) { + p = p || ''; + if (exports.IS_WINDOWS) { + // convert slashes on Windows + p = p.replace(/\//g, '\\'); + // remove redundant slashes + return p.replace(/\\\\+/g, '\\'); } + // remove redundant slashes + return p.replace(/\/\/+/g, '/'); } -function createDirentFromStats(name, stats) { - return new DirentFromStats(name, stats); +// on Mac/Linux, test the execute bit +// R W X R W X R W X +// 256 128 64 32 16 8 4 2 1 +function isUnixExecutable(stats) { + return ((stats.mode & 1) > 0 || + ((stats.mode & 8) > 0 && stats.gid === process.getgid()) || + ((stats.mode & 64) > 0 && stats.uid === 
process.getuid())); } -exports.createDirentFromStats = createDirentFromStats; - +// Get the path of cmd.exe in windows +function getCmdPath() { + var _a; + return (_a = process.env['COMSPEC']) !== null && _a !== void 0 ? _a : `cmd.exe`; +} +exports.getCmdPath = getCmdPath; +//# sourceMappingURL=io-util.js.map /***/ }), -/***/ 45: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 7351: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { "use strict"; - -// Dependencies - -var parseUrl = __webpack_require__(800), - isSsh = __webpack_require__(720); - +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.findInPath = exports.which = exports.mkdirP = exports.rmRF = exports.mv = exports.cp = void 0; +const assert_1 = __nccwpck_require__(9491); +const childProcess = __importStar(__nccwpck_require__(2081)); +const path = __importStar(__nccwpck_require__(1017)); +const util_1 = __nccwpck_require__(3837); +const ioUtil = __importStar(__nccwpck_require__(1962)); +const exec = util_1.promisify(childProcess.exec); +const execFile = util_1.promisify(childProcess.execFile); /** - * gitUp - * Parses the input url. - * - * @name gitUp - * @function - * @param {String} input The input url. - * @return {Object} An object containing the following fields: + * Copies a file or folder. + * Based off of shelljs - https://github.com/shelljs/shelljs/blob/9237f66c52e5daa40458f94f9565e18e8132f5a6/src/cp.js * - * - `protocols` (Array): An array with the url protocols (usually it has one element). - * - `port` (null|Number): The domain port. - * - `resource` (String): The url domain (including subdomains). - * - `user` (String): The authentication user (usually for ssh urls). - * - `pathname` (String): The url pathname. - * - `hash` (String): The url hash. - * - `search` (String): The url querystring value. - * - `href` (String): The input url. - * - `protocol` (String): The git url protocol. - * - `token` (String): The oauth token (could appear in the https urls). 
+ * @param source source path + * @param dest destination path + * @param options optional. See CopyOptions. */ -function gitUp(input) { - var output = parseUrl(input); - output.token = ""; - - if (output.password === "x-oauth-basic") { - output.token = output.user; - } else if (output.user === "x-token-auth") { - output.token = output.password; - } - - if (isSsh(output.protocols) || output.protocols.length === 0 && isSsh(input)) { - output.protocol = "ssh"; - } else if (output.protocols.length) { - output.protocol = output.protocols[0]; - } else { - output.protocol = "file"; - output.protocols = ["file"]; - } - - output.href = output.href.replace(/\/$/, ""); - return output; -} - -module.exports = gitUp; - -/***/ }), - -/***/ 59: -/***/ (function(module) { - -module.exports = require("assert"); - -/***/ }), - -/***/ 61: -/***/ (function(module, __unusedexports, __webpack_require__) { - -var Buffer = __webpack_require__(149).Buffer - -// prototype class for hash functions -function Hash (blockSize, finalSize) { - this._block = Buffer.alloc(blockSize) - this._finalSize = finalSize - this._blockSize = blockSize - this._len = 0 -} - -Hash.prototype.update = function (data, enc) { - if (typeof data === 'string') { - enc = enc || 'utf8' - data = Buffer.from(data, enc) - } - - var block = this._block - var blockSize = this._blockSize - var length = data.length - var accum = this._len - - for (var offset = 0; offset < length;) { - var assigned = accum % blockSize - var remainder = Math.min(length - offset, blockSize - assigned) - - for (var i = 0; i < remainder; i++) { - block[assigned + i] = data[offset + i] - } - - accum += remainder - offset += remainder - - if ((accum % blockSize) === 0) { - this._update(block) - } - } - - this._len += length - return this -} - -Hash.prototype.digest = function (enc) { - var rem = this._len % this._blockSize - - this._block[rem] = 0x80 - - // zero (rem + 1) trailing bits, where (rem + 1) is the smallest - // non-negative solution to the equation (length + 1 + (rem + 1)) === finalSize mod blockSize - this._block.fill(0, rem + 1) - - if (rem >= this._finalSize) { - this._update(this._block) - this._block.fill(0) - } - - var bits = this._len * 8 - - // uint32 - if (bits <= 0xffffffff) { - this._block.writeUInt32BE(bits, this._blockSize - 4) - - // uint64 - } else { - var lowBits = (bits & 0xffffffff) >>> 0 - var highBits = (bits - lowBits) / 0x100000000 - - this._block.writeUInt32BE(highBits, this._blockSize - 8) - this._block.writeUInt32BE(lowBits, this._blockSize - 4) - } - - this._update(this._block) - var hash = this._hash() - - return enc ? hash.toString(enc) : hash -} - -Hash.prototype._update = function () { - throw new Error('_update must be implemented by subclass') +function cp(source, dest, options = {}) { + return __awaiter(this, void 0, void 0, function* () { + const { force, recursive, copySourceDirectory } = readCopyOptions(options); + const destStat = (yield ioUtil.exists(dest)) ? yield ioUtil.stat(dest) : null; + // Dest is an existing file, but not forcing + if (destStat && destStat.isFile() && !force) { + return; + } + // If dest is an existing directory, should copy inside. + const newDest = destStat && destStat.isDirectory() && copySourceDirectory + ? path.join(dest, path.basename(source)) + : dest; + if (!(yield ioUtil.exists(source))) { + throw new Error(`no such file or directory: ${source}`); + } + const sourceStat = yield ioUtil.stat(source); + if (sourceStat.isDirectory()) { + if (!recursive) { + throw new Error(`Failed to copy. 
${source} is a directory, but tried to copy without recursive flag.`); + } + else { + yield cpDirRecursive(source, newDest, 0, force); + } + } + else { + if (path.relative(source, newDest) === '') { + // a file cannot be copied to itself + throw new Error(`'${newDest}' and '${source}' are the same file`); + } + yield copyFile(source, newDest, force); + } + }); } - -module.exports = Hash - - -/***/ }), - -/***/ 74: -/***/ (function(module, __unusedexports, __webpack_require__) { - -"use strict"; - - -const util = __webpack_require__(669); -const braces = __webpack_require__(783); -const picomatch = __webpack_require__(827); -const utils = __webpack_require__(265); -const isEmptyString = val => val === '' || val === './'; - +exports.cp = cp; /** - * Returns an array of strings that match one or more glob patterns. + * Moves a path. * - * ```js - * const mm = require('micromatch'); - * // mm(list, patterns[, options]); + * @param source source path + * @param dest destination path + * @param options optional. See MoveOptions. + */ +function mv(source, dest, options = {}) { + return __awaiter(this, void 0, void 0, function* () { + if (yield ioUtil.exists(dest)) { + let destExists = true; + if (yield ioUtil.isDirectory(dest)) { + // If dest is directory copy src into dest + dest = path.join(dest, path.basename(source)); + destExists = yield ioUtil.exists(dest); + } + if (destExists) { + if (options.force == null || options.force) { + yield rmRF(dest); + } + else { + throw new Error('Destination already exists'); + } + } + } + yield mkdirP(path.dirname(dest)); + yield ioUtil.rename(source, dest); + }); +} +exports.mv = mv; +/** + * Remove a path recursively with force * - * console.log(mm(['a.js', 'a.txt'], ['*.js'])); - * //=> [ 'a.js' ] - * ``` - * @param {String|Array} `list` List of strings to match. - * @param {String|Array} `patterns` One or more glob patterns to use for matching. - * @param {Object} `options` See available [options](#options) - * @return {Array} Returns an array of matches - * @summary false - * @api public + * @param inputPath path to remove */ - -const micromatch = (list, patterns, options) => { - patterns = [].concat(patterns); - list = [].concat(list); - - let omit = new Set(); - let keep = new Set(); - let items = new Set(); - let negatives = 0; - - let onResult = state => { - items.add(state.output); - if (options && options.onResult) { - options.onResult(state); - } - }; - - for (let i = 0; i < patterns.length; i++) { - let isMatch = picomatch(String(patterns[i]), { ...options, onResult }, true); - let negated = isMatch.state.negated || isMatch.state.negatedExtglob; - if (negated) negatives++; - - for (let item of list) { - let matched = isMatch(item, true); - - let match = negated ? !matched.isMatch : matched.isMatch; - if (!match) continue; - - if (negated) { - omit.add(matched.output); - } else { - omit.delete(matched.output); - keep.add(matched.output); - } - } - } - - let result = negatives === patterns.length ? [...items] : [...keep]; - let matches = result.filter(item => !omit.has(item)); - - if (options && matches.length === 0) { - if (options.failglob === true) { - throw new Error(`No matches found for "${patterns.join(', ')}"`); - } - - if (options.nonull === true || options.nullglob === true) { - return options.unescape ? 
patterns.map(p => p.replace(/\\/g, '')) : patterns; - } - } - - return matches; -}; - -/** - * Backwards compatibility - */ - -micromatch.match = micromatch; - -/** - * Returns a matcher function from the given glob `pattern` and `options`. - * The returned function takes a string to match as its only argument and returns - * true if the string is a match. - * - * ```js - * const mm = require('micromatch'); - * // mm.matcher(pattern[, options]); - * - * const isMatch = mm.matcher('*.!(*a)'); - * console.log(isMatch('a.a')); //=> false - * console.log(isMatch('a.b')); //=> true - * ``` - * @param {String} `pattern` Glob pattern - * @param {Object} `options` - * @return {Function} Returns a matcher function. - * @api public - */ - -micromatch.matcher = (pattern, options) => picomatch(pattern, options); - -/** - * Returns true if **any** of the given glob `patterns` match the specified `string`. - * - * ```js - * const mm = require('micromatch'); - * // mm.isMatch(string, patterns[, options]); - * - * console.log(mm.isMatch('a.a', ['b.*', '*.a'])); //=> true - * console.log(mm.isMatch('a.a', 'b.*')); //=> false - * ``` - * @param {String} `str` The string to test. - * @param {String|Array} `patterns` One or more glob patterns to use for matching. - * @param {Object} `[options]` See available [options](#options). - * @return {Boolean} Returns true if any patterns match `str` - * @api public - */ - -micromatch.isMatch = (str, patterns, options) => picomatch(patterns, options)(str); - -/** - * Backwards compatibility - */ - -micromatch.any = micromatch.isMatch; - +function rmRF(inputPath) { + return __awaiter(this, void 0, void 0, function* () { + if (ioUtil.IS_WINDOWS) { + // Node doesn't provide a delete operation, only an unlink function. This means that if the file is being used by another + // program (e.g. antivirus), it won't be deleted. To address this, we shell out the work to rd/del. + // Check for invalid characters + // https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file + if (/[*"<>|]/.test(inputPath)) { + throw new Error('File path must not contain `*`, `"`, `<`, `>` or `|` on Windows'); + } + try { + const cmdPath = ioUtil.getCmdPath(); + if (yield ioUtil.isDirectory(inputPath, true)) { + yield exec(`${cmdPath} /s /c "rd /s /q "%inputPath%""`, { + env: { inputPath } + }); + } + else { + yield exec(`${cmdPath} /s /c "del /f /a "%inputPath%""`, { + env: { inputPath } + }); + } + } + catch (err) { + // if you try to delete a file that doesn't exist, desired result is achieved + // other errors are valid + if (err.code !== 'ENOENT') + throw err; + } + // Shelling out fails to remove a symlink folder with missing source, this unlink catches that + try { + yield ioUtil.unlink(inputPath); + } + catch (err) { + // if you try to delete a file that doesn't exist, desired result is achieved + // other errors are valid + if (err.code !== 'ENOENT') + throw err; + } + } + else { + let isDir = false; + try { + isDir = yield ioUtil.isDirectory(inputPath); + } + catch (err) { + // if you try to delete a file that doesn't exist, desired result is achieved + // other errors are valid + if (err.code !== 'ENOENT') + throw err; + return; + } + if (isDir) { + yield execFile(`rm`, [`-rf`, `${inputPath}`]); + } + else { + yield ioUtil.unlink(inputPath); + } + } + }); +} +exports.rmRF = rmRF; /** - * Returns a list of strings that _**do not match any**_ of the given `patterns`. 
- * - * ```js - * const mm = require('micromatch'); - * // mm.not(list, patterns[, options]); + * Make a directory. Creates the full path with folders in between + * Will throw if it fails * - * console.log(mm.not(['a.a', 'b.b', 'c.c'], '*.a')); - * //=> ['b.b', 'c.c'] - * ``` - * @param {Array} `list` Array of strings to match. - * @param {String|Array} `patterns` One or more glob pattern to use for matching. - * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Array} Returns an array of strings that **do not match** the given patterns. - * @api public + * @param fsPath path to create + * @returns Promise */ - -micromatch.not = (list, patterns, options = {}) => { - patterns = [].concat(patterns).map(String); - let result = new Set(); - let items = []; - - let onResult = state => { - if (options.onResult) options.onResult(state); - items.push(state.output); - }; - - let matches = micromatch(list, patterns, { ...options, onResult }); - - for (let item of items) { - if (!matches.includes(item)) { - result.add(item); - } - } - return [...result]; -}; - +function mkdirP(fsPath) { + return __awaiter(this, void 0, void 0, function* () { + assert_1.ok(fsPath, 'a path argument must be provided'); + yield ioUtil.mkdir(fsPath, { recursive: true }); + }); +} +exports.mkdirP = mkdirP; /** - * Returns true if the given `string` contains the given pattern. Similar - * to [.isMatch](#isMatch) but the pattern can match any part of the string. - * - * ```js - * var mm = require('micromatch'); - * // mm.contains(string, pattern[, options]); + * Returns path of a tool had the tool actually been invoked. Resolves via paths. + * If you check and the tool does not exist, it will throw. * - * console.log(mm.contains('aa/bb/cc', '*b')); - * //=> true - * console.log(mm.contains('aa/bb/cc', '*d')); - * //=> false - * ``` - * @param {String} `str` The string to match. - * @param {String|Array} `patterns` Glob pattern to use for matching. - * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Boolean} Returns true if any of the patterns matches any part of `str`. - * @api public + * @param tool name of the tool + * @param check whether to check if tool exists + * @returns Promise path to tool */ - -micromatch.contains = (str, pattern, options) => { - if (typeof str !== 'string') { - throw new TypeError(`Expected a string: "${util.inspect(str)}"`); - } - - if (Array.isArray(pattern)) { - return pattern.some(p => micromatch.contains(str, p, options)); - } - - if (typeof pattern === 'string') { - if (isEmptyString(str) || isEmptyString(pattern)) { - return false; - } - - if (str.includes(pattern) || (str.startsWith('./') && str.slice(2).includes(pattern))) { - return true; - } - } - - return micromatch.isMatch(str, pattern, { ...options, contains: true }); -}; - +function which(tool, check) { + return __awaiter(this, void 0, void 0, function* () { + if (!tool) { + throw new Error("parameter 'tool' is required"); + } + // recursive when check=true + if (check) { + const result = yield which(tool, false); + if (!result) { + if (ioUtil.IS_WINDOWS) { + throw new Error(`Unable to locate executable file: ${tool}. Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also verify the file has a valid extension for an executable file.`); + } + else { + throw new Error(`Unable to locate executable file: ${tool}. 
Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also check the file mode to verify the file is executable.`); + } + } + return result; + } + const matches = yield findInPath(tool); + if (matches && matches.length > 0) { + return matches[0]; + } + return ''; + }); +} +exports.which = which; /** - * Filter the keys of the given object with the given `glob` pattern - * and `options`. Does not attempt to match nested keys. If you need this feature, - * use [glob-object][] instead. - * - * ```js - * const mm = require('micromatch'); - * // mm.matchKeys(object, patterns[, options]); + * Returns a list of all occurrences of the given tool on the system path. * - * const obj = { aa: 'a', ab: 'b', ac: 'c' }; - * console.log(mm.matchKeys(obj, '*b')); - * //=> { ab: 'b' } - * ``` - * @param {Object} `object` The object with keys to filter. - * @param {String|Array} `patterns` One or more glob patterns to use for matching. - * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Object} Returns an object with only keys that match the given patterns. - * @api public + * @returns Promise the paths of the tool */ +function findInPath(tool) { + return __awaiter(this, void 0, void 0, function* () { + if (!tool) { + throw new Error("parameter 'tool' is required"); + } + // build the list of extensions to try + const extensions = []; + if (ioUtil.IS_WINDOWS && process.env['PATHEXT']) { + for (const extension of process.env['PATHEXT'].split(path.delimiter)) { + if (extension) { + extensions.push(extension); + } + } + } + // if it's rooted, return it if exists. otherwise return empty. + if (ioUtil.isRooted(tool)) { + const filePath = yield ioUtil.tryGetExecutablePath(tool, extensions); + if (filePath) { + return [filePath]; + } + return []; + } + // if any path separators, return empty + if (tool.includes(path.sep)) { + return []; + } + // build the list of directories + // + // Note, technically "where" checks the current directory on Windows. From a toolkit perspective, + // it feels like we should not do this. Checking the current directory seems like more of a use + // case of a shell, and the which() function exposed by the toolkit should strive for consistency + // across platforms. + const directories = []; + if (process.env.PATH) { + for (const p of process.env.PATH.split(path.delimiter)) { + if (p) { + directories.push(p); + } + } + } + // find all matches + const matches = []; + for (const directory of directories) { + const filePath = yield ioUtil.tryGetExecutablePath(path.join(directory, tool), extensions); + if (filePath) { + matches.push(filePath); + } + } + return matches; + }); +} +exports.findInPath = findInPath; +function readCopyOptions(options) { + const force = options.force == null ? true : options.force; + const recursive = Boolean(options.recursive); + const copySourceDirectory = options.copySourceDirectory == null + ? 
true + : Boolean(options.copySourceDirectory); + return { force, recursive, copySourceDirectory }; +} +function cpDirRecursive(sourceDir, destDir, currentDepth, force) { + return __awaiter(this, void 0, void 0, function* () { + // Ensure there is not a run away recursive copy + if (currentDepth >= 255) + return; + currentDepth++; + yield mkdirP(destDir); + const files = yield ioUtil.readdir(sourceDir); + for (const fileName of files) { + const srcFile = `${sourceDir}/${fileName}`; + const destFile = `${destDir}/${fileName}`; + const srcFileStat = yield ioUtil.lstat(srcFile); + if (srcFileStat.isDirectory()) { + // Recurse + yield cpDirRecursive(srcFile, destFile, currentDepth, force); + } + else { + yield copyFile(srcFile, destFile, force); + } + } + // Change the mode for the newly created directory + yield ioUtil.chmod(destDir, (yield ioUtil.stat(sourceDir)).mode); + }); +} +// Buffered file copy +function copyFile(srcFile, destFile, force) { + return __awaiter(this, void 0, void 0, function* () { + if ((yield ioUtil.lstat(srcFile)).isSymbolicLink()) { + // unlink/re-link it + try { + yield ioUtil.lstat(destFile); + yield ioUtil.unlink(destFile); + } + catch (e) { + // Try to override file permission + if (e.code === 'EPERM') { + yield ioUtil.chmod(destFile, '0666'); + yield ioUtil.unlink(destFile); + } + // other errors = it doesn't exist, no work to do + } + // Copy over symlink + const symlinkFull = yield ioUtil.readlink(srcFile); + yield ioUtil.symlink(symlinkFull, destFile, ioUtil.IS_WINDOWS ? 'junction' : null); + } + else if (!(yield ioUtil.exists(destFile)) || force) { + yield ioUtil.copyFile(srcFile, destFile); + } + }); +} +//# sourceMappingURL=io.js.map -micromatch.matchKeys = (obj, patterns, options) => { - if (!utils.isObject(obj)) { - throw new TypeError('Expected the first argument to be an object'); - } - let keys = micromatch(Object.keys(obj), patterns, options); - let res = {}; - for (let key of keys) res[key] = obj[key]; - return res; -}; - -/** - * Returns true if some of the strings in the given `list` match any of the given glob `patterns`. - * - * ```js - * const mm = require('micromatch'); - * // mm.some(list, patterns[, options]); - * - * console.log(mm.some(['foo.js', 'bar.js'], ['*.js', '!foo.js'])); - * // true - * console.log(mm.some(['foo.js'], ['*.js', '!foo.js'])); - * // false - * ``` - * @param {String|Array} `list` The string or array of strings to test. Returns as soon as the first match is found. - * @param {String|Array} `patterns` One or more glob patterns to use for matching. 
- * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Boolean} Returns true if any `patterns` matches any of the strings in `list` - * @api public - */ +/***/ }), -micromatch.some = (list, patterns, options) => { - let items = [].concat(list); +/***/ 3803: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - for (let pattern of [].concat(patterns)) { - let isMatch = picomatch(String(pattern), options); - if (items.some(item => isMatch(item))) { - return true; - } - } - return false; +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.createFileSystemAdapter = exports.FILE_SYSTEM_ADAPTER = void 0; +const fs = __nccwpck_require__(7147); +exports.FILE_SYSTEM_ADAPTER = { + lstat: fs.lstat, + stat: fs.stat, + lstatSync: fs.lstatSync, + statSync: fs.statSync, + readdir: fs.readdir, + readdirSync: fs.readdirSync }; +function createFileSystemAdapter(fsMethods) { + if (fsMethods === undefined) { + return exports.FILE_SYSTEM_ADAPTER; + } + return Object.assign(Object.assign({}, exports.FILE_SYSTEM_ADAPTER), fsMethods); +} +exports.createFileSystemAdapter = createFileSystemAdapter; -/** - * Returns true if every string in the given `list` matches - * any of the given glob `patterns`. - * - * ```js - * const mm = require('micromatch'); - * // mm.every(list, patterns[, options]); - * - * console.log(mm.every('foo.js', ['foo.js'])); - * // true - * console.log(mm.every(['foo.js', 'bar.js'], ['*.js'])); - * // true - * console.log(mm.every(['foo.js', 'bar.js'], ['*.js', '!foo.js'])); - * // false - * console.log(mm.every(['foo.js'], ['*.js', '!foo.js'])); - * // false - * ``` - * @param {String|Array} `list` The string or array of strings to test. - * @param {String|Array} `patterns` One or more glob patterns to use for matching. - * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Boolean} Returns true if all `patterns` matches all of the strings in `list` - * @api public - */ -micromatch.every = (list, patterns, options) => { - let items = [].concat(list); +/***/ }), - for (let pattern of [].concat(patterns)) { - let isMatch = picomatch(String(pattern), options); - if (!items.every(item => isMatch(item))) { - return false; - } - } - return true; -}; +/***/ 8838: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.IS_SUPPORT_READDIR_WITH_FILE_TYPES = void 0; +const NODE_PROCESS_VERSION_PARTS = process.versions.node.split('.'); +if (NODE_PROCESS_VERSION_PARTS[0] === undefined || NODE_PROCESS_VERSION_PARTS[1] === undefined) { + throw new Error(`Unexpected behavior. The 'process.versions.node' variable has invalid value: ${process.versions.node}`); +} +const MAJOR_VERSION = Number.parseInt(NODE_PROCESS_VERSION_PARTS[0], 10); +const MINOR_VERSION = Number.parseInt(NODE_PROCESS_VERSION_PARTS[1], 10); +const SUPPORTED_MAJOR_VERSION = 10; +const SUPPORTED_MINOR_VERSION = 10; +const IS_MATCHED_BY_MAJOR = MAJOR_VERSION > SUPPORTED_MAJOR_VERSION; +const IS_MATCHED_BY_MAJOR_AND_MINOR = MAJOR_VERSION === SUPPORTED_MAJOR_VERSION && MINOR_VERSION >= SUPPORTED_MINOR_VERSION; /** - * Returns true if **all** of the given `patterns` match - * the specified string. 
- * - * ```js - * const mm = require('micromatch'); - * // mm.all(string, patterns[, options]); - * - * console.log(mm.all('foo.js', ['foo.js'])); - * // true - * - * console.log(mm.all('foo.js', ['*.js', '!foo.js'])); - * // false - * - * console.log(mm.all('foo.js', ['*.js', 'foo.js'])); - * // true - * - * console.log(mm.all('foo.js', ['*.js', 'f*', '*o*', '*o.js'])); - * // true - * ``` - * @param {String|Array} `str` The string to test. - * @param {String|Array} `patterns` One or more glob patterns to use for matching. - * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Boolean} Returns true if any patterns match `str` - * @api public + * IS `true` for Node.js 10.10 and greater. */ +exports.IS_SUPPORT_READDIR_WITH_FILE_TYPES = IS_MATCHED_BY_MAJOR || IS_MATCHED_BY_MAJOR_AND_MINOR; -micromatch.all = (str, patterns, options) => { - if (typeof str !== 'string') { - throw new TypeError(`Expected a string: "${util.inspect(str)}"`); - } - return [].concat(patterns).every(p => picomatch(p, options)(str)); -}; +/***/ }), -/** - * Returns an array of matches captured by `pattern` in `string, or `null` if the pattern did not match. - * - * ```js - * const mm = require('micromatch'); - * // mm.capture(pattern, string[, options]); - * - * console.log(mm.capture('test/*.js', 'test/foo.js')); - * //=> ['foo'] - * console.log(mm.capture('test/*.js', 'foo/bar.css')); - * //=> null - * ``` - * @param {String} `glob` Glob pattern to use for matching. - * @param {String} `input` String to match - * @param {Object} `options` See available [options](#options) for changing how matches are performed - * @return {Array|null} Returns an array of captures if the input matches the glob pattern, otherwise `null`. - * @api public - */ - -micromatch.capture = (glob, input, options) => { - let posix = utils.isWindows(options); - let regex = picomatch.makeRe(String(glob), { ...options, capture: true }); - let match = regex.exec(posix ? utils.toPosixSlashes(input) : input); - - if (match) { - return match.slice(1).map(v => v === void 0 ? '' : v); - } -}; - -/** - * Create a regular expression from the given glob `pattern`. - * - * ```js - * const mm = require('micromatch'); - * // mm.makeRe(pattern[, options]); - * - * console.log(mm.makeRe('*.js')); - * //=> /^(?:(\.[\\\/])?(?!\.)(?=.)[^\/]*?\.js)$/ - * ``` - * @param {String} `pattern` A glob pattern to convert to regex. - * @param {Object} `options` - * @return {RegExp} Returns a regex created from the given pattern. - * @api public - */ - -micromatch.makeRe = (...args) => picomatch.makeRe(...args); - -/** - * Scan a glob pattern to separate the pattern into segments. Used - * by the [split](#split) method. - * - * ```js - * const mm = require('micromatch'); - * const state = mm.scan(pattern[, options]); - * ``` - * @param {String} `pattern` - * @param {Object} `options` - * @return {Object} Returns an object with - * @api public - */ - -micromatch.scan = (...args) => picomatch.scan(...args); +/***/ 5667: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/** - * Parse a glob pattern to create the source string for a regular - * expression. - * - * ```js - * const mm = require('micromatch'); - * const state = mm(pattern[, options]); - * ``` - * @param {String} `glob` - * @param {Object} `options` - * @return {Object} Returns an object with useful properties and output to be used as regex source string. 
- * @api public - */ +"use strict"; -micromatch.parse = (patterns, options) => { - let res = []; - for (let pattern of [].concat(patterns || [])) { - for (let str of braces(String(pattern), options)) { - res.push(picomatch.parse(str, options)); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.Settings = exports.scandirSync = exports.scandir = void 0; +const async = __nccwpck_require__(4507); +const sync = __nccwpck_require__(9560); +const settings_1 = __nccwpck_require__(8662); +exports.Settings = settings_1.default; +function scandir(path, optionsOrSettingsOrCallback, callback) { + if (typeof optionsOrSettingsOrCallback === 'function') { + async.read(path, getSettings(), optionsOrSettingsOrCallback); + return; } - } - return res; -}; - -/** - * Process the given brace `pattern`. - * - * ```js - * const { braces } = require('micromatch'); - * console.log(braces('foo/{a,b,c}/bar')); - * //=> [ 'foo/(a|b|c)/bar' ] - * - * console.log(braces('foo/{a,b,c}/bar', { expand: true })); - * //=> [ 'foo/a/bar', 'foo/b/bar', 'foo/c/bar' ] - * ``` - * @param {String} `pattern` String with brace pattern to process. - * @param {Object} `options` Any [options](#options) to change how expansion is performed. See the [braces][] library for all available options. - * @return {Array} - * @api public - */ - -micromatch.braces = (pattern, options) => { - if (typeof pattern !== 'string') throw new TypeError('Expected a string'); - if ((options && options.nobrace === true) || !/\{.*\}/.test(pattern)) { - return [pattern]; - } - return braces(pattern, options); -}; - -/** - * Expand braces - */ - -micromatch.braceExpand = (pattern, options) => { - if (typeof pattern !== 'string') throw new TypeError('Expected a string'); - return micromatch.braces(pattern, { ...options, expand: true }); -}; - -/** - * Expose micromatch - */ - -module.exports = micromatch; + async.read(path, getSettings(optionsOrSettingsOrCallback), callback); +} +exports.scandir = scandir; +function scandirSync(path, optionsOrSettings) { + const settings = getSettings(optionsOrSettings); + return sync.read(path, settings); +} +exports.scandirSync = scandirSync; +function getSettings(settingsOrOptions = {}) { + if (settingsOrOptions instanceof settings_1.default) { + return settingsOrOptions; + } + return new settings_1.default(settingsOrOptions); +} /***/ }), -/***/ 75: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 4507: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const matcher_1 = __webpack_require__(320); -class PartialMatcher extends matcher_1.default { - match(filepath) { - const parts = filepath.split('/'); - const levels = parts.length; - const patterns = this._storage.filter((info) => !info.complete || info.segments.length > levels); - for (const pattern of patterns) { - const section = pattern.sections[0]; - /** - * In this case, the pattern has a globstar and we must read all directories unconditionally, - * but only if the level has reached the end of the first group. 
- * - * fixtures/{a,b}/** - * ^ true/false ^ always true - */ - if (!pattern.complete && levels > section.length) { - return true; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.readdir = exports.readdirWithFileTypes = exports.read = void 0; +const fsStat = __nccwpck_require__(109); +const rpl = __nccwpck_require__(5288); +const constants_1 = __nccwpck_require__(8838); +const utils = __nccwpck_require__(6297); +const common = __nccwpck_require__(3847); +function read(directory, settings, callback) { + if (!settings.stats && constants_1.IS_SUPPORT_READDIR_WITH_FILE_TYPES) { + readdirWithFileTypes(directory, settings, callback); + return; + } + readdir(directory, settings, callback); +} +exports.read = read; +function readdirWithFileTypes(directory, settings, callback) { + settings.fs.readdir(directory, { withFileTypes: true }, (readdirError, dirents) => { + if (readdirError !== null) { + callFailureCallback(callback, readdirError); + return; + } + const entries = dirents.map((dirent) => ({ + dirent, + name: dirent.name, + path: common.joinPathSegments(directory, dirent.name, settings.pathSegmentSeparator) + })); + if (!settings.followSymbolicLinks) { + callSuccessCallback(callback, entries); + return; + } + const tasks = entries.map((entry) => makeRplTaskEntry(entry, settings)); + rpl(tasks, (rplError, rplEntries) => { + if (rplError !== null) { + callFailureCallback(callback, rplError); + return; } - const match = parts.every((part, index) => { - const segment = pattern.segments[index]; - if (segment.dynamic && segment.patternRe.test(part)) { - return true; - } - if (!segment.dynamic && segment.pattern === part) { - return true; + callSuccessCallback(callback, rplEntries); + }); + }); +} +exports.readdirWithFileTypes = readdirWithFileTypes; +function makeRplTaskEntry(entry, settings) { + return (done) => { + if (!entry.dirent.isSymbolicLink()) { + done(null, entry); + return; + } + settings.fs.stat(entry.path, (statError, stats) => { + if (statError !== null) { + if (settings.throwErrorOnBrokenSymbolicLink) { + done(statError); + return; } - return false; - }); - if (match) { - return true; + done(null, entry); + return; } + entry.dirent = utils.fs.createDirentFromStats(entry.name, stats); + done(null, entry); + }); + }; +} +function readdir(directory, settings, callback) { + settings.fs.readdir(directory, (readdirError, names) => { + if (readdirError !== null) { + callFailureCallback(callback, readdirError); + return; } - return false; - } + const tasks = names.map((name) => { + const path = common.joinPathSegments(directory, name, settings.pathSegmentSeparator); + return (done) => { + fsStat.stat(path, settings.fsStatSettings, (error, stats) => { + if (error !== null) { + done(error); + return; + } + const entry = { + name, + path, + dirent: utils.fs.createDirentFromStats(name, stats) + }; + if (settings.stats) { + entry.stats = stats; + } + done(null, entry); + }); + }; + }); + rpl(tasks, (rplError, entries) => { + if (rplError !== null) { + callFailureCallback(callback, rplError); + return; + } + callSuccessCallback(callback, entries); + }); + }); +} +exports.readdir = readdir; +function callFailureCallback(callback, error) { + callback(error); +} +function callSuccessCallback(callback, result) { + callback(null, result); } -exports.default = PartialMatcher; /***/ }), -/***/ 78: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 3847: +/***/ ((__unused_webpack_module, exports) => { "use strict"; -Object.defineProperty(exports, 
"__esModule", { value: true }); -const sync_1 = __webpack_require__(519); -const provider_1 = __webpack_require__(2); -class ProviderSync extends provider_1.default { - constructor() { - super(...arguments); - this._reader = new sync_1.default(this._settings); - } - read(task) { - const root = this._getRootDirectory(task); - const options = this._getReaderOptions(task); - const entries = this.api(root, task, options); - return entries.map(options.transform); - } - api(root, task, options) { - if (task.dynamic) { - return this._reader.dynamic(root, options); - } - return this._reader.static(task.patterns, options); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.joinPathSegments = void 0; +function joinPathSegments(a, b, separator) { + /** + * The correct handling of cases when the first segment is a root (`/`, `C:/`) or UNC path (`//?/C:/`). + */ + if (a.endsWith(separator)) { + return a + b; } + return a + separator + b; } -exports.default = ProviderSync; +exports.joinPathSegments = joinPathSegments; /***/ }), -/***/ 81: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 9560: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -/* istanbul ignore file - this file is used purely as an entry-point */ -Object.defineProperty(exports, "__esModule", { value: true }); -const _1 = __webpack_require__(593); -(0, _1.main)({ - log: console, - env: process.env, -}).catch((err) => { - console.error(err); - process.exit(1); -}); - - -/***/ }), - -/***/ 87: -/***/ (function(module) { +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.readdir = exports.readdirWithFileTypes = exports.read = void 0; +const fsStat = __nccwpck_require__(109); +const constants_1 = __nccwpck_require__(8838); +const utils = __nccwpck_require__(6297); +const common = __nccwpck_require__(3847); +function read(directory, settings) { + if (!settings.stats && constants_1.IS_SUPPORT_READDIR_WITH_FILE_TYPES) { + return readdirWithFileTypes(directory, settings); + } + return readdir(directory, settings); +} +exports.read = read; +function readdirWithFileTypes(directory, settings) { + const dirents = settings.fs.readdirSync(directory, { withFileTypes: true }); + return dirents.map((dirent) => { + const entry = { + dirent, + name: dirent.name, + path: common.joinPathSegments(directory, dirent.name, settings.pathSegmentSeparator) + }; + if (entry.dirent.isSymbolicLink() && settings.followSymbolicLinks) { + try { + const stats = settings.fs.statSync(entry.path); + entry.dirent = utils.fs.createDirentFromStats(entry.name, stats); + } + catch (error) { + if (settings.throwErrorOnBrokenSymbolicLink) { + throw error; + } + } + } + return entry; + }); +} +exports.readdirWithFileTypes = readdirWithFileTypes; +function readdir(directory, settings) { + const names = settings.fs.readdirSync(directory); + return names.map((name) => { + const entryPath = common.joinPathSegments(directory, name, settings.pathSegmentSeparator); + const stats = fsStat.statSync(entryPath, settings.fsStatSettings); + const entry = { + name, + path: entryPath, + dirent: utils.fs.createDirentFromStats(name, stats) + }; + if (settings.stats) { + entry.stats = stats; + } + return entry; + }); +} +exports.readdir = readdir; -module.exports = require("os"); /***/ }), -/***/ 113: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 8662: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, 
"__esModule", { value: true }); -const stream_1 = __webpack_require__(608); -const provider_1 = __webpack_require__(2); -class ProviderAsync extends provider_1.default { - constructor() { - super(...arguments); - this._reader = new stream_1.default(this._settings); - } - read(task) { - const root = this._getRootDirectory(task); - const options = this._getReaderOptions(task); - const entries = []; - return new Promise((resolve, reject) => { - const stream = this.api(root, task, options); - stream.once('error', reject); - stream.on('data', (entry) => entries.push(options.transform(entry))); - stream.once('end', () => resolve(entries)); +Object.defineProperty(exports, "__esModule", ({ value: true })); +const path = __nccwpck_require__(1017); +const fsStat = __nccwpck_require__(109); +const fs = __nccwpck_require__(3803); +class Settings { + constructor(_options = {}) { + this._options = _options; + this.followSymbolicLinks = this._getValue(this._options.followSymbolicLinks, false); + this.fs = fs.createFileSystemAdapter(this._options.fs); + this.pathSegmentSeparator = this._getValue(this._options.pathSegmentSeparator, path.sep); + this.stats = this._getValue(this._options.stats, false); + this.throwErrorOnBrokenSymbolicLink = this._getValue(this._options.throwErrorOnBrokenSymbolicLink, true); + this.fsStatSettings = new fsStat.Settings({ + followSymbolicLink: this.followSymbolicLinks, + fs: this.fs, + throwErrorOnBrokenSymbolicLink: this.throwErrorOnBrokenSymbolicLink }); } - api(root, task, options) { - if (task.dynamic) { - return this._reader.dynamic(root, options); - } - return this._reader.static(task.patterns, options); + _getValue(option, value) { + return option !== null && option !== void 0 ? option : value; } } -exports.default = ProviderAsync; +exports["default"] = Settings; /***/ }), -/***/ 115: -/***/ (function(__unusedmodule, exports) { +/***/ 883: +/***/ ((__unused_webpack_module, exports) => { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.isEnoentCodeError = void 0; -function isEnoentCodeError(error) { - return error.code === 'ENOENT'; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.createDirentFromStats = void 0; +class DirentFromStats { + constructor(name, stats) { + this.name = name; + this.isBlockDevice = stats.isBlockDevice.bind(stats); + this.isCharacterDevice = stats.isCharacterDevice.bind(stats); + this.isDirectory = stats.isDirectory.bind(stats); + this.isFIFO = stats.isFIFO.bind(stats); + this.isFile = stats.isFile.bind(stats); + this.isSocket = stats.isSocket.bind(stats); + this.isSymbolicLink = stats.isSymbolicLink.bind(stats); + } } -exports.isEnoentCodeError = isEnoentCodeError; +function createDirentFromStats(name, stats) { + return new DirentFromStats(name, stats); +} +exports.createDirentFromStats = createDirentFromStats; /***/ }), -/***/ 124: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 6297: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -module.exports = __webpack_require__(543); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.fs = void 0; +const fs = __nccwpck_require__(883); +exports.fs = fs; /***/ }), -/***/ 129: -/***/ (function(module) { - -module.exports = require("child_process"); +/***/ 2987: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/***/ }), +"use strict"; -/***/ 136: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +Object.defineProperty(exports, 
"__esModule", ({ value: true })); +exports.createFileSystemAdapter = exports.FILE_SYSTEM_ADAPTER = void 0; +const fs = __nccwpck_require__(7147); +exports.FILE_SYSTEM_ADAPTER = { + lstat: fs.lstat, + stat: fs.stat, + lstatSync: fs.lstatSync, + statSync: fs.statSync +}; +function createFileSystemAdapter(fsMethods) { + if (fsMethods === undefined) { + return exports.FILE_SYSTEM_ADAPTER; + } + return Object.assign(Object.assign({}, exports.FILE_SYSTEM_ADAPTER), fsMethods); +} +exports.createFileSystemAdapter = createFileSystemAdapter; -"use strict"; +/***/ }), -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. +/***/ 109: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/* eslint-disable space-unary-ops */ +"use strict"; -var utils = __webpack_require__(999); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.statSync = exports.stat = exports.Settings = void 0; +const async = __nccwpck_require__(4147); +const sync = __nccwpck_require__(4527); +const settings_1 = __nccwpck_require__(2410); +exports.Settings = settings_1.default; +function stat(path, optionsOrSettingsOrCallback, callback) { + if (typeof optionsOrSettingsOrCallback === 'function') { + async.read(path, getSettings(), optionsOrSettingsOrCallback); + return; + } + async.read(path, getSettings(optionsOrSettingsOrCallback), callback); +} +exports.stat = stat; +function statSync(path, optionsOrSettings) { + const settings = getSettings(optionsOrSettings); + return sync.read(path, settings); +} +exports.statSync = statSync; +function getSettings(settingsOrOptions = {}) { + if (settingsOrOptions instanceof settings_1.default) { + return settingsOrOptions; + } + return new settings_1.default(settingsOrOptions); +} -/* Public constants ==========================================================*/ -/* ===========================================================================*/ +/***/ }), -//var Z_FILTERED = 1; -//var Z_HUFFMAN_ONLY = 2; -//var Z_RLE = 3; -var Z_FIXED = 4; -//var Z_DEFAULT_STRATEGY = 0; +/***/ 4147: +/***/ ((__unused_webpack_module, exports) => { -/* Possible values of the data_type field (though see inflate()) */ -var Z_BINARY = 0; -var Z_TEXT = 1; -//var Z_ASCII = 1; // = Z_TEXT -var Z_UNKNOWN = 2; +"use strict"; -/*============================================================================*/ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.read = void 0; +function read(path, settings, callback) { + settings.fs.lstat(path, (lstatError, lstat) => { + if (lstatError !== null) { + callFailureCallback(callback, lstatError); + return; + } + if 
(!lstat.isSymbolicLink() || !settings.followSymbolicLink) { + callSuccessCallback(callback, lstat); + return; + } + settings.fs.stat(path, (statError, stat) => { + if (statError !== null) { + if (settings.throwErrorOnBrokenSymbolicLink) { + callFailureCallback(callback, statError); + return; + } + callSuccessCallback(callback, lstat); + return; + } + if (settings.markSymbolicLink) { + stat.isSymbolicLink = () => true; + } + callSuccessCallback(callback, stat); + }); + }); +} +exports.read = read; +function callFailureCallback(callback, error) { + callback(error); +} +function callSuccessCallback(callback, result) { + callback(null, result); +} -function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } +/***/ }), -// From zutil.h +/***/ 4527: +/***/ ((__unused_webpack_module, exports) => { -var STORED_BLOCK = 0; -var STATIC_TREES = 1; -var DYN_TREES = 2; -/* The three kinds of block type */ +"use strict"; -var MIN_MATCH = 3; -var MAX_MATCH = 258; -/* The minimum and maximum match lengths */ - -// From deflate.h -/* =========================================================================== - * Internal compression state. - */ - -var LENGTH_CODES = 29; -/* number of length codes, not counting the special END_BLOCK code */ - -var LITERALS = 256; -/* number of literal bytes 0..255 */ - -var L_CODES = LITERALS + 1 + LENGTH_CODES; -/* number of Literal or Length codes, including the END_BLOCK code */ - -var D_CODES = 30; -/* number of distance codes */ - -var BL_CODES = 19; -/* number of codes used to transfer the bit lengths */ - -var HEAP_SIZE = 2 * L_CODES + 1; -/* maximum heap size */ - -var MAX_BITS = 15; -/* All codes must not exceed MAX_BITS bits */ - -var Buf_size = 16; -/* size of bit buffer in bi_buf */ - - -/* =========================================================================== - * Constants - */ - -var MAX_BL_BITS = 7; -/* Bit length codes must not exceed MAX_BL_BITS bits */ - -var END_BLOCK = 256; -/* end of block literal code */ - -var REP_3_6 = 16; -/* repeat previous bit length 3-6 times (2 bits of repeat count) */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.read = void 0; +function read(path, settings) { + const lstat = settings.fs.lstatSync(path); + if (!lstat.isSymbolicLink() || !settings.followSymbolicLink) { + return lstat; + } + try { + const stat = settings.fs.statSync(path); + if (settings.markSymbolicLink) { + stat.isSymbolicLink = () => true; + } + return stat; + } + catch (error) { + if (!settings.throwErrorOnBrokenSymbolicLink) { + return lstat; + } + throw error; + } +} +exports.read = read; -var REPZ_3_10 = 17; -/* repeat a zero length 3-10 times (3 bits of repeat count) */ -var REPZ_11_138 = 18; -/* repeat a zero length 11-138 times (7 bits of repeat count) */ +/***/ }), -/* eslint-disable comma-spacing,array-bracket-spacing */ -var extra_lbits = /* extra bits for each length code */ - [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]; +/***/ 2410: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -var extra_dbits = /* extra bits for each distance code */ - [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]; +"use strict"; -var extra_blbits = /* extra bits for each bit length code */ - [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const fs = __nccwpck_require__(2987); +class Settings { + constructor(_options = {}) { + this._options = _options; + this.followSymbolicLink = 
this._getValue(this._options.followSymbolicLink, true); + this.fs = fs.createFileSystemAdapter(this._options.fs); + this.markSymbolicLink = this._getValue(this._options.markSymbolicLink, false); + this.throwErrorOnBrokenSymbolicLink = this._getValue(this._options.throwErrorOnBrokenSymbolicLink, true); + } + _getValue(option, value) { + return option !== null && option !== void 0 ? option : value; + } +} +exports["default"] = Settings; -var bl_order = - [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]; -/* eslint-enable comma-spacing,array-bracket-spacing */ -/* The lengths of the bit length codes are sent in order of decreasing - * probability, to avoid transmitting the lengths for unused bit length codes. - */ +/***/ }), -/* =========================================================================== - * Local data. These are initialized only once. - */ +/***/ 6026: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -// We pre-fill arrays with 0 to avoid uninitialized gaps +"use strict"; -var DIST_CODE_LEN = 512; /* see definition of array dist_code below */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.Settings = exports.walkStream = exports.walkSync = exports.walk = void 0; +const async_1 = __nccwpck_require__(7523); +const stream_1 = __nccwpck_require__(6737); +const sync_1 = __nccwpck_require__(3068); +const settings_1 = __nccwpck_require__(141); +exports.Settings = settings_1.default; +function walk(directory, optionsOrSettingsOrCallback, callback) { + if (typeof optionsOrSettingsOrCallback === 'function') { + new async_1.default(directory, getSettings()).read(optionsOrSettingsOrCallback); + return; + } + new async_1.default(directory, getSettings(optionsOrSettingsOrCallback)).read(callback); +} +exports.walk = walk; +function walkSync(directory, optionsOrSettings) { + const settings = getSettings(optionsOrSettings); + const provider = new sync_1.default(directory, settings); + return provider.read(); +} +exports.walkSync = walkSync; +function walkStream(directory, optionsOrSettings) { + const settings = getSettings(optionsOrSettings); + const provider = new stream_1.default(directory, settings); + return provider.read(); +} +exports.walkStream = walkStream; +function getSettings(settingsOrOptions = {}) { + if (settingsOrOptions instanceof settings_1.default) { + return settingsOrOptions; + } + return new settings_1.default(settingsOrOptions); +} -// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1 -var static_ltree = new Array((L_CODES + 2) * 2); -zero(static_ltree); -/* The static literal tree. Since the bit lengths are imposed, there is no - * need for the L_CODES extra codes used during heap construction. However - * The codes 286 and 287 are needed to build a canonical tree (see _tr_init - * below). - */ -var static_dtree = new Array(D_CODES * 2); -zero(static_dtree); -/* The static distance tree. (Actually a trivial tree since all codes use - * 5 bits.) - */ +/***/ }), -var _dist_code = new Array(DIST_CODE_LEN); -zero(_dist_code); -/* Distance codes. The first 256 values correspond to the distances - * 3 .. 258, the last 256 values correspond to the top 8 bits of - * the 15 bit distances. 
- */ +/***/ 7523: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -var _length_code = new Array(MAX_MATCH - MIN_MATCH + 1); -zero(_length_code); -/* length code for each normalized match length (0 == MIN_MATCH) */ +"use strict"; -var base_length = new Array(LENGTH_CODES); -zero(base_length); -/* First normalized length for each code (0 = MIN_MATCH) */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +const async_1 = __nccwpck_require__(5732); +class AsyncProvider { + constructor(_root, _settings) { + this._root = _root; + this._settings = _settings; + this._reader = new async_1.default(this._root, this._settings); + this._storage = []; + } + read(callback) { + this._reader.onError((error) => { + callFailureCallback(callback, error); + }); + this._reader.onEntry((entry) => { + this._storage.push(entry); + }); + this._reader.onEnd(() => { + callSuccessCallback(callback, this._storage); + }); + this._reader.read(); + } +} +exports["default"] = AsyncProvider; +function callFailureCallback(callback, error) { + callback(error); +} +function callSuccessCallback(callback, entries) { + callback(null, entries); +} -var base_dist = new Array(D_CODES); -zero(base_dist); -/* First normalized distance for each code (0 = distance of 1) */ +/***/ }), -function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) { +/***/ 6737: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - this.static_tree = static_tree; /* static tree or NULL */ - this.extra_bits = extra_bits; /* extra bits for each code or NULL */ - this.extra_base = extra_base; /* base index for extra_bits */ - this.elems = elems; /* max number of elements in the tree */ - this.max_length = max_length; /* max bit length for the codes */ +"use strict"; - // show if `static_tree` has data or dummy - needed for monomorphic objects - this.has_stree = static_tree && static_tree.length; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const stream_1 = __nccwpck_require__(2781); +const async_1 = __nccwpck_require__(5732); +class StreamProvider { + constructor(_root, _settings) { + this._root = _root; + this._settings = _settings; + this._reader = new async_1.default(this._root, this._settings); + this._stream = new stream_1.Readable({ + objectMode: true, + read: () => { }, + destroy: () => { + if (!this._reader.isDestroyed) { + this._reader.destroy(); + } + } + }); + } + read() { + this._reader.onError((error) => { + this._stream.emit('error', error); + }); + this._reader.onEntry((entry) => { + this._stream.push(entry); + }); + this._reader.onEnd(() => { + this._stream.push(null); + }); + this._reader.read(); + return this._stream; + } } +exports["default"] = StreamProvider; -var static_l_desc; -var static_d_desc; -var static_bl_desc; +/***/ }), +/***/ 3068: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -function TreeDesc(dyn_tree, stat_desc) { - this.dyn_tree = dyn_tree; /* the dynamic tree */ - this.max_code = 0; /* largest code with non zero frequency */ - this.stat_desc = stat_desc; /* the corresponding static tree */ +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +const sync_1 = __nccwpck_require__(3595); +class SyncProvider { + constructor(_root, _settings) { + this._root = _root; + this._settings = _settings; + this._reader = new sync_1.default(this._root, this._settings); + } + read() { + return this._reader.read(); + } } +exports["default"] = SyncProvider; +/***/ }), -function d_code(dist) { - 
return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)]; -} +/***/ 5732: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +"use strict"; -/* =========================================================================== - * Output a short LSB first on the stream. - * IN assertion: there is enough room in pendingBuf. - */ -function put_short(s, w) { -// put_byte(s, (uch)((w) & 0xff)); -// put_byte(s, (uch)((ush)(w) >> 8)); - s.pending_buf[s.pending++] = (w) & 0xff; - s.pending_buf[s.pending++] = (w >>> 8) & 0xff; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const events_1 = __nccwpck_require__(2361); +const fsScandir = __nccwpck_require__(5667); +const fastq = __nccwpck_require__(7340); +const common = __nccwpck_require__(7988); +const reader_1 = __nccwpck_require__(8311); +class AsyncReader extends reader_1.default { + constructor(_root, _settings) { + super(_root, _settings); + this._settings = _settings; + this._scandir = fsScandir.scandir; + this._emitter = new events_1.EventEmitter(); + this._queue = fastq(this._worker.bind(this), this._settings.concurrency); + this._isFatalError = false; + this._isDestroyed = false; + this._queue.drain = () => { + if (!this._isFatalError) { + this._emitter.emit('end'); + } + }; + } + read() { + this._isFatalError = false; + this._isDestroyed = false; + setImmediate(() => { + this._pushToQueue(this._root, this._settings.basePath); + }); + return this._emitter; + } + get isDestroyed() { + return this._isDestroyed; + } + destroy() { + if (this._isDestroyed) { + throw new Error('The reader is already destroyed'); + } + this._isDestroyed = true; + this._queue.killAndDrain(); + } + onEntry(callback) { + this._emitter.on('entry', callback); + } + onError(callback) { + this._emitter.once('error', callback); + } + onEnd(callback) { + this._emitter.once('end', callback); + } + _pushToQueue(directory, base) { + const queueItem = { directory, base }; + this._queue.push(queueItem, (error) => { + if (error !== null) { + this._handleError(error); + } + }); + } + _worker(item, done) { + this._scandir(item.directory, this._settings.fsScandirSettings, (error, entries) => { + if (error !== null) { + done(error, undefined); + return; + } + for (const entry of entries) { + this._handleEntry(entry, item.base); + } + done(null, undefined); + }); + } + _handleError(error) { + if (this._isDestroyed || !common.isFatalError(this._settings, error)) { + return; + } + this._isFatalError = true; + this._isDestroyed = true; + this._emitter.emit('error', error); + } + _handleEntry(entry, base) { + if (this._isDestroyed || this._isFatalError) { + return; + } + const fullpath = entry.path; + if (base !== undefined) { + entry.path = common.joinPathSegments(base, entry.name, this._settings.pathSegmentSeparator); + } + if (common.isAppliedFilter(this._settings.entryFilter, entry)) { + this._emitEntry(entry); + } + if (entry.dirent.isDirectory() && common.isAppliedFilter(this._settings.deepFilter, entry)) { + this._pushToQueue(fullpath, base === undefined ? undefined : entry.path); + } + } + _emitEntry(entry) { + this._emitter.emit('entry', entry); + } } +exports["default"] = AsyncReader; -/* =========================================================================== - * Send a value on a given number of bits. - * IN assertion: length <= 16 and value fits in length bits. 
- */ -function send_bits(s, value, length) { - if (s.bi_valid > (Buf_size - length)) { - s.bi_buf |= (value << s.bi_valid) & 0xffff; - put_short(s, s.bi_buf); - s.bi_buf = value >> (Buf_size - s.bi_valid); - s.bi_valid += length - Buf_size; - } else { - s.bi_buf |= (value << s.bi_valid) & 0xffff; - s.bi_valid += length; - } -} +/***/ }), +/***/ 7988: +/***/ ((__unused_webpack_module, exports) => { -function send_code(s, c, tree) { - send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/); +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.joinPathSegments = exports.replacePathSegmentSeparator = exports.isAppliedFilter = exports.isFatalError = void 0; +function isFatalError(settings, error) { + if (settings.errorFilter === null) { + return true; + } + return !settings.errorFilter(error); +} +exports.isFatalError = isFatalError; +function isAppliedFilter(filter, value) { + return filter === null || filter(value); +} +exports.isAppliedFilter = isAppliedFilter; +function replacePathSegmentSeparator(filepath, separator) { + return filepath.split(/[/\\]/).join(separator); +} +exports.replacePathSegmentSeparator = replacePathSegmentSeparator; +function joinPathSegments(a, b, separator) { + if (a === '') { + return b; + } + /** + * The correct handling of cases when the first segment is a root (`/`, `C:/`) or UNC path (`//?/C:/`). + */ + if (a.endsWith(separator)) { + return a + b; + } + return a + separator + b; } +exports.joinPathSegments = joinPathSegments; -/* =========================================================================== - * Reverse the first len bits of a code, using straightforward code (a faster - * method would use a table) - * IN assertion: 1 <= len <= 15 - */ -function bi_reverse(code, len) { - var res = 0; - do { - res |= code & 1; - code >>>= 1; - res <<= 1; - } while (--len > 0); - return res >>> 1; -} +/***/ }), +/***/ 8311: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/* =========================================================================== - * Flush the bit buffer, keeping at most 7 bits in it. - */ -function bi_flush(s) { - if (s.bi_valid === 16) { - put_short(s, s.bi_buf); - s.bi_buf = 0; - s.bi_valid = 0; +"use strict"; - } else if (s.bi_valid >= 8) { - s.pending_buf[s.pending++] = s.bi_buf & 0xff; - s.bi_buf >>= 8; - s.bi_valid -= 8; - } +Object.defineProperty(exports, "__esModule", ({ value: true })); +const common = __nccwpck_require__(7988); +class Reader { + constructor(_root, _settings) { + this._root = _root; + this._settings = _settings; + this._root = common.replacePathSegmentSeparator(_root, _settings.pathSegmentSeparator); + } } +exports["default"] = Reader; -/* =========================================================================== - * Compute the optimal bit lengths for a tree and update the total bit length - * for the current block. - * IN assertion: the fields freq and dad are set, heap[heap_max] and - * above are the tree nodes sorted by increasing frequency. - * OUT assertions: the field len is set to the optimal bit length, the - * array bl_count contains the frequencies for each bit length. - * The length opt_len is updated; static_len is also updated if stree is - * not null. 
- */ -function gen_bitlen(s, desc) -// deflate_state *s; -// tree_desc *desc; /* the tree descriptor */ -{ - var tree = desc.dyn_tree; - var max_code = desc.max_code; - var stree = desc.stat_desc.static_tree; - var has_stree = desc.stat_desc.has_stree; - var extra = desc.stat_desc.extra_bits; - var base = desc.stat_desc.extra_base; - var max_length = desc.stat_desc.max_length; - var h; /* heap index */ - var n, m; /* iterate over the tree elements */ - var bits; /* bit length */ - var xbits; /* extra bits */ - var f; /* frequency */ - var overflow = 0; /* number of elements with bit length too large */ +/***/ }), - for (bits = 0; bits <= MAX_BITS; bits++) { - s.bl_count[bits] = 0; - } +/***/ 3595: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - /* In a first pass, compute the optimal bit lengths (which may - * overflow in the case of the bit length tree). - */ - tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */ +"use strict"; - for (h = s.heap_max + 1; h < HEAP_SIZE; h++) { - n = s.heap[h]; - bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1; - if (bits > max_length) { - bits = max_length; - overflow++; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const fsScandir = __nccwpck_require__(5667); +const common = __nccwpck_require__(7988); +const reader_1 = __nccwpck_require__(8311); +class SyncReader extends reader_1.default { + constructor() { + super(...arguments); + this._scandir = fsScandir.scandirSync; + this._storage = []; + this._queue = new Set(); } - tree[n * 2 + 1]/*.Len*/ = bits; - /* We overwrite tree[n].Dad which is no longer needed */ - - if (n > max_code) { continue; } /* not a leaf node */ - - s.bl_count[bits]++; - xbits = 0; - if (n >= base) { - xbits = extra[n - base]; + read() { + this._pushToQueue(this._root, this._settings.basePath); + this._handleQueue(); + return this._storage; } - f = tree[n * 2]/*.Freq*/; - s.opt_len += f * (bits + xbits); - if (has_stree) { - s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits); + _pushToQueue(directory, base) { + this._queue.add({ directory, base }); } - } - if (overflow === 0) { return; } - - // Trace((stderr,"\nbit length overflow\n")); - /* This happens for example on obj2 and pic of the Calgary corpus */ - - /* Find the first bit length which could increase: */ - do { - bits = max_length - 1; - while (s.bl_count[bits] === 0) { bits--; } - s.bl_count[bits]--; /* move one leaf down the tree */ - s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ - s.bl_count[max_length]--; - /* The brother of the overflow item also moves one step up, - * but this does not affect bl_count[max_length] - */ - overflow -= 2; - } while (overflow > 0); - - /* Now recompute all bit lengths, scanning in increasing frequency. - * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all - * lengths instead of fixing only the wrong ones. This idea is taken - * from 'ar' written by Haruhiko Okumura.) 
- */ - for (bits = max_length; bits !== 0; bits--) { - n = s.bl_count[bits]; - while (n !== 0) { - m = s.heap[--h]; - if (m > max_code) { continue; } - if (tree[m * 2 + 1]/*.Len*/ !== bits) { - // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); - s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/; - tree[m * 2 + 1]/*.Len*/ = bits; - } - n--; + _handleQueue() { + for (const item of this._queue.values()) { + this._handleDirectory(item.directory, item.base); + } + } + _handleDirectory(directory, base) { + try { + const entries = this._scandir(directory, this._settings.fsScandirSettings); + for (const entry of entries) { + this._handleEntry(entry, base); + } + } + catch (error) { + this._handleError(error); + } + } + _handleError(error) { + if (!common.isFatalError(this._settings, error)) { + return; + } + throw error; + } + _handleEntry(entry, base) { + const fullpath = entry.path; + if (base !== undefined) { + entry.path = common.joinPathSegments(base, entry.name, this._settings.pathSegmentSeparator); + } + if (common.isAppliedFilter(this._settings.entryFilter, entry)) { + this._pushToStorage(entry); + } + if (entry.dirent.isDirectory() && common.isAppliedFilter(this._settings.deepFilter, entry)) { + this._pushToQueue(fullpath, base === undefined ? undefined : entry.path); + } + } + _pushToStorage(entry) { + this._storage.push(entry); } - } } +exports["default"] = SyncReader; -/* =========================================================================== - * Generate the codes for a given tree and bit counts (which need not be - * optimal). - * IN assertion: the array bl_count contains the bit length statistics for - * the given tree and the field len is set for all tree elements. - * OUT assertion: the field code is set for all tree elements of non - * zero code length. - */ -function gen_codes(tree, max_code, bl_count) -// ct_data *tree; /* the tree to decorate */ -// int max_code; /* largest code with non zero frequency */ -// ushf *bl_count; /* number of codes at each bit length */ -{ - var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */ - var code = 0; /* running code value */ - var bits; /* bit index */ - var n; /* code index */ +/***/ }), - /* The distribution counts are first used to generate the code values - * without bit reversal. - */ - for (bits = 1; bits <= MAX_BITS; bits++) { - next_code[bits] = code = (code + bl_count[bits - 1]) << 1; - } - /* Check that the bit counts in bl_count are consistent. The last code - * must be all ones. - */ - //Assert (code + bl_count[MAX_BITS]-1 == (1< { - for (n = 0; n <= max_code; n++) { - var len = tree[n * 2 + 1]/*.Len*/; - if (len === 0) { continue; } - /* Now reverse the bits */ - tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len); +"use strict"; - //Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", - // n, (isgraph(n) ? 
n : ' '), len, tree[n].Code, next_code[len]-1)); - } +Object.defineProperty(exports, "__esModule", ({ value: true })); +const path = __nccwpck_require__(1017); +const fsScandir = __nccwpck_require__(5667); +class Settings { + constructor(_options = {}) { + this._options = _options; + this.basePath = this._getValue(this._options.basePath, undefined); + this.concurrency = this._getValue(this._options.concurrency, Number.POSITIVE_INFINITY); + this.deepFilter = this._getValue(this._options.deepFilter, null); + this.entryFilter = this._getValue(this._options.entryFilter, null); + this.errorFilter = this._getValue(this._options.errorFilter, null); + this.pathSegmentSeparator = this._getValue(this._options.pathSegmentSeparator, path.sep); + this.fsScandirSettings = new fsScandir.Settings({ + followSymbolicLinks: this._options.followSymbolicLinks, + fs: this._options.fs, + pathSegmentSeparator: this._options.pathSegmentSeparator, + stats: this._options.stats, + throwErrorOnBrokenSymbolicLink: this._options.throwErrorOnBrokenSymbolicLink + }); + } + _getValue(option, value) { + return option !== null && option !== void 0 ? option : value; + } } +exports["default"] = Settings; -/* =========================================================================== - * Initialize the various 'constant' tables. - */ -function tr_static_init() { - var n; /* iterates over tree elements */ - var bits; /* bit counter */ - var length; /* length value */ - var code; /* code value */ - var dist; /* distance index */ - var bl_count = new Array(MAX_BITS + 1); - /* number of codes at each bit length for an optimal tree */ +/***/ }), - // do check in _tr_init() - //if (static_init_done) return; +/***/ 1542: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - /* For some embedded targets, global variables are not initialized: */ -/*#ifdef NO_INIT_GLOBAL_POINTERS - static_l_desc.static_tree = static_ltree; - static_l_desc.extra_bits = extra_lbits; - static_d_desc.static_tree = static_dtree; - static_d_desc.extra_bits = extra_dbits; - static_bl_desc.extra_bits = extra_blbits; -#endif*/ +"use strict"; - /* Initialize the mapping length (0..255) -> length code (0..28) */ - length = 0; - for (code = 0; code < LENGTH_CODES - 1; code++) { - base_length[code] = length; - for (n = 0; n < (1 << extra_lbits[code]); n++) { - _length_code[length++] = code; - } - } - //Assert (length == 256, "tr_static_init: length != 256"); - /* Note that the length 255 (match length 258) can be represented - * in two different ways: code 284 + 5 bits or code 285, so we - * overwrite length_code[255] to use the best encoding: - */ - _length_code[length - 1] = code; +module.exports = __nccwpck_require__(785); - /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ - dist = 0; - for (code = 0; code < 16; code++) { - base_dist[code] = dist; - for (n = 0; n < (1 << extra_dbits[code]); n++) { - _dist_code[dist++] = code; - } - } - //Assert (dist == 256, "tr_static_init: dist != 256"); - dist >>= 7; /* from now on, all distances are divided by 128 */ - for (; code < D_CODES; code++) { - base_dist[code] = dist << 7; - for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { - _dist_code[256 + dist++] = code; - } - } - //Assert (dist == 256, "tr_static_init: 256+dist != 512"); - /* Construct the codes of the static literal tree */ - for (bits = 0; bits <= MAX_BITS; bits++) { - bl_count[bits] = 0; - } +/***/ }), - n = 0; - while (n <= 143) { - static_ltree[n * 2 + 1]/*.Len*/ = 8; - n++; - bl_count[8]++; - } - while (n <= 255) { - 
static_ltree[n * 2 + 1]/*.Len*/ = 9; - n++; - bl_count[9]++; - } - while (n <= 279) { - static_ltree[n * 2 + 1]/*.Len*/ = 7; - n++; - bl_count[7]++; - } - while (n <= 287) { - static_ltree[n * 2 + 1]/*.Len*/ = 8; - n++; - bl_count[8]++; - } - /* Codes 286 and 287 do not exist, but we must include them in the - * tree construction to get a canonical Huffman tree (longest code - * all ones) - */ - gen_codes(static_ltree, L_CODES + 1, bl_count); +/***/ 785: +/***/ ((module) => { - /* The static distance tree is trivial: */ - for (n = 0; n < D_CODES; n++) { - static_dtree[n * 2 + 1]/*.Len*/ = 5; - static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5); - } +"use strict"; - // Now data ready and we can init static trees - static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS); - static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS); - static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS); - //static_init_done = true; -} +var AsyncLock = function (opts) { + opts = opts || {}; + this.Promise = opts.Promise || Promise; -/* =========================================================================== - * Initialize a new block. - */ -function init_block(s) { - var n; /* iterates over tree elements */ + // format: {key : [fn, fn]} + // queues[key] = null indicates no job running for key + this.queues = Object.create(null); - /* Initialize the trees. */ - for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; } - for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; } - for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; } + // lock is reentrant for same domain + this.domainReentrant = opts.domainReentrant || false; + if (this.domainReentrant) { + if (typeof process === 'undefined' || typeof process.domain === 'undefined') { + throw new Error( + 'Domain-reentrant locks require `process.domain` to exist. Please flip `opts.domainReentrant = false`, ' + + 'use a NodeJS version that still implements Domain, or install a browser polyfill.'); + } + // domain of current running func {key : fn} + this.domains = Object.create(null); + } - s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1; - s.opt_len = s.static_len = 0; - s.last_lit = s.matches = 0; -} + this.timeout = opts.timeout || AsyncLock.DEFAULT_TIMEOUT; + this.maxOccupationTime = opts.maxOccupationTime || AsyncLock.DEFAULT_MAX_OCCUPATION_TIME; + if (opts.maxPending === Infinity || (Number.isInteger(opts.maxPending) && opts.maxPending >= 0)) { + this.maxPending = opts.maxPending; + } else { + this.maxPending = AsyncLock.DEFAULT_MAX_PENDING; + } +}; +AsyncLock.DEFAULT_TIMEOUT = 0; //Never +AsyncLock.DEFAULT_MAX_OCCUPATION_TIME = 0; //Never +AsyncLock.DEFAULT_MAX_PENDING = 1000; -/* =========================================================================== - * Flush the bit buffer and align the output on a byte boundary - */ -function bi_windup(s) -{ - if (s.bi_valid > 8) { - put_short(s, s.bi_buf); - } else if (s.bi_valid > 0) { - //put_byte(s, (Byte)s->bi_buf); - s.pending_buf[s.pending++] = s.bi_buf; - } - s.bi_buf = 0; - s.bi_valid = 0; -} - -/* =========================================================================== - * Copy a stored block, storing first the length and its - * one's complement if requested. 
+/** + * Acquire Locks + * + * @param {String|Array} key resource key or keys to lock + * @param {function} fn async function + * @param {function} cb callback function, otherwise will return a promise + * @param {Object} opts options */ -function copy_block(s, buf, len, header) -//DeflateState *s; -//charf *buf; /* the input data */ -//unsigned len; /* its length */ -//int header; /* true if block header must be written */ -{ - bi_windup(s); /* align on byte boundary */ - - if (header) { - put_short(s, len); - put_short(s, ~len); - } -// while (len--) { -// put_byte(s, *buf++); -// } - utils.arraySet(s.pending_buf, s.window, buf, len, s.pending); - s.pending += len; -} +AsyncLock.prototype.acquire = function (key, fn, cb, opts) { + if (Array.isArray(key)) { + return this._acquireBatch(key, fn, cb, opts); + } -/* =========================================================================== - * Compares to subtrees, using the tree depth as tie breaker when - * the subtrees have equal frequency. This minimizes the worst case length. - */ -function smaller(tree, n, m, depth) { - var _n2 = n * 2; - var _m2 = m * 2; - return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ || - (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m])); -} + if (typeof (fn) !== 'function') { + throw new Error('You must pass a function to execute'); + } -/* =========================================================================== - * Restore the heap property by moving down the tree starting at node k, - * exchanging a node with the smallest of its two sons if necessary, stopping - * when the heap property is re-established (each father smaller than its - * two sons). - */ -function pqdownheap(s, tree, k) -// deflate_state *s; -// ct_data *tree; /* the tree to restore */ -// int k; /* node to move down */ -{ - var v = s.heap[k]; - var j = k << 1; /* left son of k */ - while (j <= s.heap_len) { - /* Set j to the smallest of the two sons: */ - if (j < s.heap_len && - smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) { - j++; - } - /* Exit if v is smaller than both sons */ - if (smaller(tree, v, s.heap[j], s.depth)) { break; } + // faux-deferred promise using new Promise() (as Promise.defer is deprecated) + var deferredResolve = null; + var deferredReject = null; + var deferred = null; - /* Exchange v with the smallest son */ - s.heap[k] = s.heap[j]; - k = j; + if (typeof (cb) !== 'function') { + opts = cb; + cb = null; - /* And continue down the tree, setting j to the left son of k */ - j <<= 1; - } - s.heap[k] = v; -} + // will return a promise + deferred = new this.Promise(function(resolve, reject) { + deferredResolve = resolve; + deferredReject = reject; + }); + } + opts = opts || {}; -// inlined manually -// var SMALLEST = 1; + var resolved = false; + var timer = null; + var occupationTimer = null; + var self = this; -/* =========================================================================== - * Send the block data compressed using the given Huffman trees - */ -function compress_block(s, ltree, dtree) -// deflate_state *s; -// const ct_data *ltree; /* literal tree */ -// const ct_data *dtree; /* distance tree */ -{ - var dist; /* distance of matched string */ - var lc; /* match length or unmatched char (if dist == 0) */ - var lx = 0; /* running index in l_buf */ - var code; /* the code to send */ - var extra; /* number of extra bits to send */ + var done = function (locked, err, ret) { - if (s.last_lit !== 0) { - do { - dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 
1]); - lc = s.pending_buf[s.l_buf + lx]; - lx++; + if (occupationTimer) { + clearTimeout(occupationTimer); + occupationTimer = null; + } - if (dist === 0) { - send_code(s, lc, ltree); /* send a literal byte */ - //Tracecv(isgraph(lc), (stderr," '%c' ", lc)); - } else { - /* Here, lc is the match length - MIN_MATCH */ - code = _length_code[lc]; - send_code(s, code + LITERALS + 1, ltree); /* send the length code */ - extra = extra_lbits[code]; - if (extra !== 0) { - lc -= base_length[code]; - send_bits(s, lc, extra); /* send the extra length bits */ - } - dist--; /* dist is now the match distance - 1 */ - code = d_code(dist); - //Assert (code < D_CODES, "bad d_code"); + if (locked) { + if (!!self.queues[key] && self.queues[key].length === 0) { + delete self.queues[key]; + } + if (self.domainReentrant) { + delete self.domains[key]; + } + } - send_code(s, code, dtree); /* send the distance code */ - extra = extra_dbits[code]; - if (extra !== 0) { - dist -= base_dist[code]; - send_bits(s, dist, extra); /* send the extra distance bits */ - } - } /* literal or match pair ? */ + if (!resolved) { + if (!deferred) { + if (typeof (cb) === 'function') { + cb(err, ret); + } + } + else { + //promise mode + if (err) { + deferredReject(err); + } + else { + deferredResolve(ret); + } + } + resolved = true; + } - /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ - //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, - // "pendingBuf overflow"); + if (locked) { + //run next func + if (!!self.queues[key] && self.queues[key].length > 0) { + self.queues[key].shift()(); + } + } + }; - } while (lx < s.last_lit); - } + var exec = function (locked) { + if (resolved) { // may due to timed out + return done(locked); + } - send_code(s, END_BLOCK, ltree); -} + if (timer) { + clearTimeout(timer); + timer = null; + } + if (self.domainReentrant && locked) { + self.domains[key] = process.domain; + } -/* =========================================================================== - * Construct one Huffman tree and assigns the code bit strings and lengths. - * Update the total bit length for the current block. - * IN assertion: the field freq is set for all tree elements. - * OUT assertions: the fields len and code are set to the optimal bit length - * and corresponding code. The length opt_len is updated; static_len is - * also updated if stree is not null. The field max_code is set. - */ -function build_tree(s, desc) -// deflate_state *s; -// tree_desc *desc; /* the tree descriptor */ -{ - var tree = desc.dyn_tree; - var stree = desc.stat_desc.static_tree; - var has_stree = desc.stat_desc.has_stree; - var elems = desc.stat_desc.elems; - var n, m; /* iterate over heap elements */ - var max_code = -1; /* largest code with non zero frequency */ - var node; /* new node being created */ + // Callback mode + if (fn.length === 1) { + var called = false; + fn(function (err, ret) { + if (!called) { + called = true; + done(locked, err, ret); + } + }); + } + else { + // Promise mode + self._promiseTry(function () { + return fn(); + }) + .then(function(ret){ + done(locked, undefined, ret); + }, function(error){ + done(locked, error); + }); + } + }; - /* Construct the initial heap, with least frequent element in - * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. - * heap[0] is not used. 
- */ - s.heap_len = 0; - s.heap_max = HEAP_SIZE; + if (self.domainReentrant && !!process.domain) { + exec = process.domain.bind(exec); + } - for (n = 0; n < elems; n++) { - if (tree[n * 2]/*.Freq*/ !== 0) { - s.heap[++s.heap_len] = max_code = n; - s.depth[n] = 0; - - } else { - tree[n * 2 + 1]/*.Len*/ = 0; - } - } - - /* The pkzip format requires that at least one distance code exists, - * and that at least one bit should be sent even if there is only one - * possible code. So to avoid special checks later on we force at least - * two codes of non zero frequency. - */ - while (s.heap_len < 2) { - node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0); - tree[node * 2]/*.Freq*/ = 1; - s.depth[node] = 0; - s.opt_len--; - - if (has_stree) { - s.static_len -= stree[node * 2 + 1]/*.Len*/; - } - /* node is 0 or 1 so it does not have extra bits */ - } - desc.max_code = max_code; - - /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, - * establish sub-heaps of increasing lengths: - */ - for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); } - - /* Construct the Huffman tree by repeatedly combining the least two - * frequent nodes. - */ - node = elems; /* next internal node of the tree */ - do { - //pqremove(s, tree, n); /* n = node of least frequency */ - /*** pqremove ***/ - n = s.heap[1/*SMALLEST*/]; - s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--]; - pqdownheap(s, tree, 1/*SMALLEST*/); - /***/ - - m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */ - - s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */ - s.heap[--s.heap_max] = m; + if (!self.queues[key]) { + self.queues[key] = []; + exec(true); + } + else if (self.domainReentrant && !!process.domain && process.domain === self.domains[key]) { + // If code is in the same domain of current running task, run it directly + // Since lock is re-enterable + exec(false); + } + else if (self.queues[key].length >= self.maxPending) { + done(false, new Error('Too many pending tasks in queue ' + key)); + } + else { + var taskFn = function () { + exec(true); + }; + if (opts.skipQueue) { + self.queues[key].unshift(taskFn); + } else { + self.queues[key].push(taskFn); + } - /* Create a new node father of n and m */ - tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/; - s.depth[node] = (s.depth[n] >= s.depth[m] ? 
s.depth[n] : s.depth[m]) + 1; - tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node; + var timeout = opts.timeout || self.timeout; + if (timeout) { + timer = setTimeout(function () { + timer = null; + done(false, new Error('async-lock timed out in queue ' + key)); + }, timeout); + } + } - /* and insert the new node in the heap */ - s.heap[1/*SMALLEST*/] = node++; - pqdownheap(s, tree, 1/*SMALLEST*/); + var maxOccupationTime = opts.maxOccupationTime || self.maxOccupationTime; + if (maxOccupationTime) { + occupationTimer = setTimeout(function () { + if (!!self.queues[key]) { + done(false, new Error('Maximum occupation time is exceeded in queue ' + key)); + } + }, maxOccupationTime); + } - } while (s.heap_len >= 2); + if (deferred) { + return deferred; + } +}; - s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/]; +/* + * Below is how this function works: + * + * Equivalent code: + * self.acquire(key1, function(cb){ + * self.acquire(key2, function(cb){ + * self.acquire(key3, fn, cb); + * }, cb); + * }, cb); + * + * Equivalent code: + * var fn3 = getFn(key3, fn); + * var fn2 = getFn(key2, fn3); + * var fn1 = getFn(key1, fn2); + * fn1(cb); + */ +AsyncLock.prototype._acquireBatch = function (keys, fn, cb, opts) { + if (typeof (cb) !== 'function') { + opts = cb; + cb = null; + } - /* At this point, the fields freq and dad are set. We can now - * generate the bit lengths. - */ - gen_bitlen(s, desc); + var self = this; + var getFn = function (key, fn) { + return function (cb) { + self.acquire(key, fn, cb, opts); + }; + }; - /* The field len is now set, we can generate the bit codes */ - gen_codes(tree, max_code, s.bl_count); -} + var fnx = fn; + keys.reverse().forEach(function (key) { + fnx = getFn(key, fnx); + }); + if (typeof (cb) === 'function') { + fnx(cb); + } + else { + return new this.Promise(function (resolve, reject) { + // check for promise mode in case keys is empty array + if (fnx.length === 1) { + fnx(function (err, ret) { + if (err) { + reject(err); + } + else { + resolve(ret); + } + }); + } else { + resolve(fnx()); + } + }); + } +}; -/* =========================================================================== - * Scan a literal or distance tree to determine the frequencies of the codes - * in the bit length tree. 
+/* + * Whether there is any running or pending asyncFunc + * + * @param {String} key */ -function scan_tree(s, tree, max_code) -// deflate_state *s; -// ct_data *tree; /* the tree to be scanned */ -// int max_code; /* and its largest code of non zero frequency */ -{ - var n; /* iterates over all tree elements */ - var prevlen = -1; /* last emitted length */ - var curlen; /* length of current code */ - - var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ - - var count = 0; /* repeat count of the current code */ - var max_count = 7; /* max repeat count */ - var min_count = 4; /* min repeat count */ - - if (nextlen === 0) { - max_count = 138; - min_count = 3; - } - tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */ +AsyncLock.prototype.isBusy = function (key) { + if (!key) { + return Object.keys(this.queues).length > 0; + } + else { + return !!this.queues[key]; + } +}; - for (n = 0; n <= max_code; n++) { - curlen = nextlen; - nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; +/** + * Promise.try() implementation to become independent of Q-specific methods + */ +AsyncLock.prototype._promiseTry = function(fn) { + try { + return this.Promise.resolve(fn()); + } catch (e) { + return this.Promise.reject(e); + } +}; - if (++count < max_count && curlen === nextlen) { - continue; +module.exports = AsyncLock; - } else if (count < min_count) { - s.bl_tree[curlen * 2]/*.Freq*/ += count; - } else if (curlen !== 0) { +/***/ }), - if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; } - s.bl_tree[REP_3_6 * 2]/*.Freq*/++; +/***/ 610: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - } else if (count <= 10) { - s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++; +"use strict"; - } else { - s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++; - } - count = 0; - prevlen = curlen; +const stringify = __nccwpck_require__(8750); +const compile = __nccwpck_require__(9434); +const expand = __nccwpck_require__(5873); +const parse = __nccwpck_require__(6477); - if (nextlen === 0) { - max_count = 138; - min_count = 3; +/** + * Expand the given pattern or create a regex-compatible string. + * + * ```js + * const braces = require('braces'); + * console.log(braces('{a,b,c}', { compile: true })); //=> ['(a|b|c)'] + * console.log(braces('{a,b,c}')); //=> ['a', 'b', 'c'] + * ``` + * @param {String} `str` + * @param {Object} `options` + * @return {String} + * @api public + */ - } else if (curlen === nextlen) { - max_count = 6; - min_count = 3; +const braces = (input, options = {}) => { + let output = []; - } else { - max_count = 7; - min_count = 4; + if (Array.isArray(input)) { + for (let pattern of input) { + let result = braces.create(pattern, options); + if (Array.isArray(result)) { + output.push(...result); + } else { + output.push(result); + } } + } else { + output = [].concat(braces.create(input, options)); } -} - -/* =========================================================================== - * Send a literal or distance tree in compressed form, using the codes in - * bl_tree. + if (options && options.expand === true && options.nodupes === true) { + output = [...new Set(output)]; + } + return output; +}; + +/** + * Parse the given `str` with the given `options`. 
+ * + * ```js + * // braces.parse(pattern, [, options]); + * const ast = braces.parse('a/{b,c}/d'); + * console.log(ast); + * ``` + * @param {String} pattern Brace pattern to parse + * @param {Object} options + * @return {Object} Returns an AST + * @api public */ -function send_tree(s, tree, max_code) -// deflate_state *s; -// ct_data *tree; /* the tree to be scanned */ -// int max_code; /* and its largest code of non zero frequency */ -{ - var n; /* iterates over all tree elements */ - var prevlen = -1; /* last emitted length */ - var curlen; /* length of current code */ - var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ +braces.parse = (input, options = {}) => parse(input, options); - var count = 0; /* repeat count of the current code */ - var max_count = 7; /* max repeat count */ - var min_count = 4; /* min repeat count */ +/** + * Creates a braces string from an AST, or an AST node. + * + * ```js + * const braces = require('braces'); + * let ast = braces.parse('foo/{a,b}/bar'); + * console.log(stringify(ast.nodes[2])); //=> '{a,b}' + * ``` + * @param {String} `input` Brace pattern or AST. + * @param {Object} `options` + * @return {Array} Returns an array of expanded values. + * @api public + */ - /* tree[max_code+1].Len = -1; */ /* guard already set */ - if (nextlen === 0) { - max_count = 138; - min_count = 3; +braces.stringify = (input, options = {}) => { + if (typeof input === 'string') { + return stringify(braces.parse(input, options), options); } + return stringify(input, options); +}; - for (n = 0; n <= max_code; n++) { - curlen = nextlen; - nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; +/** + * Compiles a brace pattern into a regex-compatible, optimized string. + * This method is called by the main [braces](#braces) function by default. + * + * ```js + * const braces = require('braces'); + * console.log(braces.compile('a/{b,c}/d')); + * //=> ['a/(b|c)/d'] + * ``` + * @param {String} `input` Brace pattern or AST. + * @param {Object} `options` + * @return {Array} Returns an array of expanded values. + * @api public + */ - if (++count < max_count && curlen === nextlen) { - continue; +braces.compile = (input, options = {}) => { + if (typeof input === 'string') { + input = braces.parse(input, options); + } + return compile(input, options); +}; - } else if (count < min_count) { - do { send_code(s, curlen, s.bl_tree); } while (--count !== 0); +/** + * Expands a brace pattern into an array. This method is called by the + * main [braces](#braces) function when `options.expand` is true. Before + * using this method it's recommended that you read the [performance notes](#performance)) + * and advantages of using [.compile](#compile) instead. + * + * ```js + * const braces = require('braces'); + * console.log(braces.expand('a/{b,c}/d')); + * //=> ['a/b/d', 'a/c/d']; + * ``` + * @param {String} `pattern` Brace pattern + * @param {Object} `options` + * @return {Array} Returns an array of expanded values. 
+ * @api public + */ - } else if (curlen !== 0) { - if (curlen !== prevlen) { - send_code(s, curlen, s.bl_tree); - count--; - } - //Assert(count >= 3 && count <= 6, " 3_6?"); - send_code(s, REP_3_6, s.bl_tree); - send_bits(s, count - 3, 2); +braces.expand = (input, options = {}) => { + if (typeof input === 'string') { + input = braces.parse(input, options); + } - } else if (count <= 10) { - send_code(s, REPZ_3_10, s.bl_tree); - send_bits(s, count - 3, 3); + let result = expand(input, options); - } else { - send_code(s, REPZ_11_138, s.bl_tree); - send_bits(s, count - 11, 7); - } + // filter out empty strings if specified + if (options.noempty === true) { + result = result.filter(Boolean); + } - count = 0; - prevlen = curlen; - if (nextlen === 0) { - max_count = 138; - min_count = 3; + // filter out duplicates if specified + if (options.nodupes === true) { + result = [...new Set(result)]; + } - } else if (curlen === nextlen) { - max_count = 6; - min_count = 3; + return result; +}; - } else { - max_count = 7; - min_count = 4; - } +/** + * Processes a brace pattern and returns either an expanded array + * (if `options.expand` is true), a highly optimized regex-compatible string. + * This method is called by the main [braces](#braces) function. + * + * ```js + * const braces = require('braces'); + * console.log(braces.create('user-{200..300}/project-{a,b,c}-{1..10}')) + * //=> 'user-(20[0-9]|2[1-9][0-9]|300)/project-(a|b|c)-([1-9]|10)' + * ``` + * @param {String} `pattern` Brace pattern + * @param {Object} `options` + * @return {Array} Returns an array of expanded values. + * @api public + */ + +braces.create = (input, options = {}) => { + if (input === '' || input.length < 3) { + return [input]; } -} + return options.expand !== true + ? braces.compile(input, options) + : braces.expand(input, options); +}; -/* =========================================================================== - * Construct the Huffman tree for the bit lengths and return the index in - * bl_order of the last bit length code to send. +/** + * Expose "braces" */ -function build_bl_tree(s) { - var max_blindex; /* index of last bit length code of non zero freq */ - /* Determine the bit length frequencies for literal and distance trees */ - scan_tree(s, s.dyn_ltree, s.l_desc.max_code); - scan_tree(s, s.dyn_dtree, s.d_desc.max_code); +module.exports = braces; - /* Build the bit length tree: */ - build_tree(s, s.bl_desc); - /* opt_len now includes the length of the tree representations, except - * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. - */ - /* Determine the number of bit length codes to send. The pkzip format - * requires that at least 4 bit length codes be sent. (appnote.txt says - * 3 but the actual value used is 4.) - */ - for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { - if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) { - break; - } - } - /* Update opt_len to include the bit length tree and counts */ - s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; - //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", - // s->opt_len, s->static_len)); +/***/ }), - return max_blindex; -} +/***/ 9434: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { +"use strict"; -/* =========================================================================== - * Send the header for a block using dynamic Huffman trees: the counts, the - * lengths of the bit length codes, the literal tree and the distance tree. - * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
- */ -function send_all_trees(s, lcodes, dcodes, blcodes) -// deflate_state *s; -// int lcodes, dcodes, blcodes; /* number of codes for each tree */ -{ - var rank; /* index in bl_order */ - //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); - //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, - // "too many codes"); - //Tracev((stderr, "\nbl counts: ")); - send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */ - send_bits(s, dcodes - 1, 5); - send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */ - for (rank = 0; rank < blcodes; rank++) { - //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); - send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3); - } - //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); +const fill = __nccwpck_require__(6330); +const utils = __nccwpck_require__(5207); - send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */ - //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); +const compile = (ast, options = {}) => { + let walk = (node, parent = {}) => { + let invalidBlock = utils.isInvalidBrace(parent); + let invalidNode = node.invalid === true && options.escapeInvalid === true; + let invalid = invalidBlock === true || invalidNode === true; + let prefix = options.escapeInvalid === true ? '\\' : ''; + let output = ''; - send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */ - //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); -} + if (node.isOpen === true) { + return prefix + node.value; + } + if (node.isClose === true) { + return prefix + node.value; + } + if (node.type === 'open') { + return invalid ? (prefix + node.value) : '('; + } -/* =========================================================================== - * Check if the data type is TEXT or BINARY, using the following algorithm: - * - TEXT if the two conditions below are satisfied: - * a) There are no non-portable control characters belonging to the - * "black list" (0..6, 14..25, 28..31). - * b) There is at least one printable character belonging to the - * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). - * - BINARY otherwise. - * - The following partially-portable control characters form a - * "gray list" that is ignored in this detection algorithm: - * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). - * IN assertion: the fields Freq of dyn_ltree are set. - */ -function detect_data_type(s) { - /* black_mask is the bit mask of black-listed bytes - * set bits 0..6, 14..25, and 28..31 - * 0xf3ffc07f = binary 11110011111111111100000001111111 - */ - var black_mask = 0xf3ffc07f; - var n; - - /* Check for non-textual ("black-listed") bytes. */ - for (n = 0; n <= 31; n++, black_mask >>>= 1) { - if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { - return Z_BINARY; + if (node.type === 'close') { + return invalid ? (prefix + node.value) : ')'; } - } - /* Check for textual ("white-listed") bytes. */ - if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || - s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { - return Z_TEXT; - } - for (n = 32; n < LITERALS; n++) { - if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { - return Z_TEXT; + if (node.type === 'comma') { + return node.prev.type === 'comma' ? '' : (invalid ? node.value : '|'); } - } - /* There are no "black-listed" or "white-listed" bytes: - * this stream either is empty or has tolerated ("gray-listed") bytes only. 
- */ - return Z_BINARY; -} - - -var static_init_done = false; + if (node.value) { + return node.value; + } -/* =========================================================================== - * Initialize the tree data structures for a new zlib stream. - */ -function _tr_init(s) -{ + if (node.nodes && node.ranges > 0) { + let args = utils.reduce(node.nodes); + let range = fill(...args, { ...options, wrap: false, toRegex: true }); - if (!static_init_done) { - tr_static_init(); - static_init_done = true; - } + if (range.length !== 0) { + return args.length > 1 && range.length > 1 ? `(${range})` : range; + } + } - s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc); - s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc); - s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc); + if (node.nodes) { + for (let child of node.nodes) { + output += walk(child, node); + } + } + return output; + }; - s.bi_buf = 0; - s.bi_valid = 0; + return walk(ast); +}; - /* Initialize the first block of the first file: */ - init_block(s); -} +module.exports = compile; -/* =========================================================================== - * Send a stored block - */ -function _tr_stored_block(s, buf, stored_len, last) -//DeflateState *s; -//charf *buf; /* input block */ -//ulg stored_len; /* length of input block */ -//int last; /* one if this is the last block for a file */ -{ - send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */ - copy_block(s, buf, stored_len, true); /* with header */ -} +/***/ }), +/***/ 8774: +/***/ ((module) => { -/* =========================================================================== - * Send one empty static block to give enough lookahead for inflate. - * This takes 10 bits, of which 7 may remain in the bit buffer. - */ -function _tr_align(s) { - send_bits(s, STATIC_TREES << 1, 3); - send_code(s, END_BLOCK, static_ltree); - bi_flush(s); -} +"use strict"; -/* =========================================================================== - * Determine the best encoding for the current block: dynamic trees, static - * trees or store, and output the encoded block to the zip file. - */ -function _tr_flush_block(s, buf, stored_len, last) -//DeflateState *s; -//charf *buf; /* input block, or NULL if too old */ -//ulg stored_len; /* length of input block */ -//int last; /* one if this is the last block for a file */ -{ - var opt_lenb, static_lenb; /* opt_len and static_len in bytes */ - var max_blindex = 0; /* index of last bit length code of non zero freq */ +module.exports = { + MAX_LENGTH: 1024 * 64, - /* Build the Huffman trees unless a stored block is forced */ - if (s.level > 0) { + // Digits + CHAR_0: '0', /* 0 */ + CHAR_9: '9', /* 9 */ - /* Check if the file is binary or text */ - if (s.strm.data_type === Z_UNKNOWN) { - s.strm.data_type = detect_data_type(s); - } + // Alphabet chars. + CHAR_UPPERCASE_A: 'A', /* A */ + CHAR_LOWERCASE_A: 'a', /* a */ + CHAR_UPPERCASE_Z: 'Z', /* Z */ + CHAR_LOWERCASE_Z: 'z', /* z */ - /* Construct the literal and distance trees */ - build_tree(s, s.l_desc); - // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, - // s->static_len)); + CHAR_LEFT_PARENTHESES: '(', /* ( */ + CHAR_RIGHT_PARENTHESES: ')', /* ) */ - build_tree(s, s.d_desc); - // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, - // s->static_len)); - /* At this point, opt_len and static_len are the total bit lengths of - * the compressed block data, excluding the tree representations. 
- */ + CHAR_ASTERISK: '*', /* * */ - /* Build the bit length tree for the above two trees, and get the index - * in bl_order of the last bit length code to send. - */ - max_blindex = build_bl_tree(s); + // Non-alphabetic chars. + CHAR_AMPERSAND: '&', /* & */ + CHAR_AT: '@', /* @ */ + CHAR_BACKSLASH: '\\', /* \ */ + CHAR_BACKTICK: '`', /* ` */ + CHAR_CARRIAGE_RETURN: '\r', /* \r */ + CHAR_CIRCUMFLEX_ACCENT: '^', /* ^ */ + CHAR_COLON: ':', /* : */ + CHAR_COMMA: ',', /* , */ + CHAR_DOLLAR: '$', /* . */ + CHAR_DOT: '.', /* . */ + CHAR_DOUBLE_QUOTE: '"', /* " */ + CHAR_EQUAL: '=', /* = */ + CHAR_EXCLAMATION_MARK: '!', /* ! */ + CHAR_FORM_FEED: '\f', /* \f */ + CHAR_FORWARD_SLASH: '/', /* / */ + CHAR_HASH: '#', /* # */ + CHAR_HYPHEN_MINUS: '-', /* - */ + CHAR_LEFT_ANGLE_BRACKET: '<', /* < */ + CHAR_LEFT_CURLY_BRACE: '{', /* { */ + CHAR_LEFT_SQUARE_BRACKET: '[', /* [ */ + CHAR_LINE_FEED: '\n', /* \n */ + CHAR_NO_BREAK_SPACE: '\u00A0', /* \u00A0 */ + CHAR_PERCENT: '%', /* % */ + CHAR_PLUS: '+', /* + */ + CHAR_QUESTION_MARK: '?', /* ? */ + CHAR_RIGHT_ANGLE_BRACKET: '>', /* > */ + CHAR_RIGHT_CURLY_BRACE: '}', /* } */ + CHAR_RIGHT_SQUARE_BRACKET: ']', /* ] */ + CHAR_SEMICOLON: ';', /* ; */ + CHAR_SINGLE_QUOTE: '\'', /* ' */ + CHAR_SPACE: ' ', /* */ + CHAR_TAB: '\t', /* \t */ + CHAR_UNDERSCORE: '_', /* _ */ + CHAR_VERTICAL_LINE: '|', /* | */ + CHAR_ZERO_WIDTH_NOBREAK_SPACE: '\uFEFF' /* \uFEFF */ +}; - /* Determine the best encoding. Compute the block lengths in bytes. */ - opt_lenb = (s.opt_len + 3 + 7) >>> 3; - static_lenb = (s.static_len + 3 + 7) >>> 3; - // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", - // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, - // s->last_lit)); +/***/ }), - if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; } +/***/ 5873: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - } else { - // Assert(buf != (char*)0, "lost buf"); - opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ - } +"use strict"; - if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) { - /* 4: two words for the lengths */ - /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. - * Otherwise we can't have processed more than WSIZE input bytes since - * the last block flush, because compression would have been - * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to - * transform a block into a stored block. - */ - _tr_stored_block(s, buf, stored_len, last); +const fill = __nccwpck_require__(6330); +const stringify = __nccwpck_require__(8750); +const utils = __nccwpck_require__(5207); - } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) { +const append = (queue = '', stash = '', enclose = false) => { + let result = []; - send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3); - compress_block(s, static_ltree, static_dtree); + queue = [].concat(queue); + stash = [].concat(stash); - } else { - send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3); - send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1); - compress_block(s, s.dyn_ltree, s.dyn_dtree); + if (!stash.length) return queue; + if (!queue.length) { + return enclose ? utils.flatten(stash).map(ele => `{${ele}}`) : stash; } - // Assert (s->compressed_len == s->bits_sent, "bad compressed size"); - /* The above check is made mod 2^32, for files larger than 512 MB - * and uLong implemented on 32 bits. 
- */ - init_block(s); - if (last) { - bi_windup(s); + for (let item of queue) { + if (Array.isArray(item)) { + for (let value of item) { + result.push(append(value, stash, enclose)); + } + } else { + for (let ele of stash) { + if (enclose === true && typeof ele === 'string') ele = `{${ele}}`; + result.push(Array.isArray(ele) ? append(item, ele, enclose) : (item + ele)); + } + } } - // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, - // s->compressed_len-7*last)); -} + return utils.flatten(result); +}; -/* =========================================================================== - * Save the match info and tally the frequency counts. Return true if - * the current block must be flushed. - */ -function _tr_tally(s, dist, lc) -// deflate_state *s; -// unsigned dist; /* distance of matched string */ -// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ -{ - //var out_length, in_length, dcode; +const expand = (ast, options = {}) => { + let rangeLimit = options.rangeLimit === void 0 ? 1000 : options.rangeLimit; - s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff; - s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff; + let walk = (node, parent = {}) => { + node.queue = []; - s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff; - s.last_lit++; + let p = parent; + let q = parent.queue; - if (dist === 0) { - /* lc is the unmatched char */ - s.dyn_ltree[lc * 2]/*.Freq*/++; - } else { - s.matches++; - /* Here, lc is the match length - MIN_MATCH */ - dist--; /* dist = match distance - 1 */ - //Assert((ush)dist < (ush)MAX_DIST(s) && - // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && - // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); - - s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++; - s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++; - } - -// (!) This block is disabled in zlib defaults, -// don't enable it for binary compatibility - -//#ifdef TRUNCATE_BLOCK -// /* Try to guess if it is profitable to stop the current block here */ -// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) { -// /* Compute an upper bound for the compressed length */ -// out_length = s.last_lit*8; -// in_length = s.strstart - s.block_start; -// -// for (dcode = 0; dcode < D_CODES; dcode++) { -// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]); -// } -// out_length >>>= 3; -// //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", -// // s->last_lit, in_length, out_length, -// // 100L - out_length*100L/in_length)); -// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) { -// return true; -// } -// } -//#endif + while (p.type !== 'brace' && p.type !== 'root' && p.parent) { + p = p.parent; + q = p.queue; + } - return (s.last_lit === s.lit_bufsize - 1); - /* We avoid equality with lit_bufsize because of wraparound at 64K - * on 16 bit machines and because stored blocks are restricted to - * 64K-1 bytes. 
- */ -} + if (node.invalid || node.dollar) { + q.push(append(q.pop(), stringify(node, options))); + return; + } -exports._tr_init = _tr_init; -exports._tr_stored_block = _tr_stored_block; -exports._tr_flush_block = _tr_flush_block; -exports._tr_tally = _tr_tally; -exports._tr_align = _tr_align; + if (node.type === 'brace' && node.invalid !== true && node.nodes.length === 2) { + q.push(append(q.pop(), ['{}'])); + return; + } + if (node.nodes && node.ranges > 0) { + let args = utils.reduce(node.nodes); -/***/ }), + if (utils.exceedsLimit(...args, options.step, rangeLimit)) { + throw new RangeError('expanded array length exceeds range limit. Use options.rangeLimit to increase or disable the limit.'); + } -/***/ 141: -/***/ (function(module) { + let range = fill(...args, options); + if (range.length === 0) { + range = stringify(node, options); + } -"use strict"; + q.push(append(q.pop(), range)); + node.nodes = []; + return; + } + let enclose = utils.encloseBrace(node); + let queue = node.queue; + let block = node; -// Note: adler32 takes 12% for level 0 and 2% for level 6. -// It isn't worth it to make additional optimizations as in original. -// Small size is preferable. + while (block.type !== 'brace' && block.type !== 'root' && block.parent) { + block = block.parent; + queue = block.queue; + } -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. + for (let i = 0; i < node.nodes.length; i++) { + let child = node.nodes[i]; -function adler32(adler, buf, len, pos) { - var s1 = (adler & 0xffff) |0, - s2 = ((adler >>> 16) & 0xffff) |0, - n = 0; + if (child.type === 'comma' && node.type === 'brace') { + if (i === 1) queue.push(''); + queue.push(''); + continue; + } - while (len !== 0) { - // Set limit ~ twice less than 5552, to keep - // s2 in 31-bits, because we force signed ints. - // in other case %= will fail. - n = len > 2000 ? 
2000 : len; - len -= n; + if (child.type === 'close') { + q.push(append(q.pop(), queue, enclose)); + continue; + } - do { - s1 = (s1 + buf[pos++]) |0; - s2 = (s2 + s1) |0; - } while (--n); + if (child.value && child.type !== 'open') { + queue.push(append(queue.pop(), child.value)); + continue; + } - s1 %= 65521; - s2 %= 65521; - } + if (child.nodes) { + walk(child, node); + } + } - return (s1 | (s2 << 16)) |0; -} + return queue; + }; + return utils.flatten(walk(ast)); +}; -module.exports = adler32; +module.exports = expand; /***/ }), -/***/ 148: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 6477: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.readdir = exports.readdirWithFileTypes = exports.read = void 0; -const fsStat = __webpack_require__(231); -const constants_1 = __webpack_require__(171); -const utils = __webpack_require__(933); -const common = __webpack_require__(185); -function read(directory, settings) { - if (!settings.stats && constants_1.IS_SUPPORT_READDIR_WITH_FILE_TYPES) { - return readdirWithFileTypes(directory, settings); - } - return readdir(directory, settings); -} -exports.read = read; -function readdirWithFileTypes(directory, settings) { - const dirents = settings.fs.readdirSync(directory, { withFileTypes: true }); - return dirents.map((dirent) => { - const entry = { - dirent, - name: dirent.name, - path: common.joinPathSegments(directory, dirent.name, settings.pathSegmentSeparator) - }; - if (entry.dirent.isSymbolicLink() && settings.followSymbolicLinks) { - try { - const stats = settings.fs.statSync(entry.path); - entry.dirent = utils.fs.createDirentFromStats(entry.name, stats); - } - catch (error) { - if (settings.throwErrorOnBrokenSymbolicLink) { - throw error; - } - } - } - return entry; - }); -} -exports.readdirWithFileTypes = readdirWithFileTypes; -function readdir(directory, settings) { - const names = settings.fs.readdirSync(directory); - return names.map((name) => { - const entryPath = common.joinPathSegments(directory, name, settings.pathSegmentSeparator); - const stats = fsStat.statSync(entryPath, settings.fsStatSettings); - const entry = { - name, - path: entryPath, - dirent: utils.fs.createDirentFromStats(name, stats) - }; - if (settings.stats) { - entry.stats = stats; - } - return entry; - }); -} -exports.readdir = readdir; +const stringify = __nccwpck_require__(8750); -/***/ }), +/** + * Constants + */ -/***/ 149: -/***/ (function(module, exports, __webpack_require__) { +const { + MAX_LENGTH, + CHAR_BACKSLASH, /* \ */ + CHAR_BACKTICK, /* ` */ + CHAR_COMMA, /* , */ + CHAR_DOT, /* . */ + CHAR_LEFT_PARENTHESES, /* ( */ + CHAR_RIGHT_PARENTHESES, /* ) */ + CHAR_LEFT_CURLY_BRACE, /* { */ + CHAR_RIGHT_CURLY_BRACE, /* } */ + CHAR_LEFT_SQUARE_BRACKET, /* [ */ + CHAR_RIGHT_SQUARE_BRACKET, /* ] */ + CHAR_DOUBLE_QUOTE, /* " */ + CHAR_SINGLE_QUOTE, /* ' */ + CHAR_NO_BREAK_SPACE, + CHAR_ZERO_WIDTH_NOBREAK_SPACE +} = __nccwpck_require__(8774); -/*! safe-buffer. MIT License. 
Feross Aboukhadijeh */ -/* eslint-disable node/no-deprecated-api */ -var buffer = __webpack_require__(293) -var Buffer = buffer.Buffer +/** + * parse + */ -// alternative to using Object.keys for old browsers -function copyProps (src, dst) { - for (var key in src) { - dst[key] = src[key] +const parse = (input, options = {}) => { + if (typeof input !== 'string') { + throw new TypeError('Expected a string'); } -} -if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) { - module.exports = buffer -} else { - // Copy properties from require('buffer') - copyProps(buffer, exports) - exports.Buffer = SafeBuffer -} - -function SafeBuffer (arg, encodingOrOffset, length) { - return Buffer(arg, encodingOrOffset, length) -} - -SafeBuffer.prototype = Object.create(Buffer.prototype) -// Copy static methods from Buffer -copyProps(Buffer, SafeBuffer) - -SafeBuffer.from = function (arg, encodingOrOffset, length) { - if (typeof arg === 'number') { - throw new TypeError('Argument must not be a number') + let opts = options || {}; + let max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH; + if (input.length > max) { + throw new SyntaxError(`Input length (${input.length}), exceeds max characters (${max})`); } - return Buffer(arg, encodingOrOffset, length) -} -SafeBuffer.alloc = function (size, fill, encoding) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - var buf = Buffer(size) - if (fill !== undefined) { - if (typeof encoding === 'string') { - buf.fill(fill, encoding) - } else { - buf.fill(fill) + let ast = { type: 'root', input, nodes: [] }; + let stack = [ast]; + let block = ast; + let prev = ast; + let brackets = 0; + let length = input.length; + let index = 0; + let depth = 0; + let value; + let memo = {}; + + /** + * Helpers + */ + + const advance = () => input[index++]; + const push = node => { + if (node.type === 'text' && prev.type === 'dot') { + prev.type = 'text'; } - } else { - buf.fill(0) - } - return buf -} -SafeBuffer.allocUnsafe = function (size) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - return Buffer(size) -} + if (prev && prev.type === 'text' && node.type === 'text') { + prev.value += node.value; + return; + } -SafeBuffer.allocUnsafeSlow = function (size) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - return buffer.SlowBuffer(size) -} + block.nodes.push(node); + node.parent = block; + node.prev = prev; + prev = node; + return node; + }; + push({ type: 'bos' }); -/***/ }), + while (index < length) { + block = stack[stack.length - 1]; + value = advance(); -/***/ 171: -/***/ (function(__unusedmodule, exports) { + /** + * Invalid chars + */ -"use strict"; + if (value === CHAR_ZERO_WIDTH_NOBREAK_SPACE || value === CHAR_NO_BREAK_SPACE) { + continue; + } -Object.defineProperty(exports, "__esModule", { value: true }); -exports.IS_SUPPORT_READDIR_WITH_FILE_TYPES = void 0; -const NODE_PROCESS_VERSION_PARTS = process.versions.node.split('.'); -if (NODE_PROCESS_VERSION_PARTS[0] === undefined || NODE_PROCESS_VERSION_PARTS[1] === undefined) { - throw new Error(`Unexpected behavior. 
The 'process.versions.node' variable has invalid value: ${process.versions.node}`); -} -const MAJOR_VERSION = Number.parseInt(NODE_PROCESS_VERSION_PARTS[0], 10); -const MINOR_VERSION = Number.parseInt(NODE_PROCESS_VERSION_PARTS[1], 10); -const SUPPORTED_MAJOR_VERSION = 10; -const SUPPORTED_MINOR_VERSION = 10; -const IS_MATCHED_BY_MAJOR = MAJOR_VERSION > SUPPORTED_MAJOR_VERSION; -const IS_MATCHED_BY_MAJOR_AND_MINOR = MAJOR_VERSION === SUPPORTED_MAJOR_VERSION && MINOR_VERSION >= SUPPORTED_MINOR_VERSION; -/** - * IS `true` for Node.js 10.10 and greater. - */ -exports.IS_SUPPORT_READDIR_WITH_FILE_TYPES = IS_MATCHED_BY_MAJOR || IS_MATCHED_BY_MAJOR_AND_MINOR; + /** + * Escaped chars + */ + if (value === CHAR_BACKSLASH) { + push({ type: 'text', value: (options.keepEscaping ? value : '') + advance() }); + continue; + } -/***/ }), + /** + * Right square bracket (literal): ']' + */ -/***/ 178: -/***/ (function(module) { + if (value === CHAR_RIGHT_SQUARE_BRACKET) { + push({ type: 'text', value: '\\' + value }); + continue; + } -"use strict"; + /** + * Left square bracket: '[' + */ + if (value === CHAR_LEFT_SQUARE_BRACKET) { + brackets++; -function escapeRegExp(string) { - return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string -} + let closed = true; + let next; -function replaceAll(str, search, replacement) { - search = search instanceof RegExp ? search : new RegExp(escapeRegExp(search), 'g'); + while (index < length && (next = advance())) { + value += next; - return str.replace(search, replacement); -} + if (next === CHAR_LEFT_SQUARE_BRACKET) { + brackets++; + continue; + } -var CleanGitRef = { - clean: function clean(value) { - if (typeof value !== 'string') { - throw new Error('Expected a string, received: ' + value); - } + if (next === CHAR_BACKSLASH) { + value += advance(); + continue; + } - value = replaceAll(value, './', '/'); - value = replaceAll(value, '..', '.'); - value = replaceAll(value, ' ', '-'); - value = replaceAll(value, /^[~^:?*\\\-]/g, ''); - value = replaceAll(value, /[~^:?*\\]/g, '-'); - value = replaceAll(value, /[~^:?*\\\-]$/g, ''); - value = replaceAll(value, '@{', '-'); - value = replaceAll(value, /\.$/g, ''); - value = replaceAll(value, /\/$/g, ''); - value = replaceAll(value, /\.lock$/g, ''); - return value; - } -}; + if (next === CHAR_RIGHT_SQUARE_BRACKET) { + brackets--; -module.exports = CleanGitRef; + if (brackets === 0) { + break; + } + } + } -/***/ }), + push({ type: 'text', value }); + continue; + } -/***/ 181: -/***/ (function(module) { + /** + * Parentheses + */ -"use strict"; + if (value === CHAR_LEFT_PARENTHESES) { + block = push({ type: 'paren', nodes: [] }); + stack.push(block); + push({ type: 'text', value }); + continue; + } + if (value === CHAR_RIGHT_PARENTHESES) { + if (block.type !== 'paren') { + push({ type: 'text', value }); + continue; + } + block = stack.pop(); + push({ type: 'text', value }); + block = stack[stack.length - 1]; + continue; + } -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. 
The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. + /** + * Quotes: '|"|` + */ -// See state defs from inflate.js -var BAD = 30; /* got a data error -- remain here until reset */ -var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ + if (value === CHAR_DOUBLE_QUOTE || value === CHAR_SINGLE_QUOTE || value === CHAR_BACKTICK) { + let open = value; + let next; -/* - Decode literal, length, and distance codes and write out the resulting - literal and match bytes until either not enough input or output is - available, an end-of-block is encountered, or a data error is encountered. - When large enough input and output buffers are supplied to inflate(), for - example, a 16K input buffer and a 64K output buffer, more than 95% of the - inflate execution time is spent in this routine. + if (options.keepQuotes !== true) { + value = ''; + } - Entry assumptions: + while (index < length && (next = advance())) { + if (next === CHAR_BACKSLASH) { + value += next + advance(); + continue; + } - state.mode === LEN - strm.avail_in >= 6 - strm.avail_out >= 258 - start >= strm.avail_out - state.bits < 8 + if (next === open) { + if (options.keepQuotes === true) value += next; + break; + } - On return, state.mode is one of: + value += next; + } - LEN -- ran out of enough output space or enough available input - TYPE -- reached end of block code, inflate() to interpret next block - BAD -- error in block data + push({ type: 'text', value }); + continue; + } - Notes: + /** + * Left curly brace: '{' + */ - - The maximum input bits used by a length/distance pair is 15 bits for the - length code, 5 bits for the length extra, 15 bits for the distance code, - and 13 bits for the distance extra. This totals 48 bits, or six bytes. - Therefore if strm.avail_in >= 6, then there is enough input to avoid - checking for available input while decoding. + if (value === CHAR_LEFT_CURLY_BRACE) { + depth++; - - The maximum bytes that a single length/distance pair can output is 258 - bytes, which is the maximum length that can be coded. inflate_fast() - requires strm.avail_out >= 258 for each loop to avoid checking for - output space. 
- */ -module.exports = function inflate_fast(strm, start) { - var state; - var _in; /* local strm.input */ - var last; /* have enough input while in < last */ - var _out; /* local strm.output */ - var beg; /* inflate()'s initial strm.output */ - var end; /* while out < end, enough space available */ -//#ifdef INFLATE_STRICT - var dmax; /* maximum distance from zlib header */ -//#endif - var wsize; /* window size or zero if not using window */ - var whave; /* valid bytes in the window */ - var wnext; /* window write index */ - // Use `s_window` instead `window`, avoid conflict with instrumentation tools - var s_window; /* allocated sliding window, if wsize != 0 */ - var hold; /* local strm.hold */ - var bits; /* local strm.bits */ - var lcode; /* local strm.lencode */ - var dcode; /* local strm.distcode */ - var lmask; /* mask for first level of length codes */ - var dmask; /* mask for first level of distance codes */ - var here; /* retrieved table entry */ - var op; /* code bits, operation, extra bits, or */ - /* window position, window bytes to copy */ - var len; /* match length, unused bytes */ - var dist; /* match distance */ - var from; /* where to copy match from */ - var from_source; + let dollar = prev.value && prev.value.slice(-1) === '$' || block.dollar === true; + let brace = { + type: 'brace', + open: true, + close: false, + dollar, + depth, + commas: 0, + ranges: 0, + nodes: [] + }; + block = push(brace); + stack.push(block); + push({ type: 'open', value }); + continue; + } - var input, output; // JS specific, because we have no pointers + /** + * Right curly brace: '}' + */ - /* copy state to local variables */ - state = strm.state; - //here = state.here; - _in = strm.next_in; - input = strm.input; - last = _in + (strm.avail_in - 5); - _out = strm.next_out; - output = strm.output; - beg = _out - (start - strm.avail_out); - end = _out + (strm.avail_out - 257); -//#ifdef INFLATE_STRICT - dmax = state.dmax; -//#endif - wsize = state.wsize; - whave = state.whave; - wnext = state.wnext; - s_window = state.window; - hold = state.hold; - bits = state.bits; - lcode = state.lencode; - dcode = state.distcode; - lmask = (1 << state.lenbits) - 1; - dmask = (1 << state.distbits) - 1; + if (value === CHAR_RIGHT_CURLY_BRACE) { + if (block.type !== 'brace') { + push({ type: 'text', value }); + continue; + } + let type = 'close'; + block = stack.pop(); + block.close = true; - /* decode literals and length/distances until end-of-block or not enough - input data or output space */ + push({ type, value }); + depth--; - top: - do { - if (bits < 15) { - hold += input[_in++] << bits; - bits += 8; - hold += input[_in++] << bits; - bits += 8; + block = stack[stack.length - 1]; + continue; } - here = lcode[hold & lmask]; + /** + * Comma: ',' + */ - dolen: - for (;;) { // Goto emulation - op = here >>> 24/*here.bits*/; - hold >>>= op; - bits -= op; - op = (here >>> 16) & 0xff/*here.op*/; - if (op === 0) { /* literal */ - //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
- // "inflate: literal '%c'\n" : - // "inflate: literal 0x%02x\n", here.val)); - output[_out++] = here & 0xffff/*here.val*/; + if (value === CHAR_COMMA && depth > 0) { + if (block.ranges > 0) { + block.ranges = 0; + let open = block.nodes.shift(); + block.nodes = [open, { type: 'text', value: stringify(block) }]; } - else if (op & 16) { /* length base */ - len = here & 0xffff/*here.val*/; - op &= 15; /* number of extra bits */ - if (op) { - if (bits < op) { - hold += input[_in++] << bits; - bits += 8; - } - len += hold & ((1 << op) - 1); - hold >>>= op; - bits -= op; + + push({ type: 'comma', value }); + block.commas++; + continue; + } + + /** + * Dot: '.' + */ + + if (value === CHAR_DOT && depth > 0 && block.commas === 0) { + let siblings = block.nodes; + + if (depth === 0 || siblings.length === 0) { + push({ type: 'text', value }); + continue; + } + + if (prev.type === 'dot') { + block.range = []; + prev.value += value; + prev.type = 'range'; + + if (block.nodes.length !== 3 && block.nodes.length !== 5) { + block.invalid = true; + block.ranges = 0; + prev.type = 'text'; + continue; } - //Tracevv((stderr, "inflate: length %u\n", len)); - if (bits < 15) { - hold += input[_in++] << bits; - bits += 8; - hold += input[_in++] << bits; - bits += 8; + + block.ranges++; + block.args = []; + continue; + } + + if (prev.type === 'range') { + siblings.pop(); + + let before = siblings[siblings.length - 1]; + before.value += prev.value + value; + prev = before; + block.ranges--; + continue; + } + + push({ type: 'dot', value }); + continue; + } + + /** + * Text + */ + + push({ type: 'text', value }); + } + + // Mark imbalanced braces and brackets as invalid + do { + block = stack.pop(); + + if (block.type !== 'root') { + block.nodes.forEach(node => { + if (!node.nodes) { + if (node.type === 'open') node.isOpen = true; + if (node.type === 'close') node.isClose = true; + if (!node.nodes) node.type = 'text'; + node.invalid = true; } - here = dcode[hold & dmask]; + }); - dodist: - for (;;) { // goto emulation - op = here >>> 24/*here.bits*/; - hold >>>= op; - bits -= op; - op = (here >>> 16) & 0xff/*here.op*/; + // get the location of the block on parent.nodes (block's siblings) + let parent = stack[stack.length - 1]; + let index = parent.nodes.indexOf(block); + // replace the (invalid) block with it's nodes + parent.nodes.splice(index, 1, ...block.nodes); + } + } while (stack.length > 0); - if (op & 16) { /* distance base */ - dist = here & 0xffff/*here.val*/; - op &= 15; /* number of extra bits */ - if (bits < op) { - hold += input[_in++] << bits; - bits += 8; - if (bits < op) { - hold += input[_in++] << bits; - bits += 8; - } - } - dist += hold & ((1 << op) - 1); -//#ifdef INFLATE_STRICT - if (dist > dmax) { - strm.msg = 'invalid distance too far back'; - state.mode = BAD; - break top; - } -//#endif - hold >>>= op; - bits -= op; - //Tracevv((stderr, "inflate: distance %u\n", dist)); - op = _out - beg; /* max distance in output */ - if (dist > op) { /* see if copy from window */ - op = dist - op; /* distance back in window */ - if (op > whave) { - if (state.sane) { - strm.msg = 'invalid distance too far back'; - state.mode = BAD; - break top; - } + push({ type: 'eos' }); + return ast; +}; -// (!) 
This block is disabled in zlib defaults, -// don't enable it for binary compatibility -//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR -// if (len <= op - whave) { -// do { -// output[_out++] = 0; -// } while (--len); -// continue top; -// } -// len -= op - whave; -// do { -// output[_out++] = 0; -// } while (--op > whave); -// if (op === 0) { -// from = _out - dist; -// do { -// output[_out++] = output[from++]; -// } while (--len); -// continue top; -// } -//#endif - } - from = 0; // window index - from_source = s_window; - if (wnext === 0) { /* very common case */ - from += wsize - op; - if (op < len) { /* some from window */ - len -= op; - do { - output[_out++] = s_window[from++]; - } while (--op); - from = _out - dist; /* rest from output */ - from_source = output; - } - } - else if (wnext < op) { /* wrap around window */ - from += wsize + wnext - op; - op -= wnext; - if (op < len) { /* some from end of window */ - len -= op; - do { - output[_out++] = s_window[from++]; - } while (--op); - from = 0; - if (wnext < len) { /* some from start of window */ - op = wnext; - len -= op; - do { - output[_out++] = s_window[from++]; - } while (--op); - from = _out - dist; /* rest from output */ - from_source = output; - } - } - } - else { /* contiguous in window */ - from += wnext - op; - if (op < len) { /* some from window */ - len -= op; - do { - output[_out++] = s_window[from++]; - } while (--op); - from = _out - dist; /* rest from output */ - from_source = output; - } - } - while (len > 2) { - output[_out++] = from_source[from++]; - output[_out++] = from_source[from++]; - output[_out++] = from_source[from++]; - len -= 3; - } - if (len) { - output[_out++] = from_source[from++]; - if (len > 1) { - output[_out++] = from_source[from++]; - } - } - } - else { - from = _out - dist; /* copy direct from output */ - do { /* minimum length is three */ - output[_out++] = output[from++]; - output[_out++] = output[from++]; - output[_out++] = output[from++]; - len -= 3; - } while (len > 2); - if (len) { - output[_out++] = output[from++]; - if (len > 1) { - output[_out++] = output[from++]; - } - } - } - } - else if ((op & 64) === 0) { /* 2nd level distance code */ - here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; - continue dodist; - } - else { - strm.msg = 'invalid distance code'; - state.mode = BAD; - break top; - } - - break; // need to emulate goto via "continue" - } - } - else if ((op & 64) === 0) { /* 2nd level length code */ - here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; - continue dolen; - } - else if (op & 32) { /* end-of-block */ - //Tracevv((stderr, "inflate: end of block\n")); - state.mode = TYPE; - break top; - } - else { - strm.msg = 'invalid literal/length code'; - state.mode = BAD; - break top; - } - - break; // need to emulate goto via "continue" - } - } while (_in < last && _out < end); - - /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ - len = bits >> 3; - _in -= len; - bits -= len << 3; - hold &= (1 << bits) - 1; - - /* update state and return */ - strm.next_in = _in; - strm.next_out = _out; - strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last)); - strm.avail_out = (_out < end ? 
257 + (end - _out) : 257 - (_out - end)); - state.hold = hold; - state.bits = bits; - return; -}; +module.exports = parse; /***/ }), -/***/ 182: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 8750: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.readdir = exports.readdirWithFileTypes = exports.read = void 0; -const fsStat = __webpack_require__(231); -const rpl = __webpack_require__(885); -const constants_1 = __webpack_require__(171); -const utils = __webpack_require__(933); -const common = __webpack_require__(185); -function read(directory, settings, callback) { - if (!settings.stats && constants_1.IS_SUPPORT_READDIR_WITH_FILE_TYPES) { - readdirWithFileTypes(directory, settings, callback); - return; - } - readdir(directory, settings, callback); -} -exports.read = read; -function readdirWithFileTypes(directory, settings, callback) { - settings.fs.readdir(directory, { withFileTypes: true }, (readdirError, dirents) => { - if (readdirError !== null) { - callFailureCallback(callback, readdirError); - return; - } - const entries = dirents.map((dirent) => ({ - dirent, - name: dirent.name, - path: common.joinPathSegments(directory, dirent.name, settings.pathSegmentSeparator) - })); - if (!settings.followSymbolicLinks) { - callSuccessCallback(callback, entries); - return; - } - const tasks = entries.map((entry) => makeRplTaskEntry(entry, settings)); - rpl(tasks, (rplError, rplEntries) => { - if (rplError !== null) { - callFailureCallback(callback, rplError); - return; - } - callSuccessCallback(callback, rplEntries); - }); - }); -} -exports.readdirWithFileTypes = readdirWithFileTypes; -function makeRplTaskEntry(entry, settings) { - return (done) => { - if (!entry.dirent.isSymbolicLink()) { - done(null, entry); - return; - } - settings.fs.stat(entry.path, (statError, stats) => { - if (statError !== null) { - if (settings.throwErrorOnBrokenSymbolicLink) { - done(statError); - return; - } - done(null, entry); - return; - } - entry.dirent = utils.fs.createDirentFromStats(entry.name, stats); - done(null, entry); - }); - }; -} -function readdir(directory, settings, callback) { - settings.fs.readdir(directory, (readdirError, names) => { - if (readdirError !== null) { - callFailureCallback(callback, readdirError); - return; - } - const tasks = names.map((name) => { - const path = common.joinPathSegments(directory, name, settings.pathSegmentSeparator); - return (done) => { - fsStat.stat(path, settings.fsStatSettings, (error, stats) => { - if (error !== null) { - done(error); - return; - } - const entry = { - name, - path, - dirent: utils.fs.createDirentFromStats(name, stats) - }; - if (settings.stats) { - entry.stats = stats; - } - done(null, entry); - }); - }; - }); - rpl(tasks, (rplError, entries) => { - if (rplError !== null) { - callFailureCallback(callback, rplError); - return; - } - callSuccessCallback(callback, entries); - }); - }); -} -exports.readdir = readdir; -function callFailureCallback(callback, error) { - callback(error); -} -function callSuccessCallback(callback, result) { - callback(null, result); -} +const utils = __nccwpck_require__(5207); -/***/ }), +module.exports = (ast, options = {}) => { + let stringify = (node, parent = {}) => { + let invalidBlock = options.escapeInvalid && utils.isInvalidBrace(parent); + let invalidNode = node.invalid === true && options.escapeInvalid === true; + let output = ''; -/***/ 185: -/***/ (function(__unusedmodule, exports) { 
+ if (node.value) { + if ((invalidBlock || invalidNode) && utils.isOpenOrClose(node)) { + return '\\' + node.value; + } + return node.value; + } -"use strict"; + if (node.value) { + return node.value; + } -Object.defineProperty(exports, "__esModule", { value: true }); -exports.joinPathSegments = void 0; -function joinPathSegments(a, b, separator) { - /** - * The correct handling of cases when the first segment is a root (`/`, `C:/`) or UNC path (`//?/C:/`). - */ - if (a.endsWith(separator)) { - return a + b; + if (node.nodes) { + for (let child of node.nodes) { + output += stringify(child); + } } - return a + separator + b; -} -exports.joinPathSegments = joinPathSegments; + return output; + }; + + return stringify(ast); +}; + /***/ }), -/***/ 199: -/***/ (function(module, __unusedexports, __webpack_require__) { - -"use strict"; - - -const path = __webpack_require__(622); -const WIN_SLASH = '\\\\/'; -const WIN_NO_SLASH = `[^${WIN_SLASH}]`; - -/** - * Posix glob regex - */ - -const DOT_LITERAL = '\\.'; -const PLUS_LITERAL = '\\+'; -const QMARK_LITERAL = '\\?'; -const SLASH_LITERAL = '\\/'; -const ONE_CHAR = '(?=.)'; -const QMARK = '[^/]'; -const END_ANCHOR = `(?:${SLASH_LITERAL}|$)`; -const START_ANCHOR = `(?:^|${SLASH_LITERAL})`; -const DOTS_SLASH = `${DOT_LITERAL}{1,2}${END_ANCHOR}`; -const NO_DOT = `(?!${DOT_LITERAL})`; -const NO_DOTS = `(?!${START_ANCHOR}${DOTS_SLASH})`; -const NO_DOT_SLASH = `(?!${DOT_LITERAL}{0,1}${END_ANCHOR})`; -const NO_DOTS_SLASH = `(?!${DOTS_SLASH})`; -const QMARK_NO_DOT = `[^.${SLASH_LITERAL}]`; -const STAR = `${QMARK}*?`; - -const POSIX_CHARS = { - DOT_LITERAL, - PLUS_LITERAL, - QMARK_LITERAL, - SLASH_LITERAL, - ONE_CHAR, - QMARK, - END_ANCHOR, - DOTS_SLASH, - NO_DOT, - NO_DOTS, - NO_DOT_SLASH, - NO_DOTS_SLASH, - QMARK_NO_DOT, - STAR, - START_ANCHOR -}; - -/** - * Windows glob regex - */ - -const WINDOWS_CHARS = { - ...POSIX_CHARS, - - SLASH_LITERAL: `[${WIN_SLASH}]`, - QMARK: WIN_NO_SLASH, - STAR: `${WIN_NO_SLASH}*?`, - DOTS_SLASH: `${DOT_LITERAL}{1,2}(?:[${WIN_SLASH}]|$)`, - NO_DOT: `(?!${DOT_LITERAL})`, - NO_DOTS: `(?!(?:^|[${WIN_SLASH}])${DOT_LITERAL}{1,2}(?:[${WIN_SLASH}]|$))`, - NO_DOT_SLASH: `(?!${DOT_LITERAL}{0,1}(?:[${WIN_SLASH}]|$))`, - NO_DOTS_SLASH: `(?!${DOT_LITERAL}{1,2}(?:[${WIN_SLASH}]|$))`, - QMARK_NO_DOT: `[^.${WIN_SLASH}]`, - START_ANCHOR: `(?:^|[${WIN_SLASH}])`, - END_ANCHOR: `(?:[${WIN_SLASH}]|$)` -}; - -/** - * POSIX Bracket Regex - */ - -const POSIX_REGEX_SOURCE = { - alnum: 'a-zA-Z0-9', - alpha: 'a-zA-Z', - ascii: '\\x00-\\x7F', - blank: ' \\t', - cntrl: '\\x00-\\x1F\\x7F', - digit: '0-9', - graph: '\\x21-\\x7E', - lower: 'a-z', - print: '\\x20-\\x7E ', - punct: '\\-!"#$%&\'()\\*+,./:;<=>?@[\\]^_`{|}~', - space: ' \\t\\r\\n\\v\\f', - upper: 'A-Z', - word: 'A-Za-z0-9_', - xdigit: 'A-Fa-f0-9' -}; - -module.exports = { - MAX_LENGTH: 1024 * 64, - POSIX_REGEX_SOURCE, - - // regular expressions - REGEX_BACKSLASH: /\\(?![*+?^${}(|)[\]])/g, - REGEX_NON_SPECIAL_CHARS: /^[^@![\].,$*+?^{}()|\\/]+/, - REGEX_SPECIAL_CHARS: /[-*+?.^${}(|)[\]]/, - REGEX_SPECIAL_CHARS_BACKREF: /(\\?)((\W)(\3*))/g, - REGEX_SPECIAL_CHARS_GLOBAL: /([-*+?.^${}(|)[\]])/g, - REGEX_REMOVE_BACKSLASH: /(?:\[.*?[^\\]\]|\\(?=.))/g, - - // Replace globs with equivalent patterns to reduce parsing time. - REPLACEMENTS: { - '***': '*', - '**/**': '**', - '**/**/**': '**' - }, - - // Digits - CHAR_0: 48, /* 0 */ - CHAR_9: 57, /* 9 */ - - // Alphabet chars. 
- CHAR_UPPERCASE_A: 65, /* A */ - CHAR_LOWERCASE_A: 97, /* a */ - CHAR_UPPERCASE_Z: 90, /* Z */ - CHAR_LOWERCASE_Z: 122, /* z */ - - CHAR_LEFT_PARENTHESES: 40, /* ( */ - CHAR_RIGHT_PARENTHESES: 41, /* ) */ - - CHAR_ASTERISK: 42, /* * */ - - // Non-alphabetic chars. - CHAR_AMPERSAND: 38, /* & */ - CHAR_AT: 64, /* @ */ - CHAR_BACKWARD_SLASH: 92, /* \ */ - CHAR_CARRIAGE_RETURN: 13, /* \r */ - CHAR_CIRCUMFLEX_ACCENT: 94, /* ^ */ - CHAR_COLON: 58, /* : */ - CHAR_COMMA: 44, /* , */ - CHAR_DOT: 46, /* . */ - CHAR_DOUBLE_QUOTE: 34, /* " */ - CHAR_EQUAL: 61, /* = */ - CHAR_EXCLAMATION_MARK: 33, /* ! */ - CHAR_FORM_FEED: 12, /* \f */ - CHAR_FORWARD_SLASH: 47, /* / */ - CHAR_GRAVE_ACCENT: 96, /* ` */ - CHAR_HASH: 35, /* # */ - CHAR_HYPHEN_MINUS: 45, /* - */ - CHAR_LEFT_ANGLE_BRACKET: 60, /* < */ - CHAR_LEFT_CURLY_BRACE: 123, /* { */ - CHAR_LEFT_SQUARE_BRACKET: 91, /* [ */ - CHAR_LINE_FEED: 10, /* \n */ - CHAR_NO_BREAK_SPACE: 160, /* \u00A0 */ - CHAR_PERCENT: 37, /* % */ - CHAR_PLUS: 43, /* + */ - CHAR_QUESTION_MARK: 63, /* ? */ - CHAR_RIGHT_ANGLE_BRACKET: 62, /* > */ - CHAR_RIGHT_CURLY_BRACE: 125, /* } */ - CHAR_RIGHT_SQUARE_BRACKET: 93, /* ] */ - CHAR_SEMICOLON: 59, /* ; */ - CHAR_SINGLE_QUOTE: 39, /* ' */ - CHAR_SPACE: 32, /* */ - CHAR_TAB: 9, /* \t */ - CHAR_UNDERSCORE: 95, /* _ */ - CHAR_VERTICAL_LINE: 124, /* | */ - CHAR_ZERO_WIDTH_NOBREAK_SPACE: 65279, /* \uFEFF */ - - SEP: path.sep, - - /** - * Create EXTGLOB_CHARS - */ - - extglobChars(chars) { - return { - '!': { type: 'negate', open: '(?:(?!(?:', close: `))${chars.STAR})` }, - '?': { type: 'qmark', open: '(?:', close: ')?' }, - '+': { type: 'plus', open: '(?:', close: ')+' }, - '*': { type: 'star', open: '(?:', close: ')*' }, - '@': { type: 'at', open: '(?:', close: ')' } - }; - }, - - /** - * Create GLOB_CHARS - */ - - globChars(win32) { - return win32 === true ? WINDOWS_CHARS : POSIX_CHARS; - } -}; - - -/***/ }), - -/***/ 210: -/***/ (function(__unusedmodule, exports) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -exports.createDirentFromStats = void 0; -class DirentFromStats { - constructor(name, stats) { - this.name = name; - this.isBlockDevice = stats.isBlockDevice.bind(stats); - this.isCharacterDevice = stats.isCharacterDevice.bind(stats); - this.isDirectory = stats.isDirectory.bind(stats); - this.isFIFO = stats.isFIFO.bind(stats); - this.isFile = stats.isFile.bind(stats); - this.isSocket = stats.isSocket.bind(stats); - this.isSymbolicLink = stats.isSymbolicLink.bind(stats); - } -} -function createDirentFromStats(name, stats) { - return new DirentFromStats(name, stats); -} -exports.createDirentFromStats = createDirentFromStats; - - -/***/ }), - -/***/ 225: -/***/ (function(__unusedmodule, exports) { +/***/ 5207: +/***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -3641,8844 +3162,7613 @@ exports.flatten = (...args) => { /***/ }), -/***/ 227: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 3268: +/***/ ((module) => { "use strict"; -const stringify = __webpack_require__(382); +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} -/** - * Constants - */ +function replaceAll(str, search, replacement) { + search = search instanceof RegExp ? search : new RegExp(escapeRegExp(search), 'g'); -const { - MAX_LENGTH, - CHAR_BACKSLASH, /* \ */ - CHAR_BACKTICK, /* ` */ - CHAR_COMMA, /* , */ - CHAR_DOT, /* . 
*/ - CHAR_LEFT_PARENTHESES, /* ( */ - CHAR_RIGHT_PARENTHESES, /* ) */ - CHAR_LEFT_CURLY_BRACE, /* { */ - CHAR_RIGHT_CURLY_BRACE, /* } */ - CHAR_LEFT_SQUARE_BRACKET, /* [ */ - CHAR_RIGHT_SQUARE_BRACKET, /* ] */ - CHAR_DOUBLE_QUOTE, /* " */ - CHAR_SINGLE_QUOTE, /* ' */ - CHAR_NO_BREAK_SPACE, - CHAR_ZERO_WIDTH_NOBREAK_SPACE -} = __webpack_require__(807); + return str.replace(search, replacement); +} -/** - * parse - */ +var CleanGitRef = { + clean: function clean(value) { + if (typeof value !== 'string') { + throw new Error('Expected a string, received: ' + value); + } -const parse = (input, options = {}) => { - if (typeof input !== 'string') { - throw new TypeError('Expected a string'); + value = replaceAll(value, './', '/'); + value = replaceAll(value, '..', '.'); + value = replaceAll(value, ' ', '-'); + value = replaceAll(value, /^[~^:?*\\\-]/g, ''); + value = replaceAll(value, /[~^:?*\\]/g, '-'); + value = replaceAll(value, /[~^:?*\\\-]$/g, ''); + value = replaceAll(value, '@{', '-'); + value = replaceAll(value, /\.$/g, ''); + value = replaceAll(value, /\/$/g, ''); + value = replaceAll(value, /\.lock$/g, ''); + return value; } +}; - let opts = options || {}; - let max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH; - if (input.length > max) { - throw new SyntaxError(`Input length (${input.length}), exceeds max characters (${max})`); - } - - let ast = { type: 'root', input, nodes: [] }; - let stack = [ast]; - let block = ast; - let prev = ast; - let brackets = 0; - let length = input.length; - let index = 0; - let depth = 0; - let value; - let memo = {}; - - /** - * Helpers - */ +module.exports = CleanGitRef; - const advance = () => input[index++]; - const push = node => { - if (node.type === 'text' && prev.type === 'dot') { - prev.type = 'text'; - } +/***/ }), - if (prev && prev.type === 'text' && node.type === 'text') { - prev.value += node.value; - return; - } +/***/ 3201: +/***/ ((__unused_webpack_module, exports) => { - block.nodes.push(node); - node.parent = block; - node.prev = prev; - prev = node; - return node; - }; +/*! crc32.js (C) 2014-present SheetJS -- http://sheetjs.com */ +/* vim: set ts=2: */ +/*exported CRC32 */ +var CRC32; +(function (factory) { + /*jshint ignore:start */ + /*eslint-disable */ + if(typeof DO_NOT_EXPORT_CRC === 'undefined') { + if(true) { + factory(exports); + } else {} + } else { + factory(CRC32 = {}); + } + /*eslint-enable */ + /*jshint ignore:end */ +}(function(CRC32) { +CRC32.version = '1.2.1'; +/*global Int32Array */ +function signed_crc_table() { + var c = 0, table = new Array(256); - push({ type: 'bos' }); + for(var n =0; n != 256; ++n){ + c = n; + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); + table[n] = c; + } - while (index < length) { - block = stack[stack.length - 1]; - value = advance(); + return typeof Int32Array !== 'undefined' ? new Int32Array(table) : table; +} - /** - * Invalid chars - */ +var T0 = signed_crc_table(); +function slice_by_16_tables(T) { + var c = 0, v = 0, n = 0, table = typeof Int32Array !== 'undefined' ? 
new Int32Array(4096) : new Array(4096) ; - if (value === CHAR_ZERO_WIDTH_NOBREAK_SPACE || value === CHAR_NO_BREAK_SPACE) { - continue; - } + for(n = 0; n != 256; ++n) table[n] = T[n]; + for(n = 0; n != 256; ++n) { + v = T[n]; + for(c = 256 + n; c < 4096; c += 256) v = table[c] = (v >>> 8) ^ T[v & 0xFF]; + } + var out = []; + for(n = 1; n != 16; ++n) out[n - 1] = typeof Int32Array !== 'undefined' ? table.subarray(n * 256, n * 256 + 256) : table.slice(n * 256, n * 256 + 256); + return out; +} +var TT = slice_by_16_tables(T0); +var T1 = TT[0], T2 = TT[1], T3 = TT[2], T4 = TT[3], T5 = TT[4]; +var T6 = TT[5], T7 = TT[6], T8 = TT[7], T9 = TT[8], Ta = TT[9]; +var Tb = TT[10], Tc = TT[11], Td = TT[12], Te = TT[13], Tf = TT[14]; +function crc32_bstr(bstr, seed) { + var C = seed ^ -1; + for(var i = 0, L = bstr.length; i < L;) C = (C>>>8) ^ T0[(C^bstr.charCodeAt(i++))&0xFF]; + return ~C; +} - /** - * Escaped chars - */ +function crc32_buf(B, seed) { + var C = seed ^ -1, L = B.length - 15, i = 0; + for(; i < L;) C = + Tf[B[i++] ^ (C & 255)] ^ + Te[B[i++] ^ ((C >> 8) & 255)] ^ + Td[B[i++] ^ ((C >> 16) & 255)] ^ + Tc[B[i++] ^ (C >>> 24)] ^ + Tb[B[i++]] ^ Ta[B[i++]] ^ T9[B[i++]] ^ T8[B[i++]] ^ + T7[B[i++]] ^ T6[B[i++]] ^ T5[B[i++]] ^ T4[B[i++]] ^ + T3[B[i++]] ^ T2[B[i++]] ^ T1[B[i++]] ^ T0[B[i++]]; + L += 15; + while(i < L) C = (C>>>8) ^ T0[(C^B[i++])&0xFF]; + return ~C; +} - if (value === CHAR_BACKSLASH) { - push({ type: 'text', value: (options.keepEscaping ? value : '') + advance() }); - continue; - } +function crc32_str(str, seed) { + var C = seed ^ -1; + for(var i = 0, L = str.length, c = 0, d = 0; i < L;) { + c = str.charCodeAt(i++); + if(c < 0x80) { + C = (C>>>8) ^ T0[(C^c)&0xFF]; + } else if(c < 0x800) { + C = (C>>>8) ^ T0[(C ^ (192|((c>>6)&31)))&0xFF]; + C = (C>>>8) ^ T0[(C ^ (128|(c&63)))&0xFF]; + } else if(c >= 0xD800 && c < 0xE000) { + c = (c&1023)+64; d = str.charCodeAt(i++)&1023; + C = (C>>>8) ^ T0[(C ^ (240|((c>>8)&7)))&0xFF]; + C = (C>>>8) ^ T0[(C ^ (128|((c>>2)&63)))&0xFF]; + C = (C>>>8) ^ T0[(C ^ (128|((d>>6)&15)|((c&3)<<4)))&0xFF]; + C = (C>>>8) ^ T0[(C ^ (128|(d&63)))&0xFF]; + } else { + C = (C>>>8) ^ T0[(C ^ (224|((c>>12)&15)))&0xFF]; + C = (C>>>8) ^ T0[(C ^ (128|((c>>6)&63)))&0xFF]; + C = (C>>>8) ^ T0[(C ^ (128|(c&63)))&0xFF]; + } + } + return ~C; +} +CRC32.table = T0; +// $FlowIgnore +CRC32.bstr = crc32_bstr; +// $FlowIgnore +CRC32.buf = crc32_buf; +// $FlowIgnore +CRC32.str = crc32_str; +})); - /** - * Right square bracket (literal): ']' - */ - if (value === CHAR_RIGHT_SQUARE_BRACKET) { - push({ type: 'text', value: '\\' + value }); - continue; - } +/***/ }), - /** - * Left square bracket: '[' - */ +/***/ 5211: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - if (value === CHAR_LEFT_SQUARE_BRACKET) { - brackets++; +// Copyright (c) 2006, 2008 Tony Garnock-Jones +// Copyright (c) 2006, 2008 LShift Ltd. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation files +// (the "Software"), to deal in the Software without restriction, +// including without limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of the Software, +// and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. - let closed = true; - let next; +var onp = __nccwpck_require__(8101); - while (index < length && (next = advance())) { - value += next; +function longestCommonSubsequence(file1, file2) { + var diff = new onp(file1, file2); + diff.compose(); + var ses = diff.getses(); - if (next === CHAR_LEFT_SQUARE_BRACKET) { - brackets++; - continue; + var root; + var prev; + var file1RevIdx = file1.length - 1, + file2RevIdx = file2.length - 1; + for (var i = ses.length - 1; i >= 0; --i) { + if (ses[i].t === diff.SES_COMMON) { + if (prev) { + prev.chain = { + file1index: file1RevIdx, + file2index: file2RevIdx, + chain: null + }; + prev = prev.chain; + } else { + root = { + file1index: file1RevIdx, + file2index: file2RevIdx, + chain: null + }; + prev = root; } + file1RevIdx--; + file2RevIdx--; + } else if (ses[i].t === diff.SES_DELETE) { + file1RevIdx--; + } else if (ses[i].t === diff.SES_ADD) { + file2RevIdx--; + } + } - if (next === CHAR_BACKSLASH) { - value += advance(); - continue; - } + var tail = { + file1index: -1, + file2index: -1, + chain: null + }; - if (next === CHAR_RIGHT_SQUARE_BRACKET) { - brackets--; + if (!prev) { + return tail; + } - if (brackets === 0) { - break; - } - } - } + prev.chain = tail; - push({ type: 'text', value }); - continue; - } + return root; +} - /** - * Parentheses - */ +function diffIndices(file1, file2) { + // We apply the LCS to give a simple representation of the + // offsets and lengths of mismatched chunks in the input + // files. This is used by diff3_merge_indices below. - if (value === CHAR_LEFT_PARENTHESES) { - block = push({ type: 'paren', nodes: [] }); - stack.push(block); - push({ type: 'text', value }); - continue; - } + var result = []; + var tail1 = file1.length; + var tail2 = file2.length; - if (value === CHAR_RIGHT_PARENTHESES) { - if (block.type !== 'paren') { - push({ type: 'text', value }); - continue; - } - block = stack.pop(); - push({ type: 'text', value }); - block = stack[stack.length - 1]; - continue; + for (var candidate = longestCommonSubsequence(file1, file2); candidate !== null; candidate = candidate.chain) { + var mismatchLength1 = tail1 - candidate.file1index - 1; + var mismatchLength2 = tail2 - candidate.file2index - 1; + tail1 = candidate.file1index; + tail2 = candidate.file2index; + + if (mismatchLength1 || mismatchLength2) { + result.push({ + file1: [tail1 + 1, mismatchLength1], + file2: [tail2 + 1, mismatchLength2] + }); } + } - /** - * Quotes: '|"|` - */ + result.reverse(); + return result; +} - if (value === CHAR_DOUBLE_QUOTE || value === CHAR_SINGLE_QUOTE || value === CHAR_BACKTICK) { - let open = value; - let next; +function diff3MergeIndices(a, o, b) { + // Given three files, A, O, and B, where both A and B are + // independently derived from O, returns a fairly complicated + // internal representation of merge decisions it's taken. The + // interested reader may wish to consult + // + // Sanjeev Khanna, Keshav Kunal, and Benjamin C. Pierce. "A + // Formal Investigation of Diff3." 
In Arvind and Prasad, + // editors, Foundations of Software Technology and Theoretical + // Computer Science (FSTTCS), December 2007. + // + // (http://www.cis.upenn.edu/~bcpierce/papers/diff3-short.pdf) + var i; - if (options.keepQuotes !== true) { - value = ''; - } + var m1 = diffIndices(o, a); + var m2 = diffIndices(o, b); - while (index < length && (next = advance())) { - if (next === CHAR_BACKSLASH) { - value += next + advance(); - continue; - } + var hunks = []; - if (next === open) { - if (options.keepQuotes === true) value += next; - break; - } + function addHunk(h, side) { + hunks.push([h.file1[0], side, h.file1[1], h.file2[0], h.file2[1]]); + } + for (i = 0; i < m1.length; i++) { + addHunk(m1[i], 0); + } + for (i = 0; i < m2.length; i++) { + addHunk(m2[i], 2); + } + hunks.sort(function(x, y) { + return x[0] - y[0] + }); - value += next; - } + var result = []; + var commonOffset = 0; - push({ type: 'text', value }); - continue; + function copyCommon(targetOffset) { + if (targetOffset > commonOffset) { + result.push([1, commonOffset, targetOffset - commonOffset]); + commonOffset = targetOffset; } + } - /** - * Left curly brace: '{' - */ - - if (value === CHAR_LEFT_CURLY_BRACE) { - depth++; + for (var hunkIndex = 0; hunkIndex < hunks.length; hunkIndex++) { + var firstHunkIndex = hunkIndex; + var hunk = hunks[hunkIndex]; + var regionLhs = hunk[0]; + var regionRhs = regionLhs + hunk[2]; + while (hunkIndex < hunks.length - 1) { + var maybeOverlapping = hunks[hunkIndex + 1]; + var maybeLhs = maybeOverlapping[0]; + if (maybeLhs > regionRhs) break; + regionRhs = Math.max(regionRhs, maybeLhs + maybeOverlapping[2]); + hunkIndex++; + } - let dollar = prev.value && prev.value.slice(-1) === '$' || block.dollar === true; - let brace = { - type: 'brace', - open: true, - close: false, - dollar, - depth, - commas: 0, - ranges: 0, - nodes: [] + copyCommon(regionLhs); + if (firstHunkIndex == hunkIndex) { + // The "overlap" was only one hunk long, meaning that + // there's no conflict here. Either a and o were the + // same, or b and o were the same. + if (hunk[4] > 0) { + result.push([hunk[1], hunk[3], hunk[4]]); + } + } else { + // A proper conflict. Determine the extents of the + // regions involved from a, o and b. Effectively merge + // all the hunks on the left into one giant hunk, and + // do the same for the right; then, correct for skew + // in the regions of o that each side changed, and + // report appropriate spans for the three sides. 
+ var regions = { + 0: [a.length, -1, o.length, -1], + 2: [b.length, -1, o.length, -1] }; - - block = push(brace); - stack.push(block); - push({ type: 'open', value }); - continue; + for (i = firstHunkIndex; i <= hunkIndex; i++) { + hunk = hunks[i]; + var side = hunk[1]; + var r = regions[side]; + var oLhs = hunk[0]; + var oRhs = oLhs + hunk[2]; + var abLhs = hunk[3]; + var abRhs = abLhs + hunk[4]; + r[0] = Math.min(abLhs, r[0]); + r[1] = Math.max(abRhs, r[1]); + r[2] = Math.min(oLhs, r[2]); + r[3] = Math.max(oRhs, r[3]); + } + var aLhs = regions[0][0] + (regionLhs - regions[0][2]); + var aRhs = regions[0][1] + (regionRhs - regions[0][3]); + var bLhs = regions[2][0] + (regionLhs - regions[2][2]); + var bRhs = regions[2][1] + (regionRhs - regions[2][3]); + result.push([-1, + aLhs, aRhs - aLhs, + regionLhs, regionRhs - regionLhs, + bLhs, bRhs - bLhs + ]); } + commonOffset = regionRhs; + } - /** - * Right curly brace: '}' - */ + copyCommon(o.length); + return result; +} - if (value === CHAR_RIGHT_CURLY_BRACE) { - if (block.type !== 'brace') { - push({ type: 'text', value }); - continue; - } +function diff3Merge(a, o, b) { + // Applies the output of Diff.diff3_merge_indices to actually + // construct the merged file; the returned result alternates + // between "ok" and "conflict" blocks. - let type = 'close'; - block = stack.pop(); - block.close = true; + var result = []; + var files = [a, o, b]; + var indices = diff3MergeIndices(a, o, b); - push({ type, value }); - depth--; + var okLines = []; - block = stack[stack.length - 1]; - continue; + function flushOk() { + if (okLines.length) { + result.push({ + ok: okLines + }); } + okLines = []; + } - /** - * Comma: ',' - */ + function pushOk(xs) { + for (var j = 0; j < xs.length; j++) { + okLines.push(xs[j]); + } + } - if (value === CHAR_COMMA && depth > 0) { - if (block.ranges > 0) { - block.ranges = 0; - let open = block.nodes.shift(); - block.nodes = [open, { type: 'text', value: stringify(block) }]; - } + function isTrueConflict(rec) { + if (rec[2] != rec[6]) return true; + var aoff = rec[1]; + var boff = rec[5]; + for (var j = 0; j < rec[2]; j++) { + if (a[j + aoff] != b[j + boff]) return true; + } + return false; + } - push({ type: 'comma', value }); - block.commas++; - continue; + for (var i = 0; i < indices.length; i++) { + var x = indices[i]; + var side = x[0]; + if (side == -1) { + if (!isTrueConflict(x)) { + pushOk(files[0].slice(x[1], x[1] + x[2])); + } else { + flushOk(); + result.push({ + conflict: { + a: a.slice(x[1], x[1] + x[2]), + aIndex: x[1], + o: o.slice(x[3], x[3] + x[4]), + oIndex: x[3], + b: b.slice(x[5], x[5] + x[6]), + bIndex: x[5] + } + }); + } + } else { + pushOk(files[side].slice(x[1], x[1] + x[2])); } + } - /** - * Dot: '.' 
- */ + flushOk(); + return result; +} - if (value === CHAR_DOT && depth > 0 && block.commas === 0) { - let siblings = block.nodes; - - if (depth === 0 || siblings.length === 0) { - push({ type: 'text', value }); - continue; - } +module.exports = diff3Merge; - if (prev.type === 'dot') { - block.range = []; - prev.value += value; - prev.type = 'range'; - if (block.nodes.length !== 3 && block.nodes.length !== 5) { - block.invalid = true; - block.ranges = 0; - prev.type = 'text'; - continue; - } +/***/ }), - block.ranges++; - block.args = []; - continue; - } +/***/ 8101: +/***/ ((module) => { - if (prev.type === 'range') { - siblings.pop(); +/* + * URL: https://github.com/cubicdaiya/onp + * + * Copyright (c) 2013 Tatsuhiko Kubo + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ - let before = siblings[siblings.length - 1]; - before.value += prev.value + value; - prev = before; - block.ranges--; - continue; - } +/** + * The algorithm implemented here is based on "An O(NP) Sequence Comparison Algorithm" + * by described by Sun Wu, Udi Manber and Gene Myers +*/ +module.exports = function (a_, b_) { + var a = a_, + b = b_, + m = a.length, + n = b.length, + reverse = false, + ed = null, + offset = m + 1, + path = [], + pathposi = [], + ses = [], + lcs = "", + SES_DELETE = -1, + SES_COMMON = 0, + SES_ADD = 1; - push({ type: 'dot', value }); - continue; - } + var tmp1, + tmp2; - /** - * Text - */ + var init = function () { + if (m >= n) { + tmp1 = a; + tmp2 = m; + a = b; + b = tmp1; + m = n; + n = tmp2; + reverse = true; + offset = m + 1; + } + }; - push({ type: 'text', value }); - } + var P = function (x, y, k) { + return { + 'x' : x, + 'y' : y, + 'k' : k, + }; + }; - // Mark imbalanced braces and brackets as invalid - do { - block = stack.pop(); + var seselem = function (elem, t) { + return { + 'elem' : elem, + 't' : t, + }; + }; - if (block.type !== 'root') { - block.nodes.forEach(node => { - if (!node.nodes) { - if (node.type === 'open') node.isOpen = true; - if (node.type === 'close') node.isClose = true; - if (!node.nodes) node.type = 'text'; - node.invalid = true; + var snake = function (k, p, pp) { + var r, x, y; + if (p > pp) { + r = path[k-1+offset]; + } else { + r = path[k+1+offset]; } - }); - // get the location of the block on parent.nodes (block's siblings) - let parent = stack[stack.length - 1]; - let index = parent.nodes.indexOf(block); - // replace the (invalid) block with it's nodes - parent.nodes.splice(index, 1, ...block.nodes); - } - } while (stack.length > 0); + y = Math.max(p, pp); + x = y - k; + while (x < m && y < n && a[x] === b[y]) { + ++x; + ++y; + } - push({ type: 'eos' }); - return ast; -}; + path[k+offset] = pathposi.length; + pathposi[pathposi.length] = new P(x, y, r); + return y; + }; -module.exports = parse; + var recordseq = function (epc) { + var x_idx, y_idx, px_idx, py_idx, i; + x_idx = y_idx = 1; + px_idx = py_idx = 0; + for (i=epc.length-1;i>=0;--i) { + while(px_idx < epc[i].x || py_idx < epc[i].y) { + if (epc[i].y - epc[i].x > py_idx - px_idx) { + if (reverse) { + ses[ses.length] = new seselem(b[py_idx], SES_DELETE); + } else { + ses[ses.length] = new seselem(b[py_idx], SES_ADD); + } + ++y_idx; + ++py_idx; + } else if (epc[i].y - epc[i].x < py_idx - px_idx) { + if (reverse) { + ses[ses.length] = new seselem(a[px_idx], SES_ADD); + } else { + ses[ses.length] = new seselem(a[px_idx], SES_DELETE); + } + ++x_idx; + ++px_idx; + } else { + ses[ses.length] = new seselem(a[px_idx], SES_COMMON); + lcs += a[px_idx]; + ++x_idx; + ++y_idx; + ++px_idx; + ++py_idx; + } + } + } + }; + init(); -/***/ }), + return { + SES_DELETE : -1, + SES_COMMON : 0, + SES_ADD : 1, + editdistance : function () { + return ed; + }, + getlcs : function () { + return lcs; + }, + getses : function () { + return ses; + }, + compose : function () { + var delta, size, fp, p, r, epc, i, k; + delta = n - m; + size = m + n + 3; + fp = {}; + for (i=0;i=delta+1;--k) { + fp[k+offset] = snake(k, fp[k-1+offset]+1, fp[k+1+offset]); + } + fp[delta+offset] = snake(delta, fp[delta-1+offset]+1, fp[delta+1+offset]); + } while (fp[delta+offset] !== n); -/***/ 231: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + ed = delta + 2 * p; -"use strict"; + r = path[delta+offset]; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.statSync = 
exports.stat = exports.Settings = void 0; -const async = __webpack_require__(728); -const sync = __webpack_require__(641); -const settings_1 = __webpack_require__(872); -exports.Settings = settings_1.default; -function stat(path, optionsOrSettingsOrCallback, callback) { - if (typeof optionsOrSettingsOrCallback === 'function') { - async.read(path, getSettings(), optionsOrSettingsOrCallback); - return; - } - async.read(path, getSettings(optionsOrSettingsOrCallback), callback); -} -exports.stat = stat; -function statSync(path, optionsOrSettings) { - const settings = getSettings(optionsOrSettings); - return sync.read(path, settings); -} -exports.statSync = statSync; -function getSettings(settingsOrOptions = {}) { - if (settingsOrOptions instanceof settings_1.default) { - return settingsOrOptions; - } - return new settings_1.default(settingsOrOptions); -} + epc = []; + while (r !== -1) { + epc[epc.length] = new P(pathposi[r].x, pathposi[r].y, null); + r = pathposi[r].k; + } + recordseq(epc); + } + }; +}; /***/ }), -/***/ 246: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 3664: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -// Top level file is just a mixin of submodules & constants - -var assign = __webpack_require__(999).assign; - -var deflate = __webpack_require__(259); -var inflate = __webpack_require__(832); -var constants = __webpack_require__(691); - -var pako = {}; - -assign(pako, deflate, inflate, constants); - -module.exports = pako; +const taskManager = __nccwpck_require__(2708); +const patternManager = __nccwpck_require__(8306); +const async_1 = __nccwpck_require__(5679); +const stream_1 = __nccwpck_require__(4630); +const sync_1 = __nccwpck_require__(2405); +const settings_1 = __nccwpck_require__(952); +const utils = __nccwpck_require__(5444); +async function FastGlob(source, options) { + assertPatternsInput(source); + const works = getWorks(source, async_1.default, options); + const result = await Promise.all(works); + return utils.array.flatten(result); +} +// https://github.com/typescript-eslint/typescript-eslint/issues/60 +// eslint-disable-next-line no-redeclare +(function (FastGlob) { + function sync(source, options) { + assertPatternsInput(source); + const works = getWorks(source, sync_1.default, options); + return utils.array.flatten(works); + } + FastGlob.sync = sync; + function stream(source, options) { + assertPatternsInput(source); + const works = getWorks(source, stream_1.default, options); + /** + * The stream returned by the provider cannot work with an asynchronous iterator. + * To support asynchronous iterators, regardless of the number of tasks, we always multiplex streams. + * This affects performance (+25%). I don't see best solution right now. 
+ */ + return utils.stream.merge(works); + } + FastGlob.stream = stream; + function generateTasks(source, options) { + assertPatternsInput(source); + const patterns = patternManager.transform([].concat(source)); + const settings = new settings_1.default(options); + return taskManager.generate(patterns, settings); + } + FastGlob.generateTasks = generateTasks; + function isDynamicPattern(source, options) { + assertPatternsInput(source); + const settings = new settings_1.default(options); + return utils.pattern.isDynamicPattern(source, settings); + } + FastGlob.isDynamicPattern = isDynamicPattern; + function escapePath(source) { + assertPatternsInput(source); + return utils.path.escape(source); + } + FastGlob.escapePath = escapePath; +})(FastGlob || (FastGlob = {})); +function getWorks(source, _Provider, options) { + const patterns = patternManager.transform([].concat(source)); + const settings = new settings_1.default(options); + const tasks = taskManager.generate(patterns, settings); + const provider = new _Provider(settings); + return tasks.map(provider.read, provider); +} +function assertPatternsInput(input) { + const source = [].concat(input); + const isValidSource = source.every((item) => utils.string.isString(item) && !utils.string.isEmpty(item)); + if (!isValidSource) { + throw new TypeError('Patterns must be a string (non empty) or an array of strings'); + } +} +module.exports = FastGlob; /***/ }), -/***/ 253: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 8306: +/***/ ((__unused_webpack_module, exports) => { "use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.removeDuplicateSlashes = exports.transform = void 0; +/** + * Matches a sequence of two or more consecutive slashes, excluding the first two slashes at the beginning of the string. + * The latter is due to the presence of the device path at the beginning of the UNC path. + * @todo rewrite to negative lookbehind with the next major release. + */ +const DOUBLE_SLASH_RE = /(?!^)\/{2,}/g; +function transform(patterns) { + return patterns.map((pattern) => removeDuplicateSlashes(pattern)); +} +exports.transform = transform; +/** + * This package only works with forward slashes as a path separator. + * Because of this, we cannot use the standard `path.normalize` method, because on Windows platform it will use of backslashes. 
+ */ +function removeDuplicateSlashes(pattern) { + return pattern.replace(DOUBLE_SLASH_RE, '/'); +} +exports.removeDuplicateSlashes = removeDuplicateSlashes; + -var gitUp = __webpack_require__(45); +/***/ }), + +/***/ 2708: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.convertPatternGroupToTask = exports.convertPatternGroupsToTasks = exports.groupPatternsByBaseDirectory = exports.getNegativePatternsAsPositive = exports.getPositivePatterns = exports.convertPatternsToTasks = exports.generate = void 0; +const utils = __nccwpck_require__(5444); +function generate(patterns, settings) { + const positivePatterns = getPositivePatterns(patterns); + const negativePatterns = getNegativePatternsAsPositive(patterns, settings.ignore); + const staticPatterns = positivePatterns.filter((pattern) => utils.pattern.isStaticPattern(pattern, settings)); + const dynamicPatterns = positivePatterns.filter((pattern) => utils.pattern.isDynamicPattern(pattern, settings)); + const staticTasks = convertPatternsToTasks(staticPatterns, negativePatterns, /* dynamic */ false); + const dynamicTasks = convertPatternsToTasks(dynamicPatterns, negativePatterns, /* dynamic */ true); + return staticTasks.concat(dynamicTasks); +} +exports.generate = generate; /** - * gitUrlParse - * Parses a Git url. - * - * @name gitUrlParse - * @function - * @param {String} url The Git url to parse. - * @return {GitUrl} The `GitUrl` object containing: - * - * - `protocols` (Array): An array with the url protocols (usually it has one element). - * - `port` (null|Number): The domain port. - * - `resource` (String): The url domain (including subdomains). - * - `user` (String): The authentication user (usually for ssh urls). - * - `pathname` (String): The url pathname. - * - `hash` (String): The url hash. - * - `search` (String): The url querystring value. - * - `href` (String): The input url. - * - `protocol` (String): The git url protocol. - * - `token` (String): The oauth token (could appear in the https urls). - * - `source` (String): The Git provider (e.g. `"github.com"`). - * - `owner` (String): The repository owner. - * - `name` (String): The repository name. - * - `ref` (String): The repository ref (e.g., "master" or "dev"). - * - `filepath` (String): A filepath relative to the repository root. - * - `filepathtype` (String): The type of filepath in the url ("blob" or "tree"). - * - `full_name` (String): The owner and name values in the `owner/name` format. - * - `toString` (Function): A function to stringify the parsed url into another url type. - * - `organization` (String): The organization the owner belongs to. This is CloudForge specific. - * - `git_suffix` (Boolean): Whether to add the `.git` suffix or not. + * Returns tasks grouped by basic pattern directories. * + * Patterns that can be found inside (`./`) and outside (`../`) the current directory are handled separately. + * This is necessary because directory traversal starts at the base directory and goes deeper. 
*/ -function gitUrlParse(url) { - - if (typeof url !== "string") { - throw new Error("The url must be a string."); +function convertPatternsToTasks(positive, negative, dynamic) { + const tasks = []; + const patternsOutsideCurrentDirectory = utils.pattern.getPatternsOutsideCurrentDirectory(positive); + const patternsInsideCurrentDirectory = utils.pattern.getPatternsInsideCurrentDirectory(positive); + const outsideCurrentDirectoryGroup = groupPatternsByBaseDirectory(patternsOutsideCurrentDirectory); + const insideCurrentDirectoryGroup = groupPatternsByBaseDirectory(patternsInsideCurrentDirectory); + tasks.push(...convertPatternGroupsToTasks(outsideCurrentDirectoryGroup, negative, dynamic)); + /* + * For the sake of reducing future accesses to the file system, we merge all tasks within the current directory + * into a global task, if at least one pattern refers to the root (`.`). In this case, the global task covers the rest. + */ + if ('.' in insideCurrentDirectoryGroup) { + tasks.push(convertPatternGroupToTask('.', patternsInsideCurrentDirectory, negative, dynamic)); + } + else { + tasks.push(...convertPatternGroupsToTasks(insideCurrentDirectoryGroup, negative, dynamic)); } + return tasks; +} +exports.convertPatternsToTasks = convertPatternsToTasks; +function getPositivePatterns(patterns) { + return utils.pattern.getPositivePatterns(patterns); +} +exports.getPositivePatterns = getPositivePatterns; +function getNegativePatternsAsPositive(patterns, ignore) { + const negative = utils.pattern.getNegativePatterns(patterns).concat(ignore); + const positive = negative.map(utils.pattern.convertToPositivePattern); + return positive; +} +exports.getNegativePatternsAsPositive = getNegativePatternsAsPositive; +function groupPatternsByBaseDirectory(patterns) { + const group = {}; + return patterns.reduce((collection, pattern) => { + const base = utils.pattern.getBaseDirectory(pattern); + if (base in collection) { + collection[base].push(pattern); + } + else { + collection[base] = [pattern]; + } + return collection; + }, group); +} +exports.groupPatternsByBaseDirectory = groupPatternsByBaseDirectory; +function convertPatternGroupsToTasks(positive, negative, dynamic) { + return Object.keys(positive).map((base) => { + return convertPatternGroupToTask(base, positive[base], negative, dynamic); + }); +} +exports.convertPatternGroupsToTasks = convertPatternGroupsToTasks; +function convertPatternGroupToTask(base, positive, negative, dynamic) { + return { + dynamic, + positive, + negative, + base, + patterns: [].concat(positive, negative.map(utils.pattern.convertToNegativePattern)) + }; +} +exports.convertPatternGroupToTask = convertPatternGroupToTask; - var shorthandRe = /^([a-z\d-]{1,39})\/([-\.\w]{1,100})$/i; - if (shorthandRe.test(url)) { - url = "https://github.com/" + url; +/***/ }), + +/***/ 5679: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +const stream_1 = __nccwpck_require__(2083); +const provider_1 = __nccwpck_require__(257); +class ProviderAsync extends provider_1.default { + constructor() { + super(...arguments); + this._reader = new stream_1.default(this._settings); + } + read(task) { + const root = this._getRootDirectory(task); + const options = this._getReaderOptions(task); + const entries = []; + return new Promise((resolve, reject) => { + const stream = this.api(root, task, options); + stream.once('error', reject); + stream.on('data', (entry) => entries.push(options.transform(entry))); 
+ stream.once('end', () => resolve(entries)); + }); + } + api(root, task, options) { + if (task.dynamic) { + return this._reader.dynamic(root, options); + } + return this._reader.static(task.patterns, options); } +} +exports["default"] = ProviderAsync; - var urlInfo = gitUp(url), - sourceParts = urlInfo.resource.split("."), - splits = null; - urlInfo.toString = function (type) { - return gitUrlParse.stringify(this, type); - }; +/***/ }), - urlInfo.source = sourceParts.length > 2 ? sourceParts.slice(1 - sourceParts.length).join(".") : urlInfo.source = urlInfo.resource; +/***/ 6983: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // Note: Some hosting services (e.g. Visual Studio Team Services) allow whitespace characters - // in the repository and owner names so we decode the URL pieces to get the correct result - urlInfo.git_suffix = /\.git$/.test(urlInfo.pathname); - urlInfo.name = decodeURIComponent((urlInfo.pathname || urlInfo.href).replace(/(^\/)|(\/$)/g, '').replace(/\.git$/, "")); - urlInfo.owner = decodeURIComponent(urlInfo.user); +"use strict"; - switch (urlInfo.source) { - case "git.cloudforge.com": - urlInfo.owner = urlInfo.user; - urlInfo.organization = sourceParts[0]; - urlInfo.source = "cloudforge.com"; - break; - case "visualstudio.com": - // Handle VSTS SSH URLs - if (urlInfo.resource === 'vs-ssh.visualstudio.com') { - splits = urlInfo.name.split("/"); - if (splits.length === 4) { - urlInfo.organization = splits[1]; - urlInfo.owner = splits[2]; - urlInfo.name = splits[3]; - urlInfo.full_name = splits[2] + '/' + splits[3]; - } - break; - } else { - splits = urlInfo.name.split("/"); - if (splits.length === 2) { - urlInfo.owner = splits[1]; - urlInfo.name = splits[1]; - urlInfo.full_name = '_git/' + urlInfo.name; - } else if (splits.length === 3) { - urlInfo.name = splits[2]; - if (splits[0] === 'DefaultCollection') { - urlInfo.owner = splits[2]; - urlInfo.organization = splits[0]; - urlInfo.full_name = urlInfo.organization + '/_git/' + urlInfo.name; - } else { - urlInfo.owner = splits[0]; - urlInfo.full_name = urlInfo.owner + '/_git/' + urlInfo.name; - } - } else if (splits.length === 4) { - urlInfo.organization = splits[0]; - urlInfo.owner = splits[1]; - urlInfo.name = splits[3]; - urlInfo.full_name = urlInfo.organization + '/' + urlInfo.owner + '/_git/' + urlInfo.name; - } - break; - } - - // Azure DevOps (formerly Visual Studio Team Services) - case "dev.azure.com": - case "azure.com": - if (urlInfo.resource === 'ssh.dev.azure.com') { - splits = urlInfo.name.split("/"); - if (splits.length === 4) { - urlInfo.organization = splits[1]; - urlInfo.owner = splits[2]; - urlInfo.name = splits[3]; - } - break; - } else { - splits = urlInfo.name.split("/"); - if (splits.length === 5) { - urlInfo.organization = splits[0]; - urlInfo.owner = splits[1]; - urlInfo.name = splits[4]; - urlInfo.full_name = '_git/' + urlInfo.name; - } else if (splits.length === 3) { - urlInfo.name = splits[2]; - if (splits[0] === 'DefaultCollection') { - urlInfo.owner = splits[2]; - urlInfo.organization = splits[0]; - urlInfo.full_name = urlInfo.organization + '/_git/' + urlInfo.name; - } else { - urlInfo.owner = splits[0]; - urlInfo.full_name = urlInfo.owner + '/_git/' + urlInfo.name; - } - } else if (splits.length === 4) { - urlInfo.organization = splits[0]; - urlInfo.owner = splits[1]; - urlInfo.name = splits[3]; - urlInfo.full_name = urlInfo.organization + '/' + urlInfo.owner + '/_git/' + urlInfo.name; - } - if (urlInfo.query && urlInfo.query['path']) { - urlInfo.filepath = 
urlInfo.query['path'].replace(/^\/+/g, ''); // Strip leading slash (/) - } - if (urlInfo.query && urlInfo.query['version']) { - // version=GB - urlInfo.ref = urlInfo.query['version'].replace(/^GB/, ''); // remove GB - } - break; - } - default: - splits = urlInfo.name.split("/"); - var nameIndex = splits.length - 1; - if (splits.length >= 2) { - var dashIndex = splits.indexOf("-", 2); - var blobIndex = splits.indexOf("blob", 2); - var treeIndex = splits.indexOf("tree", 2); - var commitIndex = splits.indexOf("commit", 2); - var srcIndex = splits.indexOf("src", 2); - var rawIndex = splits.indexOf("raw", 2); - var editIndex = splits.indexOf("edit", 2); - nameIndex = dashIndex > 0 ? dashIndex - 1 : blobIndex > 0 ? blobIndex - 1 : treeIndex > 0 ? treeIndex - 1 : commitIndex > 0 ? commitIndex - 1 : srcIndex > 0 ? srcIndex - 1 : rawIndex > 0 ? rawIndex - 1 : editIndex > 0 ? editIndex - 1 : nameIndex; - - urlInfo.owner = splits.slice(0, nameIndex).join('/'); - urlInfo.name = splits[nameIndex]; - if (commitIndex) { - urlInfo.commit = splits[nameIndex + 2]; - } - } - - urlInfo.ref = ""; - urlInfo.filepathtype = ""; - urlInfo.filepath = ""; - var offsetNameIndex = splits.length > nameIndex && splits[nameIndex + 1] === "-" ? nameIndex + 1 : nameIndex; - - if (splits.length > offsetNameIndex + 2 && ["raw", "src", "blob", "tree", "edit"].indexOf(splits[offsetNameIndex + 1]) >= 0) { - urlInfo.filepathtype = splits[offsetNameIndex + 1]; - urlInfo.ref = splits[offsetNameIndex + 2]; - if (splits.length > offsetNameIndex + 3) { - urlInfo.filepath = splits.slice(offsetNameIndex + 3).join('/'); - } - } - urlInfo.organization = urlInfo.owner; - break; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const utils = __nccwpck_require__(5444); +const partial_1 = __nccwpck_require__(5295); +class DeepFilter { + constructor(_settings, _micromatchOptions) { + this._settings = _settings; + this._micromatchOptions = _micromatchOptions; } - - if (!urlInfo.full_name) { - urlInfo.full_name = urlInfo.owner; - if (urlInfo.name) { - urlInfo.full_name && (urlInfo.full_name += "/"); - urlInfo.full_name += urlInfo.name; - } + getFilter(basePath, positive, negative) { + const matcher = this._getMatcher(positive); + const negativeRe = this._getNegativePatternsRe(negative); + return (entry) => this._filter(basePath, entry, matcher, negativeRe); } - // Bitbucket Server - if (urlInfo.owner.startsWith("scm/")) { - urlInfo.source = "bitbucket-server"; - urlInfo.owner = urlInfo.owner.replace("scm/", ""); - urlInfo.organization = urlInfo.owner; - urlInfo.full_name = urlInfo.owner + "/" + urlInfo.name; + _getMatcher(patterns) { + return new partial_1.default(patterns, this._settings, this._micromatchOptions); } - - var bitbucket = /(projects|users)\/(.*?)\/repos\/(.*?)((\/.*$)|$)/; - var matches = bitbucket.exec(urlInfo.pathname); - if (matches != null) { - urlInfo.source = "bitbucket-server"; - if (matches[1] === "users") { - urlInfo.owner = "~" + matches[2]; - } else { - urlInfo.owner = matches[2]; + _getNegativePatternsRe(patterns) { + const affectDepthOfReadingPatterns = patterns.filter(utils.pattern.isAffectDepthOfReadingPattern); + return utils.pattern.convertPatternsToRe(affectDepthOfReadingPatterns, this._micromatchOptions); + } + _filter(basePath, entry, matcher, negativeRe) { + if (this._isSkippedByDeep(basePath, entry.path)) { + return false; } - - urlInfo.organization = urlInfo.owner; - urlInfo.name = matches[3]; - - splits = matches[4].split("/"); - if (splits.length > 1) { - if (["raw", 
"browse"].indexOf(splits[1]) >= 0) { - urlInfo.filepathtype = splits[1]; - if (splits.length > 2) { - urlInfo.filepath = splits.slice(2).join('/'); - } - } else if (splits[1] === "commits" && splits.length > 2) { - urlInfo.commit = splits[2]; - } + if (this._isSkippedSymbolicLink(entry)) { + return false; } - urlInfo.full_name = urlInfo.owner + "/" + urlInfo.name; - - if (urlInfo.query.at) { - urlInfo.ref = urlInfo.query.at; - } else { - urlInfo.ref = ""; + const filepath = utils.path.removeLeadingDotSegment(entry.path); + if (this._isSkippedByPositivePatterns(filepath, matcher)) { + return false; } + return this._isSkippedByNegativePatterns(filepath, negativeRe); } - return urlInfo; -} - -/** - * stringify - * Stringifies a `GitUrl` object. - * - * @name stringify - * @function - * @param {GitUrl} obj The parsed Git url object. - * @param {String} type The type of the stringified url (default `obj.protocol`). - * @return {String} The stringified url. - */ -gitUrlParse.stringify = function (obj, type) { - type = type || (obj.protocols && obj.protocols.length ? obj.protocols.join('+') : obj.protocol); - var port = obj.port ? ":" + obj.port : ''; - var user = obj.user || 'git'; - var maybeGitSuffix = obj.git_suffix ? ".git" : ""; - switch (type) { - case "ssh": - if (port) return "ssh://" + user + "@" + obj.resource + port + "/" + obj.full_name + maybeGitSuffix;else return user + "@" + obj.resource + ":" + obj.full_name + maybeGitSuffix; - case "git+ssh": - case "ssh+git": - case "ftp": - case "ftps": - return type + "://" + user + "@" + obj.resource + port + "/" + obj.full_name + maybeGitSuffix; - case "http": - case "https": - var auth = obj.token ? buildToken(obj) : obj.user && (obj.protocols.includes('http') || obj.protocols.includes('https')) ? obj.user + "@" : ""; - return type + "://" + auth + obj.resource + port + "/" + buildPath(obj) + maybeGitSuffix; - default: - return obj.href; + _isSkippedByDeep(basePath, entryPath) { + /** + * Avoid unnecessary depth calculations when it doesn't matter. + */ + if (this._settings.deep === Infinity) { + return false; + } + return this._getEntryLevel(basePath, entryPath) >= this._settings.deep; } -}; - -/*! - * buildToken - * Builds OAuth token prefix (helper function) - * - * @name buildToken - * @function - * @param {GitUrl} obj The parsed Git url object. 
- * @return {String} token prefix - */ -function buildToken(obj) { - switch (obj.source) { - case "bitbucket.org": - return "x-token-auth:" + obj.token + "@"; - default: - return obj.token + "@"; + _getEntryLevel(basePath, entryPath) { + const entryPathDepth = entryPath.split('/').length; + if (basePath === '') { + return entryPathDepth; + } + const basePathDepth = basePath.split('/').length; + return entryPathDepth - basePathDepth; } -} - -function buildPath(obj) { - switch (obj.source) { - case "bitbucket-server": - return "scm/" + obj.full_name; - default: - return "" + obj.full_name; - + _isSkippedSymbolicLink(entry) { + return !this._settings.followSymbolicLinks && entry.dirent.isSymbolicLink(); + } + _isSkippedByPositivePatterns(entryPath, matcher) { + return !this._settings.baseNameMatch && !matcher.match(entryPath); + } + _isSkippedByNegativePatterns(entryPath, patternsRe) { + return !utils.pattern.matchAny(entryPath, patternsRe); } } +exports["default"] = DeepFilter; -module.exports = gitUrlParse; /***/ }), -/***/ 259: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 1343: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const utils = __nccwpck_require__(5444); +class EntryFilter { + constructor(_settings, _micromatchOptions) { + this._settings = _settings; + this._micromatchOptions = _micromatchOptions; + this.index = new Map(); + } + getFilter(positive, negative) { + const positiveRe = utils.pattern.convertPatternsToRe(positive, this._micromatchOptions); + const negativeRe = utils.pattern.convertPatternsToRe(negative, this._micromatchOptions); + return (entry) => this._filter(entry, positiveRe, negativeRe); + } + _filter(entry, positiveRe, negativeRe) { + if (this._settings.unique && this._isDuplicateEntry(entry)) { + return false; + } + if (this._onlyFileFilter(entry) || this._onlyDirectoryFilter(entry)) { + return false; + } + if (this._isSkippedByAbsoluteNegativePatterns(entry.path, negativeRe)) { + return false; + } + const filepath = this._settings.baseNameMatch ? entry.name : entry.path; + const isMatched = this._isMatchToPatterns(filepath, positiveRe) && !this._isMatchToPatterns(entry.path, negativeRe); + if (this._settings.unique && isMatched) { + this._createIndexRecord(entry); + } + return isMatched; + } + _isDuplicateEntry(entry) { + return this.index.has(entry.path); + } + _createIndexRecord(entry) { + this.index.set(entry.path, undefined); + } + _onlyFileFilter(entry) { + return this._settings.onlyFiles && !entry.dirent.isFile(); + } + _onlyDirectoryFilter(entry) { + return this._settings.onlyDirectories && !entry.dirent.isDirectory(); + } + _isSkippedByAbsoluteNegativePatterns(entryPath, patternsRe) { + if (!this._settings.absolute) { + return false; + } + const fullpath = utils.path.makeAbsolute(this._settings.cwd, entryPath); + return utils.pattern.matchAny(fullpath, patternsRe); + } + /** + * First, just trying to apply patterns to the path. + * Second, trying to apply patterns to the path with final slash. 
+ */ + _isMatchToPatterns(entryPath, patternsRe) { + const filepath = utils.path.removeLeadingDotSegment(entryPath); + return utils.pattern.matchAny(filepath, patternsRe) || utils.pattern.matchAny(filepath + '/', patternsRe); + } +} +exports["default"] = EntryFilter; -var zlib_deflate = __webpack_require__(378); -var utils = __webpack_require__(999); -var strings = __webpack_require__(279); -var msg = __webpack_require__(868); -var ZStream = __webpack_require__(991); - -var toString = Object.prototype.toString; - -/* Public constants ==========================================================*/ -/* ===========================================================================*/ - -var Z_NO_FLUSH = 0; -var Z_FINISH = 4; - -var Z_OK = 0; -var Z_STREAM_END = 1; -var Z_SYNC_FLUSH = 2; - -var Z_DEFAULT_COMPRESSION = -1; - -var Z_DEFAULT_STRATEGY = 0; +/***/ }), -var Z_DEFLATED = 8; +/***/ 6654: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/* ===========================================================================*/ +"use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const utils = __nccwpck_require__(5444); +class ErrorFilter { + constructor(_settings) { + this._settings = _settings; + } + getFilter() { + return (error) => this._isNonFatalError(error); + } + _isNonFatalError(error) { + return utils.errno.isEnoentCodeError(error) || this._settings.suppressErrors; + } +} +exports["default"] = ErrorFilter; -/** - * class Deflate - * - * Generic JS-style wrapper for zlib calls. If you don't need - * streaming behaviour - use more simple functions: [[deflate]], - * [[deflateRaw]] and [[gzip]]. - **/ -/* internal - * Deflate.chunks -> Array - * - * Chunks of output data, if [[Deflate#onData]] not overridden. - **/ +/***/ }), -/** - * Deflate.result -> Uint8Array|Array - * - * Compressed result, generated by default [[Deflate#onData]] - * and [[Deflate#onEnd]] handlers. Filled after you push last chunk - * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you - * push a chunk with explicit flush (call [[Deflate#push]] with - * `Z_SYNC_FLUSH` param). - **/ +/***/ 2576: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/** - * Deflate.err -> Number - * - * Error code after deflate finished. 0 (Z_OK) on success. - * You will not need it in real life, because deflate errors - * are possible only on wrong options or bad `onData` / `onEnd` - * custom handlers. - **/ +"use strict"; -/** - * Deflate.msg -> String - * - * Error message, if [[Deflate.err]] != 0 - **/ +Object.defineProperty(exports, "__esModule", ({ value: true })); +const utils = __nccwpck_require__(5444); +class Matcher { + constructor(_patterns, _settings, _micromatchOptions) { + this._patterns = _patterns; + this._settings = _settings; + this._micromatchOptions = _micromatchOptions; + this._storage = []; + this._fillStorage(); + } + _fillStorage() { + /** + * The original pattern may include `{,*,**,a/*}`, which will lead to problems with matching (unresolved level). + * So, before expand patterns with brace expansion into separated patterns. 
+ */ + const patterns = utils.pattern.expandPatternsWithBraceExpansion(this._patterns); + for (const pattern of patterns) { + const segments = this._getPatternSegments(pattern); + const sections = this._splitSegmentsIntoSections(segments); + this._storage.push({ + complete: sections.length <= 1, + pattern, + segments, + sections + }); + } + } + _getPatternSegments(pattern) { + const parts = utils.pattern.getPatternParts(pattern, this._micromatchOptions); + return parts.map((part) => { + const dynamic = utils.pattern.isDynamicPattern(part, this._settings); + if (!dynamic) { + return { + dynamic: false, + pattern: part + }; + } + return { + dynamic: true, + pattern: part, + patternRe: utils.pattern.makeRe(part, this._micromatchOptions) + }; + }); + } + _splitSegmentsIntoSections(segments) { + return utils.array.splitWhen(segments, (segment) => segment.dynamic && utils.pattern.hasGlobStar(segment.pattern)); + } +} +exports["default"] = Matcher; -/** - * new Deflate(options) - * - options (Object): zlib deflate options. - * - * Creates new deflator instance with specified params. Throws exception - * on bad params. Supported options: - * - * - `level` - * - `windowBits` - * - `memLevel` - * - `strategy` - * - `dictionary` - * - * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) - * for more information on these. - * - * Additional options, for internal needs: - * - * - `chunkSize` - size of generated data chunks (16K by default) - * - `raw` (Boolean) - do raw deflate - * - `gzip` (Boolean) - create gzip wrapper - * - `to` (String) - if equal to 'string', then result will be "binary string" - * (each char code [0..255]) - * - `header` (Object) - custom header for gzip - * - `text` (Boolean) - true if compressed data believed to be text - * - `time` (Number) - modification time, unix timestamp - * - `os` (Number) - operation system code - * - `extra` (Array) - array of bytes with extra data (max 65536) - * - `name` (String) - file name (binary string) - * - `comment` (String) - comment (binary string) - * - `hcrc` (Boolean) - true if header crc should be added - * - * ##### Example: - * - * ```javascript - * var pako = require('pako') - * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) - * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); - * - * var deflate = new pako.Deflate({ level: 3}); - * - * deflate.push(chunk1, false); - * deflate.push(chunk2, true); // true -> last chunk - * - * if (deflate.err) { throw new Error(deflate.err); } - * - * console.log(deflate.result); - * ``` - **/ -function Deflate(options) { - if (!(this instanceof Deflate)) return new Deflate(options); +/***/ }), - this.options = utils.assign({ - level: Z_DEFAULT_COMPRESSION, - method: Z_DEFLATED, - chunkSize: 16384, - windowBits: 15, - memLevel: 8, - strategy: Z_DEFAULT_STRATEGY, - to: '' - }, options || {}); +/***/ 5295: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - var opt = this.options; +"use strict"; - if (opt.raw && (opt.windowBits > 0)) { - opt.windowBits = -opt.windowBits; - } +Object.defineProperty(exports, "__esModule", ({ value: true })); +const matcher_1 = __nccwpck_require__(2576); +class PartialMatcher extends matcher_1.default { + match(filepath) { + const parts = filepath.split('/'); + const levels = parts.length; + const patterns = this._storage.filter((info) => !info.complete || info.segments.length > levels); + for (const pattern of patterns) { + const section = pattern.sections[0]; + /** + * In this case, the pattern has a globstar and we must 
read all directories unconditionally, + * but only if the level has reached the end of the first group. + * + * fixtures/{a,b}/** + * ^ true/false ^ always true + */ + if (!pattern.complete && levels > section.length) { + return true; + } + const match = parts.every((part, index) => { + const segment = pattern.segments[index]; + if (segment.dynamic && segment.patternRe.test(part)) { + return true; + } + if (!segment.dynamic && segment.pattern === part) { + return true; + } + return false; + }); + if (match) { + return true; + } + } + return false; + } +} +exports["default"] = PartialMatcher; - else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { - opt.windowBits += 16; - } - this.err = 0; // error code, if happens (0 = Z_OK) - this.msg = ''; // error message - this.ended = false; // used to avoid multiple onEnd() calls - this.chunks = []; // chunks of compressed data +/***/ }), - this.strm = new ZStream(); - this.strm.avail_out = 0; +/***/ 257: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - var status = zlib_deflate.deflateInit2( - this.strm, - opt.level, - opt.method, - opt.windowBits, - opt.memLevel, - opt.strategy - ); +"use strict"; - if (status !== Z_OK) { - throw new Error(msg[status]); - } +Object.defineProperty(exports, "__esModule", ({ value: true })); +const path = __nccwpck_require__(1017); +const deep_1 = __nccwpck_require__(6983); +const entry_1 = __nccwpck_require__(1343); +const error_1 = __nccwpck_require__(6654); +const entry_2 = __nccwpck_require__(4029); +class Provider { + constructor(_settings) { + this._settings = _settings; + this.errorFilter = new error_1.default(this._settings); + this.entryFilter = new entry_1.default(this._settings, this._getMicromatchOptions()); + this.deepFilter = new deep_1.default(this._settings, this._getMicromatchOptions()); + this.entryTransformer = new entry_2.default(this._settings); + } + _getRootDirectory(task) { + return path.resolve(this._settings.cwd, task.base); + } + _getReaderOptions(task) { + const basePath = task.base === '.' ? '' : task.base; + return { + basePath, + pathSegmentSeparator: '/', + concurrency: this._settings.concurrency, + deepFilter: this.deepFilter.getFilter(basePath, task.positive, task.negative), + entryFilter: this.entryFilter.getFilter(task.positive, task.negative), + errorFilter: this.errorFilter.getFilter(), + followSymbolicLinks: this._settings.followSymbolicLinks, + fs: this._settings.fs, + stats: this._settings.stats, + throwErrorOnBrokenSymbolicLink: this._settings.throwErrorOnBrokenSymbolicLink, + transform: this.entryTransformer.getTransformer() + }; + } + _getMicromatchOptions() { + return { + dot: this._settings.dot, + matchBase: this._settings.baseNameMatch, + nobrace: !this._settings.braceExpansion, + nocase: !this._settings.caseSensitiveMatch, + noext: !this._settings.extglob, + noglobstar: !this._settings.globstar, + posix: true, + strictSlashes: false + }; + } +} +exports["default"] = Provider; - if (opt.header) { - zlib_deflate.deflateSetHeader(this.strm, opt.header); - } - if (opt.dictionary) { - var dict; - // Convert data if needed - if (typeof opt.dictionary === 'string') { - // If we need to compress text, change encoding to utf8. 
- dict = strings.string2buf(opt.dictionary); - } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { - dict = new Uint8Array(opt.dictionary); - } else { - dict = opt.dictionary; - } +/***/ }), - status = zlib_deflate.deflateSetDictionary(this.strm, dict); +/***/ 4630: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - if (status !== Z_OK) { - throw new Error(msg[status]); - } +"use strict"; - this._dict_set = true; - } +Object.defineProperty(exports, "__esModule", ({ value: true })); +const stream_1 = __nccwpck_require__(2781); +const stream_2 = __nccwpck_require__(2083); +const provider_1 = __nccwpck_require__(257); +class ProviderStream extends provider_1.default { + constructor() { + super(...arguments); + this._reader = new stream_2.default(this._settings); + } + read(task) { + const root = this._getRootDirectory(task); + const options = this._getReaderOptions(task); + const source = this.api(root, task, options); + const destination = new stream_1.Readable({ objectMode: true, read: () => { } }); + source + .once('error', (error) => destination.emit('error', error)) + .on('data', (entry) => destination.emit('data', options.transform(entry))) + .once('end', () => destination.emit('end')); + destination + .once('close', () => source.destroy()); + return destination; + } + api(root, task, options) { + if (task.dynamic) { + return this._reader.dynamic(root, options); + } + return this._reader.static(task.patterns, options); + } } +exports["default"] = ProviderStream; -/** - * Deflate#push(data[, mode]) -> Boolean - * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be - * converted to utf8 byte sequence. - * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. - * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. - * - * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with - * new compressed chunks. Returns `true` on success. The last data block must have - * mode Z_FINISH (or `true`). That will flush internal pending buffers and call - * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you - * can use mode Z_SYNC_FLUSH, keeping the compression context. - * - * On fail call [[Deflate#onEnd]] with error code and return false. - * - * We strongly recommend to use `Uint8Array` on input for best speed (output - * array format is detected automatically). Also, don't skip last param and always - * use the same type in your code (boolean or number). That will improve JS speed. - * - * For regular `Array`-s make sure all elements are [0..255]. - * - * ##### Example - * - * ```javascript - * push(chunk, false); // push one of data chunks - * ... - * push(chunk, true); // push last chunk - * ``` - **/ -Deflate.prototype.push = function (data, mode) { - var strm = this.strm; - var chunkSize = this.options.chunkSize; - var status, _mode; - - if (this.ended) { return false; } - _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH); +/***/ }), - // Convert data if needed - if (typeof data === 'string') { - // If we need to compress text, change encoding to utf8. 
- strm.input = strings.string2buf(data); - } else if (toString.call(data) === '[object ArrayBuffer]') { - strm.input = new Uint8Array(data); - } else { - strm.input = data; - } +/***/ 2405: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - strm.next_in = 0; - strm.avail_in = strm.input.length; +"use strict"; - do { - if (strm.avail_out === 0) { - strm.output = new utils.Buf8(chunkSize); - strm.next_out = 0; - strm.avail_out = chunkSize; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const sync_1 = __nccwpck_require__(6234); +const provider_1 = __nccwpck_require__(257); +class ProviderSync extends provider_1.default { + constructor() { + super(...arguments); + this._reader = new sync_1.default(this._settings); } - status = zlib_deflate.deflate(strm, _mode); /* no bad return value */ - - if (status !== Z_STREAM_END && status !== Z_OK) { - this.onEnd(status); - this.ended = true; - return false; + read(task) { + const root = this._getRootDirectory(task); + const options = this._getReaderOptions(task); + const entries = this.api(root, task, options); + return entries.map(options.transform); } - if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) { - if (this.options.to === 'string') { - this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out))); - } else { - this.onData(utils.shrinkBuf(strm.output, strm.next_out)); - } + api(root, task, options) { + if (task.dynamic) { + return this._reader.dynamic(root, options); + } + return this._reader.static(task.patterns, options); } - } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END); +} +exports["default"] = ProviderSync; - // Finalize on the last chunk. - if (_mode === Z_FINISH) { - status = zlib_deflate.deflateEnd(this.strm); - this.onEnd(status); - this.ended = true; - return status === Z_OK; - } - // callback interim results if Z_SYNC_FLUSH. - if (_mode === Z_SYNC_FLUSH) { - this.onEnd(Z_OK); - strm.avail_out = 0; - return true; - } +/***/ }), - return true; -}; +/***/ 4029: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +"use strict"; -/** - * Deflate#onData(chunk) -> Void - * - chunk (Uint8Array|Array|String): output data. Type of array depends - * on js engine support. When string output requested, each chunk - * will be string. - * - * By default, stores data blocks in `chunks[]` property and glue - * those in `onEnd`. Override this handler, if you need another behaviour. - **/ -Deflate.prototype.onData = function (chunk) { - this.chunks.push(chunk); -}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const utils = __nccwpck_require__(5444); +class EntryTransformer { + constructor(_settings) { + this._settings = _settings; + } + getTransformer() { + return (entry) => this._transform(entry); + } + _transform(entry) { + let filepath = entry.path; + if (this._settings.absolute) { + filepath = utils.path.makeAbsolute(this._settings.cwd, filepath); + filepath = utils.path.unixify(filepath); + } + if (this._settings.markDirectories && entry.dirent.isDirectory()) { + filepath += '/'; + } + if (!this._settings.objectMode) { + return filepath; + } + return Object.assign(Object.assign({}, entry), { path: filepath }); + } +} +exports["default"] = EntryTransformer; -/** - * Deflate#onEnd(status) -> Void - * - status (Number): deflate status. 0 (Z_OK) on success, - * other if not. 
- * - * Called once after you tell deflate that the input stream is - * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) - * or if an error happened. By default - join collected chunks, - * free memory and fill `results` / `err` properties. - **/ -Deflate.prototype.onEnd = function (status) { - // On success - join - if (status === Z_OK) { - if (this.options.to === 'string') { - this.result = this.chunks.join(''); - } else { - this.result = utils.flattenChunks(this.chunks); - } - } - this.chunks = []; - this.err = status; - this.msg = this.strm.msg; -}; - - -/** - * deflate(data[, options]) -> Uint8Array|Array|String - * - data (Uint8Array|Array|String): input data to compress. - * - options (Object): zlib deflate options. - * - * Compress `data` with deflate algorithm and `options`. - * - * Supported options are: - * - * - level - * - windowBits - * - memLevel - * - strategy - * - dictionary - * - * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) - * for more information on these. - * - * Sugar (options): - * - * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify - * negative windowBits implicitly. - * - `to` (String) - if equal to 'string', then result will be "binary string" - * (each char code [0..255]) - * - * ##### Example: - * - * ```javascript - * var pako = require('pako') - * , data = Uint8Array([1,2,3,4,5,6,7,8,9]); - * - * console.log(pako.deflate(data)); - * ``` - **/ -function deflate(input, options) { - var deflator = new Deflate(options); +/***/ }), - deflator.push(input, true); +/***/ 5582: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // That will never happens, if you don't cheat with options :) - if (deflator.err) { throw deflator.msg || msg[deflator.err]; } +"use strict"; - return deflator.result; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const path = __nccwpck_require__(1017); +const fsStat = __nccwpck_require__(109); +const utils = __nccwpck_require__(5444); +class Reader { + constructor(_settings) { + this._settings = _settings; + this._fsStatSettings = new fsStat.Settings({ + followSymbolicLink: this._settings.followSymbolicLinks, + fs: this._settings.fs, + throwErrorOnBrokenSymbolicLink: this._settings.followSymbolicLinks + }); + } + _getFullEntryPath(filepath) { + return path.resolve(this._settings.cwd, filepath); + } + _makeEntry(stats, pattern) { + const entry = { + name: pattern, + path: pattern, + dirent: utils.fs.createDirentFromStats(pattern, stats) + }; + if (this._settings.stats) { + entry.stats = stats; + } + return entry; + } + _isFatalError(error) { + return !utils.errno.isEnoentCodeError(error) && !this._settings.suppressErrors; + } } +exports["default"] = Reader; -/** - * deflateRaw(data[, options]) -> Uint8Array|Array|String - * - data (Uint8Array|Array|String): input data to compress. - * - options (Object): zlib deflate options. - * - * The same as [[deflate]], but creates raw data, without wrapper - * (header and adler32 crc). - **/ -function deflateRaw(input, options) { - options = options || {}; - options.raw = true; - return deflate(input, options); -} - +/***/ }), -/** - * gzip(data[, options]) -> Uint8Array|Array|String - * - data (Uint8Array|Array|String): input data to compress. - * - options (Object): zlib deflate options. - * - * The same as [[deflate]], but create gzip wrapper instead of - * deflate one. 
- **/ -function gzip(input, options) { - options = options || {}; - options.gzip = true; - return deflate(input, options); -} +/***/ 2083: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +"use strict"; -exports.Deflate = Deflate; -exports.deflate = deflate; -exports.deflateRaw = deflateRaw; -exports.gzip = gzip; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const stream_1 = __nccwpck_require__(2781); +const fsStat = __nccwpck_require__(109); +const fsWalk = __nccwpck_require__(6026); +const reader_1 = __nccwpck_require__(5582); +class ReaderStream extends reader_1.default { + constructor() { + super(...arguments); + this._walkStream = fsWalk.walkStream; + this._stat = fsStat.stat; + } + dynamic(root, options) { + return this._walkStream(root, options); + } + static(patterns, options) { + const filepaths = patterns.map(this._getFullEntryPath, this); + const stream = new stream_1.PassThrough({ objectMode: true }); + stream._write = (index, _enc, done) => { + return this._getEntry(filepaths[index], patterns[index], options) + .then((entry) => { + if (entry !== null && options.entryFilter(entry)) { + stream.push(entry); + } + if (index === filepaths.length - 1) { + stream.end(); + } + done(); + }) + .catch(done); + }; + for (let i = 0; i < filepaths.length; i++) { + stream.write(i); + } + return stream; + } + _getEntry(filepath, pattern, options) { + return this._getStat(filepath) + .then((stats) => this._makeEntry(stats, pattern)) + .catch((error) => { + if (options.errorFilter(error)) { + return null; + } + throw error; + }); + } + _getStat(filepath) { + return new Promise((resolve, reject) => { + this._stat(filepath, this._fsStatSettings, (error, stats) => { + return error === null ? resolve(stats) : reject(error); + }); + }); + } +} +exports["default"] = ReaderStream; /***/ }), -/***/ 265: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 6234: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const fsStat = __nccwpck_require__(109); +const fsWalk = __nccwpck_require__(6026); +const reader_1 = __nccwpck_require__(5582); +class ReaderSync extends reader_1.default { + constructor() { + super(...arguments); + this._walkSync = fsWalk.walkSync; + this._statSync = fsStat.statSync; + } + dynamic(root, options) { + return this._walkSync(root, options); + } + static(patterns, options) { + const entries = []; + for (const pattern of patterns) { + const filepath = this._getFullEntryPath(pattern); + const entry = this._getEntry(filepath, pattern, options); + if (entry === null || !options.entryFilter(entry)) { + continue; + } + entries.push(entry); + } + return entries; + } + _getEntry(filepath, pattern, options) { + try { + const stats = this._getStat(filepath); + return this._makeEntry(stats, pattern); + } + catch (error) { + if (options.errorFilter(error)) { + return null; + } + throw error; + } + } + _getStat(filepath) { + return this._statSync(filepath, this._fsStatSettings); + } +} +exports["default"] = ReaderSync; -const path = __webpack_require__(622); -const win32 = process.platform === 'win32'; -const { - REGEX_BACKSLASH, - REGEX_REMOVE_BACKSLASH, - REGEX_SPECIAL_CHARS, - REGEX_SPECIAL_CHARS_GLOBAL -} = __webpack_require__(199); - -exports.isObject = val => val !== null && typeof val === 'object' && !Array.isArray(val); -exports.hasRegexChars = str => REGEX_SPECIAL_CHARS.test(str); -exports.isRegexChar = str => str.length === 1 && 
exports.hasRegexChars(str); -exports.escapeRegex = str => str.replace(REGEX_SPECIAL_CHARS_GLOBAL, '\\$1'); -exports.toPosixSlashes = str => str.replace(REGEX_BACKSLASH, '/'); - -exports.removeBackslashes = str => { - return str.replace(REGEX_REMOVE_BACKSLASH, match => { - return match === '\\' ? '' : match; - }); -}; -exports.supportsLookbehinds = () => { - const segs = process.version.slice(1).split('.').map(Number); - if (segs.length === 3 && segs[0] >= 9 || (segs[0] === 8 && segs[1] >= 10)) { - return true; - } - return false; -}; +/***/ }), -exports.isWindows = options => { - if (options && typeof options.windows === 'boolean') { - return options.windows; - } - return win32 === true || path.sep === '\\'; -}; +/***/ 952: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -exports.escapeLast = (input, char, lastIdx) => { - const idx = input.lastIndexOf(char, lastIdx); - if (idx === -1) return input; - if (input[idx - 1] === '\\') return exports.escapeLast(input, char, idx - 1); - return `${input.slice(0, idx)}\\${input.slice(idx)}`; -}; +"use strict"; -exports.removePrefix = (input, state = {}) => { - let output = input; - if (output.startsWith('./')) { - output = output.slice(2); - state.prefix = './'; - } - return output; -}; - -exports.wrapOutput = (input, state = {}, options = {}) => { - const prepend = options.contains ? '' : '^'; - const append = options.contains ? '' : '$'; - - let output = `${prepend}(?:${input})${append}`; - if (state.negated === true) { - output = `(?:^(?!${output}).*$)`; - } - return output; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DEFAULT_FILE_SYSTEM_ADAPTER = void 0; +const fs = __nccwpck_require__(7147); +const os = __nccwpck_require__(2037); +/** + * The `os.cpus` method can return zero. We expect the number of cores to be greater than zero. 
+ * https://github.com/nodejs/node/blob/7faeddf23a98c53896f8b574a6e66589e8fb1eb8/lib/os.js#L106-L107 + */ +const CPU_COUNT = Math.max(os.cpus().length, 1); +exports.DEFAULT_FILE_SYSTEM_ADAPTER = { + lstat: fs.lstat, + lstatSync: fs.lstatSync, + stat: fs.stat, + statSync: fs.statSync, + readdir: fs.readdir, + readdirSync: fs.readdirSync }; +class Settings { + constructor(_options = {}) { + this._options = _options; + this.absolute = this._getValue(this._options.absolute, false); + this.baseNameMatch = this._getValue(this._options.baseNameMatch, false); + this.braceExpansion = this._getValue(this._options.braceExpansion, true); + this.caseSensitiveMatch = this._getValue(this._options.caseSensitiveMatch, true); + this.concurrency = this._getValue(this._options.concurrency, CPU_COUNT); + this.cwd = this._getValue(this._options.cwd, process.cwd()); + this.deep = this._getValue(this._options.deep, Infinity); + this.dot = this._getValue(this._options.dot, false); + this.extglob = this._getValue(this._options.extglob, true); + this.followSymbolicLinks = this._getValue(this._options.followSymbolicLinks, true); + this.fs = this._getFileSystemMethods(this._options.fs); + this.globstar = this._getValue(this._options.globstar, true); + this.ignore = this._getValue(this._options.ignore, []); + this.markDirectories = this._getValue(this._options.markDirectories, false); + this.objectMode = this._getValue(this._options.objectMode, false); + this.onlyDirectories = this._getValue(this._options.onlyDirectories, false); + this.onlyFiles = this._getValue(this._options.onlyFiles, true); + this.stats = this._getValue(this._options.stats, false); + this.suppressErrors = this._getValue(this._options.suppressErrors, false); + this.throwErrorOnBrokenSymbolicLink = this._getValue(this._options.throwErrorOnBrokenSymbolicLink, false); + this.unique = this._getValue(this._options.unique, true); + if (this.onlyDirectories) { + this.onlyFiles = false; + } + if (this.stats) { + this.objectMode = true; + } + } + _getValue(option, value) { + return option === undefined ? 
value : option; + } + _getFileSystemMethods(methods = {}) { + return Object.assign(Object.assign({}, exports.DEFAULT_FILE_SYSTEM_ADAPTER), methods); + } +} +exports["default"] = Settings; /***/ }), -/***/ 279: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 5325: +/***/ ((__unused_webpack_module, exports) => { "use strict"; -// String encode/decode helpers - - -var utils = __webpack_require__(999); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.splitWhen = exports.flatten = void 0; +function flatten(items) { + return items.reduce((collection, item) => [].concat(collection, item), []); +} +exports.flatten = flatten; +function splitWhen(items, predicate) { + const result = [[]]; + let groupIndex = 0; + for (const item of items) { + if (predicate(item)) { + groupIndex++; + result[groupIndex] = []; + } + else { + result[groupIndex].push(item); + } + } + return result; +} +exports.splitWhen = splitWhen; -// Quick check if we can use fast array to bin string conversion -// -// - apply(Array) can fail on Android 2.2 -// - apply(Uint8Array) can fail on iOS 5.1 Safari -// -var STR_APPLY_OK = true; -var STR_APPLY_UIA_OK = true; +/***/ }), -try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; } -try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; } +/***/ 1230: +/***/ ((__unused_webpack_module, exports) => { +"use strict"; -// Table with utf8 lengths (calculated by first byte of sequence) -// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, -// because max possible codepoint is 0x10ffff -var _utf8len = new utils.Buf8(256); -for (var q = 0; q < 256; q++) { - _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.isEnoentCodeError = void 0; +function isEnoentCodeError(error) { + return error.code === 'ENOENT'; } -_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start +exports.isEnoentCodeError = isEnoentCodeError; -// convert string to array (typed, when possible) -exports.string2buf = function (str) { - var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; +/***/ }), - // count binary size - for (m_pos = 0; m_pos < str_len; m_pos++) { - c = str.charCodeAt(m_pos); - if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { - c2 = str.charCodeAt(m_pos + 1); - if ((c2 & 0xfc00) === 0xdc00) { - c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); - m_pos++; - } - } - buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
3 : 4; - } +/***/ 7543: +/***/ ((__unused_webpack_module, exports) => { - // allocate buffer - buf = new utils.Buf8(buf_len); +"use strict"; - // convert - for (i = 0, m_pos = 0; i < buf_len; m_pos++) { - c = str.charCodeAt(m_pos); - if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { - c2 = str.charCodeAt(m_pos + 1); - if ((c2 & 0xfc00) === 0xdc00) { - c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); - m_pos++; - } - } - if (c < 0x80) { - /* one byte */ - buf[i++] = c; - } else if (c < 0x800) { - /* two bytes */ - buf[i++] = 0xC0 | (c >>> 6); - buf[i++] = 0x80 | (c & 0x3f); - } else if (c < 0x10000) { - /* three bytes */ - buf[i++] = 0xE0 | (c >>> 12); - buf[i++] = 0x80 | (c >>> 6 & 0x3f); - buf[i++] = 0x80 | (c & 0x3f); - } else { - /* four bytes */ - buf[i++] = 0xf0 | (c >>> 18); - buf[i++] = 0x80 | (c >>> 12 & 0x3f); - buf[i++] = 0x80 | (c >>> 6 & 0x3f); - buf[i++] = 0x80 | (c & 0x3f); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.createDirentFromStats = void 0; +class DirentFromStats { + constructor(name, stats) { + this.name = name; + this.isBlockDevice = stats.isBlockDevice.bind(stats); + this.isCharacterDevice = stats.isCharacterDevice.bind(stats); + this.isDirectory = stats.isDirectory.bind(stats); + this.isFIFO = stats.isFIFO.bind(stats); + this.isFile = stats.isFile.bind(stats); + this.isSocket = stats.isSocket.bind(stats); + this.isSymbolicLink = stats.isSymbolicLink.bind(stats); } - } +} +function createDirentFromStats(name, stats) { + return new DirentFromStats(name, stats); +} +exports.createDirentFromStats = createDirentFromStats; - return buf; -}; -// Helper (used in 2 places) -function buf2binstring(buf, len) { - // On Chrome, the arguments in a function call that are allowed is `65534`. - // If the length of the buffer is smaller than that, we can use this optimization, - // otherwise we will take a slower path. 
- if (len < 65534) { - if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) { - return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len)); - } - } +/***/ }), - var result = ''; - for (var i = 0; i < len; i++) { - result += String.fromCharCode(buf[i]); - } - return result; -} +/***/ 5444: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +"use strict"; -// Convert byte array to binary string -exports.buf2binstring = function (buf) { - return buf2binstring(buf, buf.length); -}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.string = exports.stream = exports.pattern = exports.path = exports.fs = exports.errno = exports.array = void 0; +const array = __nccwpck_require__(5325); +exports.array = array; +const errno = __nccwpck_require__(1230); +exports.errno = errno; +const fs = __nccwpck_require__(7543); +exports.fs = fs; +const path = __nccwpck_require__(3873); +exports.path = path; +const pattern = __nccwpck_require__(1221); +exports.pattern = pattern; +const stream = __nccwpck_require__(8382); +exports.stream = stream; +const string = __nccwpck_require__(2203); +exports.string = string; -// Convert binary string (typed, when possible) -exports.binstring2buf = function (str) { - var buf = new utils.Buf8(str.length); - for (var i = 0, len = buf.length; i < len; i++) { - buf[i] = str.charCodeAt(i); - } - return buf; -}; +/***/ }), +/***/ 3873: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -// convert array to string -exports.buf2string = function (buf, max) { - var i, out, c, c_len; - var len = max || buf.length; +"use strict"; - // Reserve max possible length (2 words per char) - // NB: by unknown reasons, Array is significantly faster for - // String.fromCharCode.apply than Uint16Array. - var utf16buf = new Array(len * 2); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.removeLeadingDotSegment = exports.escape = exports.makeAbsolute = exports.unixify = void 0; +const path = __nccwpck_require__(1017); +const LEADING_DOT_SEGMENT_CHARACTERS_COUNT = 2; // ./ or .\\ +const UNESCAPED_GLOB_SYMBOLS_RE = /(\\?)([()*?[\]{|}]|^!|[!+@](?=\())/g; +/** + * Designed to work only with simple paths: `dir\\file`. + */ +function unixify(filepath) { + return filepath.replace(/\\/g, '/'); +} +exports.unixify = unixify; +function makeAbsolute(cwd, filepath) { + return path.resolve(cwd, filepath); +} +exports.makeAbsolute = makeAbsolute; +function escape(pattern) { + return pattern.replace(UNESCAPED_GLOB_SYMBOLS_RE, '\\$2'); +} +exports.escape = escape; +function removeLeadingDotSegment(entry) { + // We do not use `startsWith` because this is 10x slower than current implementation for some cases. + // eslint-disable-next-line @typescript-eslint/prefer-string-starts-ends-with + if (entry.charAt(0) === '.') { + const secondCharactery = entry.charAt(1); + if (secondCharactery === '/' || secondCharactery === '\\') { + return entry.slice(LEADING_DOT_SEGMENT_CHARACTERS_COUNT); + } + } + return entry; +} +exports.removeLeadingDotSegment = removeLeadingDotSegment; - for (out = 0, i = 0; i < len;) { - c = buf[i++]; - // quick process ascii - if (c < 0x80) { utf16buf[out++] = c; continue; } - c_len = _utf8len[c]; - // skip 5 & 6 byte codes - if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; } +/***/ }), - // apply mask on first byte - c &= c_len === 2 ? 0x1f : c_len === 3 ? 
0x0f : 0x07; - // join the rest - while (c_len > 1 && i < len) { - c = (c << 6) | (buf[i++] & 0x3f); - c_len--; - } +/***/ 1221: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // terminated by end of string? - if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } +"use strict"; - if (c < 0x10000) { - utf16buf[out++] = c; - } else { - c -= 0x10000; - utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); - utf16buf[out++] = 0xdc00 | (c & 0x3ff); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.matchAny = exports.convertPatternsToRe = exports.makeRe = exports.getPatternParts = exports.expandBraceExpansion = exports.expandPatternsWithBraceExpansion = exports.isAffectDepthOfReadingPattern = exports.endsWithSlashGlobStar = exports.hasGlobStar = exports.getBaseDirectory = exports.isPatternRelatedToParentDirectory = exports.getPatternsOutsideCurrentDirectory = exports.getPatternsInsideCurrentDirectory = exports.getPositivePatterns = exports.getNegativePatterns = exports.isPositivePattern = exports.isNegativePattern = exports.convertToNegativePattern = exports.convertToPositivePattern = exports.isDynamicPattern = exports.isStaticPattern = void 0; +const path = __nccwpck_require__(1017); +const globParent = __nccwpck_require__(4655); +const micromatch = __nccwpck_require__(6228); +const GLOBSTAR = '**'; +const ESCAPE_SYMBOL = '\\'; +const COMMON_GLOB_SYMBOLS_RE = /[*?]|^!/; +const REGEX_CHARACTER_CLASS_SYMBOLS_RE = /\[[^[]*]/; +const REGEX_GROUP_SYMBOLS_RE = /(?:^|[^!*+?@])\([^(]*\|[^|]*\)/; +const GLOB_EXTENSION_SYMBOLS_RE = /[!*+?@]\([^(]*\)/; +const BRACE_EXPANSION_SEPARATORS_RE = /,|\.\./; +function isStaticPattern(pattern, options = {}) { + return !isDynamicPattern(pattern, options); +} +exports.isStaticPattern = isStaticPattern; +function isDynamicPattern(pattern, options = {}) { + /** + * A special case with an empty string is necessary for matching patterns that start with a forward slash. + * An empty string cannot be a dynamic pattern. + * For example, the pattern `/lib/*` will be spread into parts: '', 'lib', '*'. + */ + if (pattern === '') { + return false; } - } + /** + * When the `caseSensitiveMatch` option is disabled, all patterns must be marked as dynamic, because we cannot check + * filepath directly (without read directory). + */ + if (options.caseSensitiveMatch === false || pattern.includes(ESCAPE_SYMBOL)) { + return true; + } + if (COMMON_GLOB_SYMBOLS_RE.test(pattern) || REGEX_CHARACTER_CLASS_SYMBOLS_RE.test(pattern) || REGEX_GROUP_SYMBOLS_RE.test(pattern)) { + return true; + } + if (options.extglob !== false && GLOB_EXTENSION_SYMBOLS_RE.test(pattern)) { + return true; + } + if (options.braceExpansion !== false && hasBraceExpansion(pattern)) { + return true; + } + return false; +} +exports.isDynamicPattern = isDynamicPattern; +function hasBraceExpansion(pattern) { + const openingBraceIndex = pattern.indexOf('{'); + if (openingBraceIndex === -1) { + return false; + } + const closingBraceIndex = pattern.indexOf('}', openingBraceIndex + 1); + if (closingBraceIndex === -1) { + return false; + } + const braceContent = pattern.slice(openingBraceIndex, closingBraceIndex); + return BRACE_EXPANSION_SEPARATORS_RE.test(braceContent); +} +function convertToPositivePattern(pattern) { + return isNegativePattern(pattern) ? pattern.slice(1) : pattern; +} +exports.convertToPositivePattern = convertToPositivePattern; +function convertToNegativePattern(pattern) { + return '!' 
+ pattern; +} +exports.convertToNegativePattern = convertToNegativePattern; +function isNegativePattern(pattern) { + return pattern.startsWith('!') && pattern[1] !== '('; +} +exports.isNegativePattern = isNegativePattern; +function isPositivePattern(pattern) { + return !isNegativePattern(pattern); +} +exports.isPositivePattern = isPositivePattern; +function getNegativePatterns(patterns) { + return patterns.filter(isNegativePattern); +} +exports.getNegativePatterns = getNegativePatterns; +function getPositivePatterns(patterns) { + return patterns.filter(isPositivePattern); +} +exports.getPositivePatterns = getPositivePatterns; +/** + * Returns patterns that can be applied inside the current directory. + * + * @example + * // ['./*', '*', 'a/*'] + * getPatternsInsideCurrentDirectory(['./*', '*', 'a/*', '../*', './../*']) + */ +function getPatternsInsideCurrentDirectory(patterns) { + return patterns.filter((pattern) => !isPatternRelatedToParentDirectory(pattern)); +} +exports.getPatternsInsideCurrentDirectory = getPatternsInsideCurrentDirectory; +/** + * Returns patterns to be expanded relative to (outside) the current directory. + * + * @example + * // ['../*', './../*'] + * getPatternsInsideCurrentDirectory(['./*', '*', 'a/*', '../*', './../*']) + */ +function getPatternsOutsideCurrentDirectory(patterns) { + return patterns.filter(isPatternRelatedToParentDirectory); +} +exports.getPatternsOutsideCurrentDirectory = getPatternsOutsideCurrentDirectory; +function isPatternRelatedToParentDirectory(pattern) { + return pattern.startsWith('..') || pattern.startsWith('./..'); +} +exports.isPatternRelatedToParentDirectory = isPatternRelatedToParentDirectory; +function getBaseDirectory(pattern) { + return globParent(pattern, { flipBackslashes: false }); +} +exports.getBaseDirectory = getBaseDirectory; +function hasGlobStar(pattern) { + return pattern.includes(GLOBSTAR); +} +exports.hasGlobStar = hasGlobStar; +function endsWithSlashGlobStar(pattern) { + return pattern.endsWith('/' + GLOBSTAR); +} +exports.endsWithSlashGlobStar = endsWithSlashGlobStar; +function isAffectDepthOfReadingPattern(pattern) { + const basename = path.basename(pattern); + return endsWithSlashGlobStar(pattern) || isStaticPattern(basename); +} +exports.isAffectDepthOfReadingPattern = isAffectDepthOfReadingPattern; +function expandPatternsWithBraceExpansion(patterns) { + return patterns.reduce((collection, pattern) => { + return collection.concat(expandBraceExpansion(pattern)); + }, []); +} +exports.expandPatternsWithBraceExpansion = expandPatternsWithBraceExpansion; +function expandBraceExpansion(pattern) { + return micromatch.braces(pattern, { + expand: true, + nodupes: true + }); +} +exports.expandBraceExpansion = expandBraceExpansion; +function getPatternParts(pattern, options) { + let { parts } = micromatch.scan(pattern, Object.assign(Object.assign({}, options), { parts: true })); + /** + * The scan method returns an empty array in some cases. + * See micromatch/picomatch#58 for more details. + */ + if (parts.length === 0) { + parts = [pattern]; + } + /** + * The scan method does not return an empty part for the pattern with a forward slash. + * This is another part of micromatch/picomatch#58. 
+ */ + if (parts[0].startsWith('/')) { + parts[0] = parts[0].slice(1); + parts.unshift(''); + } + return parts; +} +exports.getPatternParts = getPatternParts; +function makeRe(pattern, options) { + return micromatch.makeRe(pattern, options); +} +exports.makeRe = makeRe; +function convertPatternsToRe(patterns, options) { + return patterns.map((pattern) => makeRe(pattern, options)); +} +exports.convertPatternsToRe = convertPatternsToRe; +function matchAny(entry, patternsRe) { + return patternsRe.some((patternRe) => patternRe.test(entry)); +} +exports.matchAny = matchAny; - return buf2binstring(utf16buf, out); -}; +/***/ }), -// Calculate max possible position in utf8 buffer, -// that will not break sequence. If that's not possible -// - (very small limits) return max size as is. -// -// buf[] - utf8 bytes array -// max - length limit (mandatory); -exports.utf8border = function (buf, max) { - var pos; +/***/ 8382: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - max = max || buf.length; - if (max > buf.length) { max = buf.length; } +"use strict"; - // go back from last position, until start of sequence found - pos = max - 1; - while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.merge = void 0; +const merge2 = __nccwpck_require__(2578); +function merge(streams) { + const mergedStream = merge2(streams); + streams.forEach((stream) => { + stream.once('error', (error) => mergedStream.emit('error', error)); + }); + mergedStream.once('close', () => propagateCloseEventToSources(streams)); + mergedStream.once('end', () => propagateCloseEventToSources(streams)); + return mergedStream; +} +exports.merge = merge; +function propagateCloseEventToSources(streams) { + streams.forEach((stream) => stream.emit('close')); +} - // Very small and broken sequence, - // return max, because we should return something anyway. - if (pos < 0) { return max; } - // If we came to start of buffer - that means buffer is too small, - // return max too. - if (pos === 0) { return max; } +/***/ }), - return (pos + _utf8len[buf[pos]] > max) ? pos : max; -}; +/***/ 2203: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.isEmpty = exports.isString = void 0; +function isString(input) { + return typeof input === 'string'; +} +exports.isString = isString; +function isEmpty(input) { + return input === ''; +} +exports.isEmpty = isEmpty; /***/ }), -/***/ 290: -/***/ (function(module) { +/***/ 7340: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -/* - * URL: https://github.com/cubicdaiya/onp - * - * Copyright (c) 2013 Tatsuhiko Kubo - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ +"use strict"; -/** - * The algorithm implemented here is based on "An O(NP) Sequence Comparison Algorithm" - * by described by Sun Wu, Udi Manber and Gene Myers -*/ -module.exports = function (a_, b_) { - var a = a_, - b = b_, - m = a.length, - n = b.length, - reverse = false, - ed = null, - offset = m + 1, - path = [], - pathposi = [], - ses = [], - lcs = "", - SES_DELETE = -1, - SES_COMMON = 0, - SES_ADD = 1; - var tmp1, - tmp2; +/* eslint-disable no-var */ - var init = function () { - if (m >= n) { - tmp1 = a; - tmp2 = m; - a = b; - b = tmp1; - m = n; - n = tmp2; - reverse = true; - offset = m + 1; - } - }; +var reusify = __nccwpck_require__(2113) - var P = function (x, y, k) { - return { - 'x' : x, - 'y' : y, - 'k' : k, - }; - }; +function fastqueue (context, worker, concurrency) { + if (typeof context === 'function') { + concurrency = worker + worker = context + context = null + } - var seselem = function (elem, t) { - return { - 'elem' : elem, - 't' : t, - }; - }; + if (concurrency < 1) { + throw new Error('fastqueue concurrency must be greater than 1') + } - var snake = function (k, p, pp) { - var r, x, y; - if (p > pp) { - r = path[k-1+offset]; - } else { - r = path[k+1+offset]; - } + var cache = reusify(Task) + var queueHead = null + var queueTail = null + var _running = 0 + var errorHandler = null - y = Math.max(p, pp); - x = y - k; - while (x < m && y < n && a[x] === b[y]) { - ++x; - ++y; - } + var self = { + push: push, + drain: noop, + saturated: noop, + pause: pause, + paused: false, + concurrency: concurrency, + running: running, + resume: resume, + idle: idle, + length: length, + getQueue: getQueue, + unshift: unshift, + empty: noop, + kill: kill, + killAndDrain: killAndDrain, + error: error + } - path[k+offset] = pathposi.length; - pathposi[pathposi.length] = new P(x, y, r); - return y; - }; + return self - var recordseq = function (epc) { - var x_idx, y_idx, px_idx, py_idx, i; - x_idx = y_idx = 1; - px_idx = py_idx = 0; - for (i=epc.length-1;i>=0;--i) { - while(px_idx < epc[i].x || py_idx < epc[i].y) { - if (epc[i].y - epc[i].x > py_idx - px_idx) { - if (reverse) { - ses[ses.length] = new seselem(b[py_idx], SES_DELETE); - } else { - ses[ses.length] = new seselem(b[py_idx], SES_ADD); - } - ++y_idx; - ++py_idx; - } else if (epc[i].y - epc[i].x < py_idx - px_idx) { - if (reverse) { - ses[ses.length] = new seselem(a[px_idx], SES_ADD); - } else { - ses[ses.length] = new seselem(a[px_idx], SES_DELETE); - } - ++x_idx; - ++px_idx; - } else { - ses[ses.length] = new seselem(a[px_idx], SES_COMMON); - lcs += a[px_idx]; - ++x_idx; - ++y_idx; - ++px_idx; - ++py_idx; - } - } - } - }; + function running () { + return _running + } - init(); + function pause () { + self.paused = true + } - return { - SES_DELETE : -1, - SES_COMMON : 0, - SES_ADD : 1, - editdistance : function () { - return ed; - }, - getlcs : function () { - return lcs; - }, - getses : function () { - return ses; - }, - compose : function () { - var delta, size, fp, p, r, epc, i, k; - delta = n - m; - size = m + n + 3; - fp = {}; - 
for (i=0;i=delta+1;--k) { - fp[k+offset] = snake(k, fp[k-1+offset]+1, fp[k+1+offset]); - } - fp[delta+offset] = snake(delta, fp[delta-1+offset]+1, fp[delta+1+offset]); - } while (fp[delta+offset] !== n); + function length () { + var current = queueHead + var counter = 0 - ed = delta + 2 * p; + while (current) { + current = current.next + counter++ + } - r = path[delta+offset]; + return counter + } - epc = []; - while (r !== -1) { - epc[epc.length] = new P(pathposi[r].x, pathposi[r].y, null); - r = pathposi[r].k; - } - recordseq(epc); - } - }; -}; + function getQueue () { + var current = queueHead + var tasks = [] + + while (current) { + tasks.push(current.value) + current = current.next + } + return tasks + } -/***/ }), + function resume () { + if (!self.paused) return + self.paused = false + for (var i = 0; i < self.concurrency; i++) { + _running++ + release() + } + } -/***/ 291: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + function idle () { + return _running === 0 && self.length() === 0 + } -"use strict"; + function push (value, done) { + var current = cache.get() -Object.defineProperty(exports, "__esModule", { value: true }); -const events_1 = __webpack_require__(614); -const fsScandir = __webpack_require__(661); -const fastq = __webpack_require__(689); -const common = __webpack_require__(617); -const reader_1 = __webpack_require__(962); -class AsyncReader extends reader_1.default { - constructor(_root, _settings) { - super(_root, _settings); - this._settings = _settings; - this._scandir = fsScandir.scandir; - this._emitter = new events_1.EventEmitter(); - this._queue = fastq(this._worker.bind(this), this._settings.concurrency); - this._isFatalError = false; - this._isDestroyed = false; - this._queue.drain = () => { - if (!this._isFatalError) { - this._emitter.emit('end'); - } - }; - } - read() { - this._isFatalError = false; - this._isDestroyed = false; - setImmediate(() => { - this._pushToQueue(this._root, this._settings.basePath); - }); - return this._emitter; - } - get isDestroyed() { - return this._isDestroyed; - } - destroy() { - if (this._isDestroyed) { - throw new Error('The reader is already destroyed'); - } - this._isDestroyed = true; - this._queue.killAndDrain(); + current.context = context + current.release = release + current.value = value + current.callback = done || noop + current.errorHandler = errorHandler + + if (_running === self.concurrency || self.paused) { + if (queueTail) { + queueTail.next = current + queueTail = current + } else { + queueHead = current + queueTail = current + self.saturated() + } + } else { + _running++ + worker.call(context, current.value, current.worked) } - onEntry(callback) { - this._emitter.on('entry', callback); - } - onError(callback) { - this._emitter.once('error', callback); - } - onEnd(callback) { - this._emitter.once('end', callback); - } - _pushToQueue(directory, base) { - const queueItem = { directory, base }; - this._queue.push(queueItem, (error) => { - if (error !== null) { - this._handleError(error); - } - }); - } - _worker(item, done) { - this._scandir(item.directory, this._settings.fsScandirSettings, (error, entries) => { - if (error !== null) { - done(error, undefined); - return; - } - for (const entry of entries) { - this._handleEntry(entry, item.base); - } - done(null, undefined); - }); + } + + function unshift (value, done) { + var current = cache.get() + + current.context = context + current.release = release + current.value = value + current.callback = done || noop + + if (_running === 
self.concurrency || self.paused) { + if (queueHead) { + current.next = queueHead + queueHead = current + } else { + queueHead = current + queueTail = current + self.saturated() + } + } else { + _running++ + worker.call(context, current.value, current.worked) } - _handleError(error) { - if (this._isDestroyed || !common.isFatalError(this._settings, error)) { - return; - } - this._isFatalError = true; - this._isDestroyed = true; - this._emitter.emit('error', error); + } + + function release (holder) { + if (holder) { + cache.release(holder) } - _handleEntry(entry, base) { - if (this._isDestroyed || this._isFatalError) { - return; - } - const fullpath = entry.path; - if (base !== undefined) { - entry.path = common.joinPathSegments(base, entry.name, this._settings.pathSegmentSeparator); - } - if (common.isAppliedFilter(this._settings.entryFilter, entry)) { - this._emitEntry(entry); + var next = queueHead + if (next) { + if (!self.paused) { + if (queueTail === queueHead) { + queueTail = null } - if (entry.dirent.isDirectory() && common.isAppliedFilter(this._settings.deepFilter, entry)) { - this._pushToQueue(fullpath, base === undefined ? undefined : entry.path); + queueHead = next.next + next.next = null + worker.call(context, next.value, next.worked) + if (queueTail === null) { + self.empty() } + } else { + _running-- + } + } else if (--_running === 0) { + self.drain() } - _emitEntry(entry) { - this._emitter.emit('entry', entry); - } -} -exports.default = AsyncReader; + } + function kill () { + queueHead = null + queueTail = null + self.drain = noop + } -/***/ }), + function killAndDrain () { + queueHead = null + queueTail = null + self.drain() + self.drain = noop + } -/***/ 293: -/***/ (function(module) { + function error (handler) { + errorHandler = handler + } +} -module.exports = require("buffer"); +function noop () {} -/***/ }), +function Task () { + this.value = null + this.callback = noop + this.next = null + this.release = noop + this.context = null + this.errorHandler = null -/***/ 315: -/***/ (function(module) { + var self = this -if (typeof Object.create === 'function') { - // implementation from standard node.js 'util' module - module.exports = function inherits(ctor, superCtor) { - if (superCtor) { - ctor.super_ = superCtor - ctor.prototype = Object.create(superCtor.prototype, { - constructor: { - value: ctor, - enumerable: false, - writable: true, - configurable: true - } - }) - } - }; -} else { - // old school shim for old browsers - module.exports = function inherits(ctor, superCtor) { - if (superCtor) { - ctor.super_ = superCtor - var TempCtor = function () {} - TempCtor.prototype = superCtor.prototype - ctor.prototype = new TempCtor() - ctor.prototype.constructor = ctor + this.worked = function worked (err, result) { + var callback = self.callback + var errorHandler = self.errorHandler + var val = self.value + self.value = null + self.callback = noop + if (self.errorHandler) { + errorHandler(err, val) } + callback.call(self.context, err, result) + self.release(self) } } +function queueAsPromised (context, worker, concurrency) { + if (typeof context === 'function') { + concurrency = worker + worker = context + context = null + } -/***/ }), + function asyncWrapper (arg, cb) { + worker.call(this, arg) + .then(function (res) { + cb(null, res) + }, cb) + } -/***/ 317: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + var queue = fastqueue(context, asyncWrapper, concurrency) -"use strict"; + var pushCb = queue.push + var unshiftCb = queue.unshift 
-Object.defineProperty(exports, "__esModule", { value: true }); -const utils = __webpack_require__(444); -class EntryTransformer { - constructor(_settings) { - this._settings = _settings; - } - getTransformer() { - return (entry) => this._transform(entry); - } - _transform(entry) { - let filepath = entry.path; - if (this._settings.absolute) { - filepath = utils.path.makeAbsolute(this._settings.cwd, filepath); - filepath = utils.path.unixify(filepath); - } - if (this._settings.markDirectories && entry.dirent.isDirectory()) { - filepath += '/'; + queue.push = push + queue.unshift = unshift + queue.drained = drained + + return queue + + function push (value) { + var p = new Promise(function (resolve, reject) { + pushCb(value, function (err, result) { + if (err) { + reject(err) + return } - if (!this._settings.objectMode) { - return filepath; + resolve(result) + }) + }) + + // Let's fork the promise chain to + // make the error bubble up to the user but + // not lead to a unhandledRejection + p.catch(noop) + + return p + } + + function unshift (value) { + var p = new Promise(function (resolve, reject) { + unshiftCb(value, function (err, result) { + if (err) { + reject(err) + return } - return Object.assign(Object.assign({}, entry), { path: filepath }); - } + resolve(result) + }) + }) + + // Let's fork the promise chain to + // make the error bubble up to the user but + // not lead to a unhandledRejection + p.catch(noop) + + return p + } + + function drained () { + var previousDrain = queue.drain + + var p = new Promise(function (resolve) { + queue.drain = function () { + previousDrain() + resolve() + } + }) + + return p + } } -exports.default = EntryTransformer; + +module.exports = fastqueue +module.exports.promise = queueAsPromised /***/ }), -/***/ 320: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 6330: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; +/*! + * fill-range + * + * Copyright (c) 2014-present, Jon Schlinkert. + * Licensed under the MIT License. + */ -Object.defineProperty(exports, "__esModule", { value: true }); -const utils = __webpack_require__(444); -class Matcher { - constructor(_patterns, _settings, _micromatchOptions) { - this._patterns = _patterns; - this._settings = _settings; - this._micromatchOptions = _micromatchOptions; - this._storage = []; - this._fillStorage(); - } - _fillStorage() { - /** - * The original pattern may include `{,*,**,a/*}`, which will lead to problems with matching (unresolved level). - * So, before expand patterns with brace expansion into separated patterns. 
- */ - const patterns = utils.pattern.expandPatternsWithBraceExpansion(this._patterns); - for (const pattern of patterns) { - const segments = this._getPatternSegments(pattern); - const sections = this._splitSegmentsIntoSections(segments); - this._storage.push({ - complete: sections.length <= 1, - pattern, - segments, - sections - }); - } - } - _getPatternSegments(pattern) { - const parts = utils.pattern.getPatternParts(pattern, this._micromatchOptions); - return parts.map((part) => { - const dynamic = utils.pattern.isDynamicPattern(part, this._settings); - if (!dynamic) { - return { - dynamic: false, - pattern: part - }; - } - return { - dynamic: true, - pattern: part, - patternRe: utils.pattern.makeRe(part, this._micromatchOptions) - }; - }); - } - _splitSegmentsIntoSections(segments) { - return utils.array.splitWhen(segments, (segment) => segment.dynamic && utils.pattern.hasGlobStar(segment.pattern)); - } -} -exports.default = Matcher; -/***/ }), - -/***/ 332: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +const util = __nccwpck_require__(3837); +const toRegexRange = __nccwpck_require__(1861); -"use strict"; +const isObject = val => val !== null && typeof val === 'object' && !Array.isArray(val); -Object.defineProperty(exports, "__esModule", { value: true }); -exports.DEFAULT_FILE_SYSTEM_ADAPTER = void 0; -const fs = __webpack_require__(747); -const os = __webpack_require__(87); -/** - * The `os.cpus` method can return zero. We expect the number of cores to be greater than zero. - * https://github.com/nodejs/node/blob/7faeddf23a98c53896f8b574a6e66589e8fb1eb8/lib/os.js#L106-L107 - */ -const CPU_COUNT = Math.max(os.cpus().length, 1); -exports.DEFAULT_FILE_SYSTEM_ADAPTER = { - lstat: fs.lstat, - lstatSync: fs.lstatSync, - stat: fs.stat, - statSync: fs.statSync, - readdir: fs.readdir, - readdirSync: fs.readdirSync +const transform = toNumber => { + return value => toNumber === true ? 
Number(value) : String(value); }; -class Settings { - constructor(_options = {}) { - this._options = _options; - this.absolute = this._getValue(this._options.absolute, false); - this.baseNameMatch = this._getValue(this._options.baseNameMatch, false); - this.braceExpansion = this._getValue(this._options.braceExpansion, true); - this.caseSensitiveMatch = this._getValue(this._options.caseSensitiveMatch, true); - this.concurrency = this._getValue(this._options.concurrency, CPU_COUNT); - this.cwd = this._getValue(this._options.cwd, process.cwd()); - this.deep = this._getValue(this._options.deep, Infinity); - this.dot = this._getValue(this._options.dot, false); - this.extglob = this._getValue(this._options.extglob, true); - this.followSymbolicLinks = this._getValue(this._options.followSymbolicLinks, true); - this.fs = this._getFileSystemMethods(this._options.fs); - this.globstar = this._getValue(this._options.globstar, true); - this.ignore = this._getValue(this._options.ignore, []); - this.markDirectories = this._getValue(this._options.markDirectories, false); - this.objectMode = this._getValue(this._options.objectMode, false); - this.onlyDirectories = this._getValue(this._options.onlyDirectories, false); - this.onlyFiles = this._getValue(this._options.onlyFiles, true); - this.stats = this._getValue(this._options.stats, false); - this.suppressErrors = this._getValue(this._options.suppressErrors, false); - this.throwErrorOnBrokenSymbolicLink = this._getValue(this._options.throwErrorOnBrokenSymbolicLink, false); - this.unique = this._getValue(this._options.unique, true); - if (this.onlyDirectories) { - this.onlyFiles = false; - } - if (this.stats) { - this.objectMode = true; - } - } - _getValue(option, value) { - return option === undefined ? value : option; - } - _getFileSystemMethods(methods = {}) { - return Object.assign(Object.assign({}, exports.DEFAULT_FILE_SYSTEM_ADAPTER), methods); - } -} -exports.default = Settings; - -/***/ }), +const isValidValue = value => { + return typeof value === 'number' || (typeof value === 'string' && value !== ''); +}; -/***/ 357: -/***/ (function(module, __unusedexports, __webpack_require__) { +const isNumber = num => Number.isInteger(+num); -/*! - * is-glob - * - * Copyright (c) 2014-2017, Jon Schlinkert. - * Released under the MIT License. - */ +const zeros = input => { + let value = `${input}`; + let index = -1; + if (value[0] === '-') value = value.slice(1); + if (value === '0') return false; + while (value[++index] === '0'); + return index > 0; +}; -var isExtglob = __webpack_require__(888); -var chars = { '{': '}', '(': ')', '[': ']'}; -var strictCheck = function(str) { - if (str[0] === '!') { +const stringify = (start, end, options) => { + if (typeof start === 'string' || typeof end === 'string') { return true; } - var index = 0; - var pipeIndex = -2; - var closeSquareIndex = -2; - var closeCurlyIndex = -2; - var closeParenIndex = -2; - var backSlashIndex = -2; - while (index < str.length) { - if (str[index] === '*') { - return true; - } + return options.stringify === true; +}; - if (str[index + 1] === '?' && /[\].+)]/.test(str[index])) { - return true; - } +const pad = (input, maxLength, toNumber) => { + if (maxLength > 0) { + let dash = input[0] === '-' ? '-' : ''; + if (dash) input = input.slice(1); + input = (dash + input.padStart(dash ? 
maxLength - 1 : maxLength, '0')); + } + if (toNumber === false) { + return String(input); + } + return input; +}; - if (closeSquareIndex !== -1 && str[index] === '[' && str[index + 1] !== ']') { - if (closeSquareIndex < index) { - closeSquareIndex = str.indexOf(']', index); - } - if (closeSquareIndex > index) { - if (backSlashIndex === -1 || backSlashIndex > closeSquareIndex) { - return true; - } - backSlashIndex = str.indexOf('\\', index); - if (backSlashIndex === -1 || backSlashIndex > closeSquareIndex) { - return true; - } - } - } +const toMaxLen = (input, maxLength) => { + let negative = input[0] === '-' ? '-' : ''; + if (negative) { + input = input.slice(1); + maxLength--; + } + while (input.length < maxLength) input = '0' + input; + return negative ? ('-' + input) : input; +}; - if (closeCurlyIndex !== -1 && str[index] === '{' && str[index + 1] !== '}') { - closeCurlyIndex = str.indexOf('}', index); - if (closeCurlyIndex > index) { - backSlashIndex = str.indexOf('\\', index); - if (backSlashIndex === -1 || backSlashIndex > closeCurlyIndex) { - return true; - } - } - } +const toSequence = (parts, options) => { + parts.negatives.sort((a, b) => a < b ? -1 : a > b ? 1 : 0); + parts.positives.sort((a, b) => a < b ? -1 : a > b ? 1 : 0); - if (closeParenIndex !== -1 && str[index] === '(' && str[index + 1] === '?' && /[:!=]/.test(str[index + 2]) && str[index + 3] !== ')') { - closeParenIndex = str.indexOf(')', index); - if (closeParenIndex > index) { - backSlashIndex = str.indexOf('\\', index); - if (backSlashIndex === -1 || backSlashIndex > closeParenIndex) { - return true; - } - } - } + let prefix = options.capture ? '' : '?:'; + let positives = ''; + let negatives = ''; + let result; - if (pipeIndex !== -1 && str[index] === '(' && str[index + 1] !== '|') { - if (pipeIndex < index) { - pipeIndex = str.indexOf('|', index); - } - if (pipeIndex !== -1 && str[pipeIndex + 1] !== ')') { - closeParenIndex = str.indexOf(')', pipeIndex); - if (closeParenIndex > pipeIndex) { - backSlashIndex = str.indexOf('\\', pipeIndex); - if (backSlashIndex === -1 || backSlashIndex > closeParenIndex) { - return true; - } - } - } - } + if (parts.positives.length) { + positives = parts.positives.join('|'); + } - if (str[index] === '\\') { - var open = str[index + 1]; - index += 2; - var close = chars[open]; + if (parts.negatives.length) { + negatives = `-(${prefix}${parts.negatives.join('|')})`; + } - if (close) { - var n = str.indexOf(close, index); - if (n !== -1) { - index = n + 1; - } - } + if (positives && negatives) { + result = `${positives}|${negatives}`; + } else { + result = positives || negatives; + } - if (str[index] === '!') { - return true; - } - } else { - index++; - } + if (options.wrap) { + return `(${prefix}${result})`; } - return false; + + return result; }; -var relaxedCheck = function(str) { - if (str[0] === '!') { - return true; +const toRange = (a, b, isNumbers, options) => { + if (isNumbers) { + return toRegexRange(a, b, { wrap: false, ...options }); } - var index = 0; - while (index < str.length) { - if (/[*?{}()[\]]/.test(str[index])) { - return true; - } - if (str[index] === '\\') { - var open = str[index + 1]; - index += 2; - var close = chars[open]; - - if (close) { - var n = str.indexOf(close, index); - if (n !== -1) { - index = n + 1; - } - } + let start = String.fromCharCode(a); + if (a === b) return start; - if (str[index] === '!') { - return true; - } - } else { - index++; - } - } - return false; + let stop = String.fromCharCode(b); + return `[${start}-${stop}]`; }; -module.exports 
= function isGlob(str, options) { - if (typeof str !== 'string' || str === '') { - return false; +const toRegex = (start, end, options) => { + if (Array.isArray(start)) { + let wrap = options.wrap === true; + let prefix = options.capture ? '' : '?:'; + return wrap ? `(${prefix}${start.join('|')})` : start.join('|'); } + return toRegexRange(start, end, options); +}; - if (isExtglob(str)) { - return true; - } +const rangeError = (...args) => { + return new RangeError('Invalid range arguments: ' + util.inspect(...args)); +}; - var check = strictCheck; +const invalidRange = (start, end, options) => { + if (options.strictRanges === true) throw rangeError([start, end]); + return []; +}; - // optionally relax check - if (options && options.strict === false) { - check = relaxedCheck; +const invalidStep = (step, options) => { + if (options.strictRanges === true) { + throw new TypeError(`Expected step "${step}" to be a number`); } - - return check(str); + return []; }; +const fillNumbers = (start, end, step = 1, options = {}) => { + let a = Number(start); + let b = Number(end); -/***/ }), - -/***/ 366: -/***/ (function(module, __unusedexports, __webpack_require__) { - -"use strict"; + if (!Number.isInteger(a) || !Number.isInteger(b)) { + if (options.strictRanges === true) throw rangeError([start, end]); + return []; + } + // fix negative zero + if (a === 0) a = 0; + if (b === 0) b = 0; -const path = __webpack_require__(622); -const scan = __webpack_require__(537); -const parse = __webpack_require__(806); -const utils = __webpack_require__(265); -const constants = __webpack_require__(199); -const isObject = val => val && typeof val === 'object' && !Array.isArray(val); + let descending = a > b; + let startString = String(start); + let endString = String(end); + let stepString = String(step); + step = Math.max(Math.abs(step), 1); -/** - * Creates a matcher function from one or more glob patterns. The - * returned function takes a string to match as its first argument, - * and returns true if the string is a match. The returned matcher - * function also takes a boolean as the second argument that, when true, - * returns an object with additional information. - * - * ```js - * const picomatch = require('picomatch'); - * // picomatch(glob[, options]); - * - * const isMatch = picomatch('*.!(*a)'); - * console.log(isMatch('a.a')); //=> false - * console.log(isMatch('a.b')); //=> true - * ``` - * @name picomatch - * @param {String|Array} `globs` One or more glob patterns. - * @param {Object=} `options` - * @return {Function=} Returns a matcher function. - * @api public - */ + let padded = zeros(startString) || zeros(endString) || zeros(stepString); + let maxLen = padded ? Math.max(startString.length, endString.length, stepString.length) : 0; + let toNumber = padded === false && stringify(start, end, options) === false; + let format = options.transform || transform(toNumber); -const picomatch = (glob, options, returnState = false) => { - if (Array.isArray(glob)) { - const fns = glob.map(input => picomatch(input, options, returnState)); - const arrayMatcher = str => { - for (const isMatch of fns) { - const state = isMatch(str); - if (state) return state; - } - return false; - }; - return arrayMatcher; + if (options.toRegex && step === 1) { + return toRange(toMaxLen(start, maxLen), toMaxLen(end, maxLen), true, options); } - const isState = isObject(glob) && glob.tokens && glob.input; + let parts = { negatives: [], positives: [] }; + let push = num => parts[num < 0 ? 
'negatives' : 'positives'].push(Math.abs(num)); + let range = []; + let index = 0; - if (glob === '' || (typeof glob !== 'string' && !isState)) { - throw new TypeError('Expected pattern to be a non-empty string'); + while (descending ? a >= b : a <= b) { + if (options.toRegex === true && step > 1) { + push(a); + } else { + range.push(pad(format(a, index), maxLen, toNumber)); + } + a = descending ? a - step : a + step; + index++; } - const opts = options || {}; - const posix = utils.isWindows(options); - const regex = isState - ? picomatch.compileRe(glob, options) - : picomatch.makeRe(glob, options, false, true); + if (options.toRegex === true) { + return step > 1 + ? toSequence(parts, options) + : toRegex(range, null, { wrap: false, ...options }); + } - const state = regex.state; - delete regex.state; + return range; +}; - let isIgnored = () => false; - if (opts.ignore) { - const ignoreOpts = { ...options, ignore: null, onMatch: null, onResult: null }; - isIgnored = picomatch(opts.ignore, ignoreOpts, returnState); +const fillLetters = (start, end, step = 1, options = {}) => { + if ((!isNumber(start) && start.length > 1) || (!isNumber(end) && end.length > 1)) { + return invalidRange(start, end, options); } - const matcher = (input, returnObject = false) => { - const { isMatch, match, output } = picomatch.test(input, regex, options, { glob, posix }); - const result = { glob, state, regex, posix, input, output, match, isMatch }; - if (typeof opts.onResult === 'function') { - opts.onResult(result); - } + let format = options.transform || (val => String.fromCharCode(val)); + let a = `${start}`.charCodeAt(0); + let b = `${end}`.charCodeAt(0); - if (isMatch === false) { - result.isMatch = false; - return returnObject ? result : false; - } + let descending = a > b; + let min = Math.min(a, b); + let max = Math.max(a, b); - if (isIgnored(input)) { - if (typeof opts.onIgnore === 'function') { - opts.onIgnore(result); - } - result.isMatch = false; - return returnObject ? result : false; - } + if (options.toRegex && step === 1) { + return toRange(min, max, false, options); + } - if (typeof opts.onMatch === 'function') { - opts.onMatch(result); - } - return returnObject ? result : true; - }; + let range = []; + let index = 0; - if (returnState) { - matcher.state = state; + while (descending ? a >= b : a <= b) { + range.push(format(a, index)); + a = descending ? a - step : a + step; + index++; } - return matcher; + if (options.toRegex === true) { + return toRegex(range, null, { wrap: false, options }); + } + + return range; }; -/** - * Test `input` with the given `regex`. This is used by the main - * `picomatch()` function to test the input string. - * - * ```js - * const picomatch = require('picomatch'); - * // picomatch.test(input, regex[, options]); - * - * console.log(picomatch.test('foo/bar', /^(?:([^/]*?)\/([^/]*?))$/)); - * // { isMatch: true, match: [ 'foo/', 'foo', 'bar' ], output: 'foo/bar' } - * ``` - * @param {String} `input` String to test. - * @param {RegExp} `regex` - * @return {Object} Returns an object with matching info. 
- * @api public - */ +const fill = (start, end, step, options = {}) => { + if (end == null && isValidValue(start)) { + return [start]; + } -picomatch.test = (input, regex, options, { glob, posix } = {}) => { - if (typeof input !== 'string') { - throw new TypeError('Expected input to be a string'); + if (!isValidValue(start) || !isValidValue(end)) { + return invalidRange(start, end, options); } - if (input === '') { - return { isMatch: false, output: '' }; + if (typeof step === 'function') { + return fill(start, end, 1, { transform: step }); } - const opts = options || {}; - const format = opts.format || (posix ? utils.toPosixSlashes : null); - let match = input === glob; - let output = (match && format) ? format(input) : input; + if (isObject(step)) { + return fill(start, end, 0, step); + } - if (match === false) { - output = format ? format(input) : input; - match = output === glob; + let opts = { ...options }; + if (opts.capture === true) opts.wrap = true; + step = step || opts.step || 1; + + if (!isNumber(step)) { + if (step != null && !isObject(step)) return invalidStep(step, opts); + return fill(start, end, 1, step); } - if (match === false || opts.capture === true) { - if (opts.matchBase === true || opts.basename === true) { - match = picomatch.matchBase(input, regex, options, posix); - } else { - match = regex.exec(output); - } + if (isNumber(start) && isNumber(end)) { + return fillNumbers(start, end, step, opts); } - return { isMatch: Boolean(match), match, output }; + return fillLetters(start, end, Math.max(Math.abs(step), 1), opts); }; -/** - * Match the basename of a filepath. - * - * ```js - * const picomatch = require('picomatch'); - * // picomatch.matchBase(input, glob[, options]); - * console.log(picomatch.matchBase('foo/bar.js', '*.js'); // true - * ``` - * @param {String} `input` String to test. - * @param {RegExp|String} `glob` Glob pattern or regex created by [.makeRe](#makeRe). - * @return {Boolean} - * @api public - */ +module.exports = fill; -picomatch.matchBase = (input, glob, options, posix = utils.isWindows(options)) => { - const regex = glob instanceof RegExp ? glob : picomatch.makeRe(glob, options); - return regex.test(path.basename(input)); -}; -/** - * Returns true if **any** of the given glob `patterns` match the specified `string`. - * - * ```js - * const picomatch = require('picomatch'); - * // picomatch.isMatch(string, patterns[, options]); - * - * console.log(picomatch.isMatch('a.a', ['b.*', '*.a'])); //=> true - * console.log(picomatch.isMatch('a.a', 'b.*')); //=> false - * ``` - * @param {String|Array} str The string to test. - * @param {String|Array} patterns One or more glob patterns to use for matching. - * @param {Object} [options] See available [options](#options). - * @return {Boolean} Returns true if any patterns match `str` - * @api public - */ +/***/ }), -picomatch.isMatch = (str, patterns, options) => picomatch(patterns, options)(str); +/***/ 8749: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -/** - * Parse a glob pattern to create the source string for a regular - * expression. - * - * ```js - * const picomatch = require('picomatch'); - * const result = picomatch.parse(pattern[, options]); - * ``` - * @param {String} `pattern` - * @param {Object} `options` - * @return {Object} Returns an object with useful properties and output to be used as a regex source string. 
- * @api public - */ +"use strict"; -picomatch.parse = (pattern, options) => { - if (Array.isArray(pattern)) return pattern.map(p => picomatch.parse(p, options)); - return parse(pattern, { ...options, fastpaths: false }); -}; -/** - * Scan a glob pattern to separate the pattern into segments. - * - * ```js - * const picomatch = require('picomatch'); - * // picomatch.scan(input[, options]); - * - * const result = picomatch.scan('!./foo/*.js'); - * console.log(result); - * { prefix: '!./', - * input: '!./foo/*.js', - * start: 3, - * base: 'foo', - * glob: '*.js', - * isBrace: false, - * isBracket: false, - * isGlob: true, - * isExtglob: false, - * isGlobstar: false, - * negated: true } - * ``` - * @param {String} `input` Glob pattern to scan. - * @param {Object} `options` - * @return {Object} Returns an object with - * @api public - */ +// Dependencies -picomatch.scan = (input, options) => scan(input, options); +var parseUrl = __nccwpck_require__(473), + isSsh = __nccwpck_require__(44); /** - * Compile a regular expression from the `state` object returned by the - * [parse()](#parse) method. + * gitUp + * Parses the input url. * - * @param {Object} `state` - * @param {Object} `options` - * @param {Boolean} `returnOutput` Intended for implementors, this argument allows you to return the raw output from the parser. - * @param {Boolean} `returnState` Adds the state to a `state` property on the returned regex. Useful for implementors and debugging. - * @return {RegExp} - * @api public + * @name gitUp + * @function + * @param {String} input The input url. + * @return {Object} An object containing the following fields: + * + * - `protocols` (Array): An array with the url protocols (usually it has one element). + * - `port` (null|Number): The domain port. + * - `resource` (String): The url domain (including subdomains). + * - `user` (String): The authentication user (usually for ssh urls). + * - `pathname` (String): The url pathname. + * - `hash` (String): The url hash. + * - `search` (String): The url querystring value. + * - `href` (String): The input url. + * - `protocol` (String): The git url protocol. + * - `token` (String): The oauth token (could appear in the https urls). */ +function gitUp(input) { + var output = parseUrl(input); + output.token = ""; -picomatch.compileRe = (state, options, returnOutput = false, returnState = false) => { - if (returnOutput === true) { - return state.output; - } - - const opts = options || {}; - const prepend = opts.contains ? '' : '^'; - const append = opts.contains ? '' : '$'; - - let source = `${prepend}(?:${state.output})${append}`; - if (state && state.negated === true) { - source = `^(?!${source}).*$`; - } + if (output.password === "x-oauth-basic") { + output.token = output.user; + } else if (output.user === "x-token-auth") { + output.token = output.password; + } - const regex = picomatch.toRegex(source, options); - if (returnState === true) { - regex.state = state; - } + if (isSsh(output.protocols) || output.protocols.length === 0 && isSsh(input)) { + output.protocol = "ssh"; + } else if (output.protocols.length) { + output.protocol = output.protocols[0]; + } else { + output.protocol = "file"; + output.protocols = ["file"]; + } - return regex; -}; + output.href = output.href.replace(/\/$/, ""); + return output; +} -/** - * Create a regular expression from a parsed glob pattern. 
- * - * ```js - * const picomatch = require('picomatch'); - * const state = picomatch.parse('*.js'); - * // picomatch.compileRe(state[, options]); - * - * console.log(picomatch.compileRe(state)); - * //=> /^(?:(?!\.)(?=.)[^/]*?\.js)$/ - * ``` - * @param {String} `state` The object returned from the `.parse` method. - * @param {Object} `options` - * @param {Boolean} `returnOutput` Implementors may use this argument to return the compiled output, instead of a regular expression. This is not exposed on the options to prevent end-users from mutating the result. - * @param {Boolean} `returnState` Implementors may use this argument to return the state from the parsed glob with the returned regular expression. - * @return {RegExp} Returns a regex created from the given pattern. - * @api public - */ +module.exports = gitUp; -picomatch.makeRe = (input, options = {}, returnOutput = false, returnState = false) => { - if (!input || typeof input !== 'string') { - throw new TypeError('Expected a non-empty string'); - } +/***/ }), - let parsed = { negated: false, fastpaths: true }; +/***/ 8244: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - if (options.fastpaths !== false && (input[0] === '.' || input[0] === '*')) { - parsed.output = parse.fastpaths(input, options); - } +"use strict"; - if (!parsed.output) { - parsed = parse(input, options); - } - return picomatch.compileRe(parsed, options, returnOutput, returnState); -}; +var gitUp = __nccwpck_require__(8749); /** - * Create a regular expression from the given regex source string. + * gitUrlParse + * Parses a Git url. * - * ```js - * const picomatch = require('picomatch'); - * // picomatch.toRegex(source[, options]); + * @name gitUrlParse + * @function + * @param {String} url The Git url to parse. + * @return {GitUrl} The `GitUrl` object containing: + * + * - `protocols` (Array): An array with the url protocols (usually it has one element). + * - `port` (null|Number): The domain port. + * - `resource` (String): The url domain (including subdomains). + * - `user` (String): The authentication user (usually for ssh urls). + * - `pathname` (String): The url pathname. + * - `hash` (String): The url hash. + * - `search` (String): The url querystring value. + * - `href` (String): The input url. + * - `protocol` (String): The git url protocol. + * - `token` (String): The oauth token (could appear in the https urls). + * - `source` (String): The Git provider (e.g. `"github.com"`). + * - `owner` (String): The repository owner. + * - `name` (String): The repository name. + * - `ref` (String): The repository ref (e.g., "master" or "dev"). + * - `filepath` (String): A filepath relative to the repository root. + * - `filepathtype` (String): The type of filepath in the url ("blob" or "tree"). + * - `full_name` (String): The owner and name values in the `owner/name` format. + * - `toString` (Function): A function to stringify the parsed url into another url type. + * - `organization` (String): The organization the owner belongs to. This is CloudForge specific. + * - `git_suffix` (Boolean): Whether to add the `.git` suffix or not. * - * const { output } = picomatch.parse('*.js'); - * console.log(picomatch.toRegex(output)); - * //=> /^(?:(?!\.)(?=.)[^/]*?\.js)$/ - * ``` - * @param {String} `source` Regular expression source string. - * @param {Object} `options` - * @return {RegExp} - * @api public - */ - -picomatch.toRegex = (source, options) => { - try { - const opts = options || {}; - return new RegExp(source, opts.flags || (opts.nocase ? 
'i' : '')); - } catch (err) { - if (options && options.debug === true) throw err; - return /$^/; - } -}; - -/** - * Picomatch constants. - * @return {Object} */ +function gitUrlParse(url) { -picomatch.constants = constants; + if (typeof url !== "string") { + throw new Error("The url must be a string."); + } -/** - * Expose "picomatch" - */ + var shorthandRe = /^([a-z\d-]{1,39})\/([-\.\w]{1,100})$/i; -module.exports = picomatch; + if (shorthandRe.test(url)) { + url = "https://github.com/" + url; + } + var urlInfo = gitUp(url), + sourceParts = urlInfo.resource.split("."), + splits = null; -/***/ }), + urlInfo.toString = function (type) { + return gitUrlParse.stringify(this, type); + }; -/***/ 375: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + urlInfo.source = sourceParts.length > 2 ? sourceParts.slice(1 - sourceParts.length).join(".") : urlInfo.source = urlInfo.resource; -"use strict"; + // Note: Some hosting services (e.g. Visual Studio Team Services) allow whitespace characters + // in the repository and owner names so we decode the URL pieces to get the correct result + urlInfo.git_suffix = /\.git$/.test(urlInfo.pathname); + urlInfo.name = decodeURIComponent((urlInfo.pathname || urlInfo.href).replace(/(^\/)|(\/$)/g, '').replace(/\.git$/, "")); + urlInfo.owner = decodeURIComponent(urlInfo.user); -Object.defineProperty(exports, "__esModule", { value: true }); -const utils = __webpack_require__(444); -class ErrorFilter { - constructor(_settings) { - this._settings = _settings; - } - getFilter() { - return (error) => this._isNonFatalError(error); + switch (urlInfo.source) { + case "git.cloudforge.com": + urlInfo.owner = urlInfo.user; + urlInfo.organization = sourceParts[0]; + urlInfo.source = "cloudforge.com"; + break; + case "visualstudio.com": + // Handle VSTS SSH URLs + if (urlInfo.resource === 'vs-ssh.visualstudio.com') { + splits = urlInfo.name.split("/"); + if (splits.length === 4) { + urlInfo.organization = splits[1]; + urlInfo.owner = splits[2]; + urlInfo.name = splits[3]; + urlInfo.full_name = splits[2] + '/' + splits[3]; + } + break; + } else { + splits = urlInfo.name.split("/"); + if (splits.length === 2) { + urlInfo.owner = splits[1]; + urlInfo.name = splits[1]; + urlInfo.full_name = '_git/' + urlInfo.name; + } else if (splits.length === 3) { + urlInfo.name = splits[2]; + if (splits[0] === 'DefaultCollection') { + urlInfo.owner = splits[2]; + urlInfo.organization = splits[0]; + urlInfo.full_name = urlInfo.organization + '/_git/' + urlInfo.name; + } else { + urlInfo.owner = splits[0]; + urlInfo.full_name = urlInfo.owner + '/_git/' + urlInfo.name; + } + } else if (splits.length === 4) { + urlInfo.organization = splits[0]; + urlInfo.owner = splits[1]; + urlInfo.name = splits[3]; + urlInfo.full_name = urlInfo.organization + '/' + urlInfo.owner + '/_git/' + urlInfo.name; + } + break; + } + + // Azure DevOps (formerly Visual Studio Team Services) + case "dev.azure.com": + case "azure.com": + if (urlInfo.resource === 'ssh.dev.azure.com') { + splits = urlInfo.name.split("/"); + if (splits.length === 4) { + urlInfo.organization = splits[1]; + urlInfo.owner = splits[2]; + urlInfo.name = splits[3]; + } + break; + } else { + splits = urlInfo.name.split("/"); + if (splits.length === 5) { + urlInfo.organization = splits[0]; + urlInfo.owner = splits[1]; + urlInfo.name = splits[4]; + urlInfo.full_name = '_git/' + urlInfo.name; + } else if (splits.length === 3) { + urlInfo.name = splits[2]; + if (splits[0] === 'DefaultCollection') { + urlInfo.owner = splits[2]; + 
urlInfo.organization = splits[0]; + urlInfo.full_name = urlInfo.organization + '/_git/' + urlInfo.name; + } else { + urlInfo.owner = splits[0]; + urlInfo.full_name = urlInfo.owner + '/_git/' + urlInfo.name; + } + } else if (splits.length === 4) { + urlInfo.organization = splits[0]; + urlInfo.owner = splits[1]; + urlInfo.name = splits[3]; + urlInfo.full_name = urlInfo.organization + '/' + urlInfo.owner + '/_git/' + urlInfo.name; + } + if (urlInfo.query && urlInfo.query['path']) { + urlInfo.filepath = urlInfo.query['path'].replace(/^\/+/g, ''); // Strip leading slash (/) + } + if (urlInfo.query && urlInfo.query['version']) { + // version=GB + urlInfo.ref = urlInfo.query['version'].replace(/^GB/, ''); // remove GB + } + break; + } + default: + splits = urlInfo.name.split("/"); + var nameIndex = splits.length - 1; + if (splits.length >= 2) { + var dashIndex = splits.indexOf("-", 2); + var blobIndex = splits.indexOf("blob", 2); + var treeIndex = splits.indexOf("tree", 2); + var commitIndex = splits.indexOf("commit", 2); + var srcIndex = splits.indexOf("src", 2); + var rawIndex = splits.indexOf("raw", 2); + var editIndex = splits.indexOf("edit", 2); + nameIndex = dashIndex > 0 ? dashIndex - 1 : blobIndex > 0 ? blobIndex - 1 : treeIndex > 0 ? treeIndex - 1 : commitIndex > 0 ? commitIndex - 1 : srcIndex > 0 ? srcIndex - 1 : rawIndex > 0 ? rawIndex - 1 : editIndex > 0 ? editIndex - 1 : nameIndex; + + urlInfo.owner = splits.slice(0, nameIndex).join('/'); + urlInfo.name = splits[nameIndex]; + if (commitIndex) { + urlInfo.commit = splits[nameIndex + 2]; + } + } + + urlInfo.ref = ""; + urlInfo.filepathtype = ""; + urlInfo.filepath = ""; + var offsetNameIndex = splits.length > nameIndex && splits[nameIndex + 1] === "-" ? nameIndex + 1 : nameIndex; + + if (splits.length > offsetNameIndex + 2 && ["raw", "src", "blob", "tree", "edit"].indexOf(splits[offsetNameIndex + 1]) >= 0) { + urlInfo.filepathtype = splits[offsetNameIndex + 1]; + urlInfo.ref = splits[offsetNameIndex + 2]; + if (splits.length > offsetNameIndex + 3) { + urlInfo.filepath = splits.slice(offsetNameIndex + 3).join('/'); + } + } + urlInfo.organization = urlInfo.owner; + break; } - _isNonFatalError(error) { - return utils.errno.isEnoentCodeError(error) || this._settings.suppressErrors; + + if (!urlInfo.full_name) { + urlInfo.full_name = urlInfo.owner; + if (urlInfo.name) { + urlInfo.full_name && (urlInfo.full_name += "/"); + urlInfo.full_name += urlInfo.name; + } + } + // Bitbucket Server + if (urlInfo.owner.startsWith("scm/")) { + urlInfo.source = "bitbucket-server"; + urlInfo.owner = urlInfo.owner.replace("scm/", ""); + urlInfo.organization = urlInfo.owner; + urlInfo.full_name = urlInfo.owner + "/" + urlInfo.name; } -} -exports.default = ErrorFilter; + var bitbucket = /(projects|users)\/(.*?)\/repos\/(.*?)((\/.*$)|$)/; + var matches = bitbucket.exec(urlInfo.pathname); + if (matches != null) { + urlInfo.source = "bitbucket-server"; + if (matches[1] === "users") { + urlInfo.owner = "~" + matches[2]; + } else { + urlInfo.owner = matches[2]; + } -/***/ }), + urlInfo.organization = urlInfo.owner; + urlInfo.name = matches[3]; -/***/ 378: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + splits = matches[4].split("/"); + if (splits.length > 1) { + if (["raw", "browse"].indexOf(splits[1]) >= 0) { + urlInfo.filepathtype = splits[1]; + if (splits.length > 2) { + urlInfo.filepath = splits.slice(2).join('/'); + } + } else if (splits[1] === "commits" && splits.length > 2) { + urlInfo.commit = splits[2]; + } + } + urlInfo.full_name = 
urlInfo.owner + "/" + urlInfo.name; -"use strict"; + if (urlInfo.query.at) { + urlInfo.ref = urlInfo.query.at; + } else { + urlInfo.ref = ""; + } + } + return urlInfo; +} +/** + * stringify + * Stringifies a `GitUrl` object. + * + * @name stringify + * @function + * @param {GitUrl} obj The parsed Git url object. + * @param {String} type The type of the stringified url (default `obj.protocol`). + * @return {String} The stringified url. + */ +gitUrlParse.stringify = function (obj, type) { + type = type || (obj.protocols && obj.protocols.length ? obj.protocols.join('+') : obj.protocol); + var port = obj.port ? ":" + obj.port : ''; + var user = obj.user || 'git'; + var maybeGitSuffix = obj.git_suffix ? ".git" : ""; + switch (type) { + case "ssh": + if (port) return "ssh://" + user + "@" + obj.resource + port + "/" + obj.full_name + maybeGitSuffix;else return user + "@" + obj.resource + ":" + obj.full_name + maybeGitSuffix; + case "git+ssh": + case "ssh+git": + case "ftp": + case "ftps": + return type + "://" + user + "@" + obj.resource + port + "/" + obj.full_name + maybeGitSuffix; + case "http": + case "https": + var auth = obj.token ? buildToken(obj) : obj.user && (obj.protocols.includes('http') || obj.protocols.includes('https')) ? obj.user + "@" : ""; + return type + "://" + auth + obj.resource + port + "/" + buildPath(obj) + maybeGitSuffix; + default: + return obj.href; + } +}; -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. +/*! + * buildToken + * Builds OAuth token prefix (helper function) + * + * @name buildToken + * @function + * @param {GitUrl} obj The parsed Git url object. 
+ * @return {String} token prefix + */ +function buildToken(obj) { + switch (obj.source) { + case "bitbucket.org": + return "x-token-auth:" + obj.token + "@"; + default: + return obj.token + "@"; + } +} -var utils = __webpack_require__(999); -var trees = __webpack_require__(136); -var adler32 = __webpack_require__(141); -var crc32 = __webpack_require__(613); -var msg = __webpack_require__(868); +function buildPath(obj) { + switch (obj.source) { + case "bitbucket-server": + return "scm/" + obj.full_name; + default: + return "" + obj.full_name; -/* Public constants ==========================================================*/ -/* ===========================================================================*/ + } +} +module.exports = gitUrlParse; -/* Allowed flush values; see deflate() and inflate() below for details */ -var Z_NO_FLUSH = 0; -var Z_PARTIAL_FLUSH = 1; -//var Z_SYNC_FLUSH = 2; -var Z_FULL_FLUSH = 3; -var Z_FINISH = 4; -var Z_BLOCK = 5; -//var Z_TREES = 6; +/***/ }), +/***/ 4655: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -/* Return codes for the compression/decompression functions. Negative values - * are errors, positive values are used for special but normal events. - */ -var Z_OK = 0; -var Z_STREAM_END = 1; -//var Z_NEED_DICT = 2; -//var Z_ERRNO = -1; -var Z_STREAM_ERROR = -2; -var Z_DATA_ERROR = -3; -//var Z_MEM_ERROR = -4; -var Z_BUF_ERROR = -5; -//var Z_VERSION_ERROR = -6; - +"use strict"; -/* compression levels */ -//var Z_NO_COMPRESSION = 0; -//var Z_BEST_SPEED = 1; -//var Z_BEST_COMPRESSION = 9; -var Z_DEFAULT_COMPRESSION = -1; +var isGlob = __nccwpck_require__(4466); +var pathPosixDirname = (__nccwpck_require__(1017).posix.dirname); +var isWin32 = (__nccwpck_require__(2037).platform)() === 'win32'; -var Z_FILTERED = 1; -var Z_HUFFMAN_ONLY = 2; -var Z_RLE = 3; -var Z_FIXED = 4; -var Z_DEFAULT_STRATEGY = 0; +var slash = '/'; +var backslash = /\\/g; +var enclosure = /[\{\[].*[\}\]]$/; +var globby = /(^|[^\\])([\{\[]|\([^\)]+$)/; +var escaped = /\\([\!\*\?\|\[\]\(\)\{\}])/g; -/* Possible values of the data_type field (though see inflate()) */ -//var Z_BINARY = 0; -//var Z_TEXT = 1; -//var Z_ASCII = 1; // = Z_TEXT -var Z_UNKNOWN = 2; +/** + * @param {string} str + * @param {Object} opts + * @param {boolean} [opts.flipBackslashes=true] + * @returns {string} + */ +module.exports = function globParent(str, opts) { + var options = Object.assign({ flipBackslashes: true }, opts); + // flip windows path separators + if (options.flipBackslashes && isWin32 && str.indexOf(slash) < 0) { + str = str.replace(backslash, slash); + } -/* The deflate compression method */ -var Z_DEFLATED = 8; + // special case for strings ending in enclosure containing path separator + if (enclosure.test(str)) { + str += slash; + } -/*============================================================================*/ + // preserves full path in case of trailing path separator + str += 'a'; + // remove path parts that are globby + do { + str = pathPosixDirname(str); + } while (isGlob(str) || globby.test(str)); -var MAX_MEM_LEVEL = 9; -/* Maximum value for memLevel in deflateInit2 */ -var MAX_WBITS = 15; -/* 32K LZ77 window */ -var DEF_MEM_LEVEL = 8; + // remove escape chars and return result + return str.replace(escaped, '$1'); +}; -var LENGTH_CODES = 29; -/* number of length codes, not counting the special END_BLOCK code */ -var LITERALS = 256; -/* number of literal bytes 0..255 */ -var L_CODES = LITERALS + 1 + LENGTH_CODES; -/* number of Literal or Length codes, including the END_BLOCK 
code */ -var D_CODES = 30; -/* number of distance codes */ -var BL_CODES = 19; -/* number of codes used to transfer the bit lengths */ -var HEAP_SIZE = 2 * L_CODES + 1; -/* maximum heap size */ -var MAX_BITS = 15; -/* All codes must not exceed MAX_BITS bits */ +/***/ }), -var MIN_MATCH = 3; -var MAX_MATCH = 258; -var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); +/***/ 4777: +/***/ ((module) => { -var PRESET_DICT = 0x20; +// A simple implementation of make-array +function makeArray (subject) { + return Array.isArray(subject) + ? subject + : [subject] +} -var INIT_STATE = 42; -var EXTRA_STATE = 69; -var NAME_STATE = 73; -var COMMENT_STATE = 91; -var HCRC_STATE = 103; -var BUSY_STATE = 113; -var FINISH_STATE = 666; +const EMPTY = '' +const SPACE = ' ' +const ESCAPE = '\\' +const REGEX_TEST_BLANK_LINE = /^\s+$/ +const REGEX_REPLACE_LEADING_EXCAPED_EXCLAMATION = /^\\!/ +const REGEX_REPLACE_LEADING_EXCAPED_HASH = /^\\#/ +const REGEX_SPLITALL_CRLF = /\r?\n/g +// /foo, +// ./foo, +// ../foo, +// . +// .. +const REGEX_TEST_INVALID_PATH = /^\.*\/|^\.+$/ -var BS_NEED_MORE = 1; /* block not completed, need more input or more output */ -var BS_BLOCK_DONE = 2; /* block flush performed */ -var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */ -var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */ +const SLASH = '/' +const KEY_IGNORE = typeof Symbol !== 'undefined' + ? Symbol.for('node-ignore') + /* istanbul ignore next */ + : 'node-ignore' -var OS_CODE = 0x03; // Unix :) . Don't detect, use this default. +const define = (object, key, value) => + Object.defineProperty(object, key, {value}) -function err(strm, errorCode) { - strm.msg = msg[errorCode]; - return errorCode; -} +const REGEX_REGEXP_RANGE = /([0-z])-([0-z])/g -function rank(f) { - return ((f) << 1) - ((f) > 4 ? 9 : 0); -} +const RETURN_FALSE = () => false -function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } +// Sanitize the range of a regular expression +// The cases are complicated, see test cases for details +const sanitizeRange = range => range.replace( + REGEX_REGEXP_RANGE, + (match, from, to) => from.charCodeAt(0) <= to.charCodeAt(0) + ? match + // Invalid range (out of order) which is ok for gitignore rules but + // fatal for JavaScript regular expression, so eliminate it. + : EMPTY +) +// See fixtures #59 +const cleanRangeBackSlash = slashes => { + const {length} = slashes + return slashes.slice(0, length - length % 2) +} -/* ========================================================================= - * Flush as much pending output as possible. All deflate() output goes - * through this function so some applications may wish to modify it - * to avoid allocating a large strm->output buffer and copying into it. - * (See also read_buf()). - */ -function flush_pending(strm) { - var s = strm.state; +// > If the pattern ends with a slash, +// > it is removed for the purpose of the following description, +// > but it would only find a match with a directory. +// > In other words, foo/ will match a directory foo and paths underneath it, +// > but will not match a regular file or a symbolic link foo +// > (this is consistent with the way how pathspec works in general in Git). 
+// '`foo/`' will not match regular file '`foo`' or symbolic link '`foo`' +// -> ignore-rules will not deal with it, because it costs extra `fs.stat` call +// you could use option `mark: true` with `glob` - //_tr_flush_bits(s); - var len = s.pending; - if (len > strm.avail_out) { - len = strm.avail_out; - } - if (len === 0) { return; } +// '`foo/`' should not continue with the '`..`' +const REPLACERS = [ - utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out); - strm.next_out += len; - s.pending_out += len; - strm.total_out += len; - strm.avail_out -= len; - s.pending -= len; - if (s.pending === 0) { - s.pending_out = 0; - } -} + // > Trailing spaces are ignored unless they are quoted with backslash ("\") + [ + // (a\ ) -> (a ) + // (a ) -> (a) + // (a \ ) -> (a ) + /\\?\s+$/, + match => match.indexOf('\\') === 0 + ? SPACE + : EMPTY + ], + // replace (\ ) with ' ' + [ + /\\\s/g, + () => SPACE + ], -function flush_block_only(s, last) { - trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last); - s.block_start = s.strstart; - flush_pending(s.strm); -} + // Escape metacharacters + // which is written down by users but means special for regular expressions. + // > There are 12 characters with special meanings: + // > - the backslash \, + // > - the caret ^, + // > - the dollar sign $, + // > - the period or dot ., + // > - the vertical bar or pipe symbol |, + // > - the question mark ?, + // > - the asterisk or star *, + // > - the plus sign +, + // > - the opening parenthesis (, + // > - the closing parenthesis ), + // > - and the opening square bracket [, + // > - the opening curly brace {, + // > These special characters are often called "metacharacters". + [ + /[\\$.|*+(){^]/g, + match => `\\${match}` + ], -function put_byte(s, b) { - s.pending_buf[s.pending++] = b; -} + [ + // > a question mark (?) matches a single character + /(?!\\)\?/g, + () => '[^/]' + ], + // leading slash + [ -/* ========================================================================= - * Put a short in the pending buffer. The 16-bit value is put in MSB order. - * IN assertion: the stream state is correct and there is enough room in - * pending_buf. - */ -function putShortMSB(s, b) { -// put_byte(s, (Byte)(b >> 8)); -// put_byte(s, (Byte)(b & 0xff)); - s.pending_buf[s.pending++] = (b >>> 8) & 0xff; - s.pending_buf[s.pending++] = b & 0xff; -} + // > A leading slash matches the beginning of the pathname. + // > For example, "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". + // A leading slash matches the beginning of the pathname + /^\//, + () => '^' + ], + // replace special metacharacter slash after the leading slash + [ + /\//g, + () => '\\/' + ], -/* =========================================================================== - * Read a new buffer from the current input stream, update the adler32 - * and total number of bytes read. All deflate() input goes through - * this function so some applications may wish to modify it to avoid - * allocating a large strm->input buffer and copying from it. - * (See also flush_pending()). - */ -function read_buf(strm, buf, start, size) { - var len = strm.avail_in; + [ + // > A leading "**" followed by a slash means match in all directories. + // > For example, "**/foo" matches file or directory "foo" anywhere, + // > the same as pattern "foo". + // > "**/foo/bar" matches file or directory "bar" anywhere that is directly + // > under directory "foo". 
+ // Notice that the '*'s have been replaced as '\\*' + /^\^*\\\*\\\*\\\//, - if (len > size) { len = size; } - if (len === 0) { return 0; } + // '**/foo' <-> 'foo' + () => '^(?:.*\\/)?' + ], - strm.avail_in -= len; + // starting + [ + // there will be no leading '/' + // (which has been replaced by section "leading slash") + // If starts with '**', adding a '^' to the regular expression also works + /^(?=[^^])/, + function startingReplacer () { + // If has a slash `/` at the beginning or middle + return !/\/(?!$)/.test(this) + // > Prior to 2.22.1 + // > If the pattern does not contain a slash /, + // > Git treats it as a shell glob pattern + // Actually, if there is only a trailing slash, + // git also treats it as a shell glob pattern - // zmemcpy(buf, strm->next_in, len); - utils.arraySet(buf, strm.input, strm.next_in, len, start); - if (strm.state.wrap === 1) { - strm.adler = adler32(strm.adler, buf, len, start); - } + // After 2.22.1 (compatible but clearer) + // > If there is a separator at the beginning or middle (or both) + // > of the pattern, then the pattern is relative to the directory + // > level of the particular .gitignore file itself. + // > Otherwise the pattern may also match at any level below + // > the .gitignore level. + ? '(?:^|\\/)' - else if (strm.state.wrap === 2) { - strm.adler = crc32(strm.adler, buf, len, start); - } + // > Otherwise, Git treats the pattern as a shell glob suitable for + // > consumption by fnmatch(3) + : '^' + } + ], - strm.next_in += len; - strm.total_in += len; + // two globstars + [ + // Use lookahead assertions so that we could match more than one `'/**'` + /\\\/\\\*\\\*(?=\\\/|$)/g, - return len; -} + // Zero, one or several directories + // should not use '*', or it will be replaced by the next replacer + // Check if it is not the last `'/**'` + (_, index, str) => index + 6 < str.length -/* =========================================================================== - * Set match_start to the longest match starting at the given string and - * return its length. Matches shorter or equal to prev_length are discarded, - * in which case the result is equal to prev_length and match_start is - * garbage. - * IN assertions: cur_match is the head of the hash chain for the current - * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 - * OUT assertion: the match length is not greater than s->lookahead. - */ -function longest_match(s, cur_match) { - var chain_length = s.max_chain_length; /* max hash chain length */ - var scan = s.strstart; /* current string */ - var match; /* matched string */ - var len; /* length of current match */ - var best_len = s.prev_length; /* best match length so far */ - var nice_match = s.nice_match; /* stop if match long enough */ - var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ? - s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/; + // case: /**/ + // > A slash followed by two consecutive asterisks then a slash matches + // > zero or more directories. + // > For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on. + // '/**/' + ? '(?:\\/[^\\/]+)*' - var _win = s.window; // shortcut + // case: /** + // > A trailing `"/**"` matches everything inside. - var wmask = s.w_mask; - var prev = s.prev; + // #21: everything inside but it should not include the current folder + : '\\/.+' + ], - /* Stop when cur_match becomes <= limit. To simplify the code, - * we prevent matches with the string of window index 0. 
- */ + // intermediate wildcards + [ + // Never replace escaped '*' + // ignore rule '\*' will match the path '*' - var strend = s.strstart + MAX_MATCH; - var scan_end1 = _win[scan + best_len - 1]; - var scan_end = _win[scan + best_len]; + // 'abc.*/' -> go + // 'abc.*' -> skip this rule + /(^|[^\\]+)\\\*(?=.+)/g, - /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. - * It is easy to get rid of this optimization if necessary. - */ - // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + // '*.js' matches '.js' + // '*.js' doesn't match 'abc' + (_, p1) => `${p1}[^\\/]*` + ], - /* Do not waste too much time if we already have a good match: */ - if (s.prev_length >= s.good_match) { - chain_length >>= 2; - } - /* Do not look for matches beyond the end of the input. This is necessary - * to make deflate deterministic. - */ - if (nice_match > s.lookahead) { nice_match = s.lookahead; } + [ + // unescape, revert step 3 except for back slash + // For example, if a user escape a '\\*', + // after step 3, the result will be '\\\\\\*' + /\\\\\\(?=[$.|*+(){^])/g, + () => ESCAPE + ], - // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + [ + // '\\\\' -> '\\' + /\\\\/g, + () => ESCAPE + ], - do { - // Assert(cur_match < s->strstart, "no future"); - match = cur_match; + [ + // > The range notation, e.g. [a-zA-Z], + // > can be used to match one of the characters in a range. - /* Skip to next match if the match length cannot increase - * or if the match length is less than 2. Note that the checks below - * for insufficient lookahead only occur occasionally for performance - * reasons. Therefore uninitialized memory will be accessed, and - * conditional jumps will be made that depend on those values. - * However the length of the match is limited to the lookahead, so - * the output of deflate is not affected by the uninitialized values. - */ + // `\` is escaped by step 3 + /(\\)?\[([^\]/]*?)(\\*)($|\])/g, + (match, leadEscape, range, endEscape, close) => leadEscape === ESCAPE + // '\\[bar]' -> '\\\\[bar\\]' + ? `\\[${range}${cleanRangeBackSlash(endEscape)}${close}` + : close === ']' + ? endEscape.length % 2 === 0 + // A normal case, and it is a range notation + // '[bar]' + // '[bar\\\\]' + ? `[${sanitizeRange(range)}${endEscape}]` + // Invalid range notaton + // '[bar\\]' -> '[bar\\\\]' + : '[]' + : '[]' + ], - if (_win[match + best_len] !== scan_end || - _win[match + best_len - 1] !== scan_end1 || - _win[match] !== _win[scan] || - _win[++match] !== _win[scan + 1]) { - continue; - } + // ending + [ + // 'js' will not match 'js.' + // 'ab' will not match 'abc' + /(?:[^*])$/, - /* The check at best_len-1 can be removed because it will be made - * again later. (This heuristic is not always a win.) - * It is not necessary to compare scan[2] and match[2] since they - * are always equal when the other bytes match, given that - * the hash keys are equal and that HASH_BITS >= 8. - */ - scan += 2; - match++; - // Assert(*scan == *match, "match[2]?"); + // WTF! + // https://git-scm.com/docs/gitignore + // changes in [2.22.1](https://git-scm.com/docs/gitignore/2.22.1) + // which re-fixes #24, #38 - /* We check for insufficient lookahead only every 8th comparison; - * the 256th check will be made at strstart+258. 
- */ - do { - /*jshint noempty:false*/ - } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && - scan < strend); + // > If there is a separator at the end of the pattern then the pattern + // > will only match directories, otherwise the pattern can match both + // > files and directories. - // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + // 'js*' will not match 'a.js' + // 'js/' will not match 'a.js' + // 'js' will match 'a.js' and 'a.js/' + match => /\/$/.test(match) + // foo/ will not match 'foo' + ? `${match}$` + // foo matches 'foo' and 'foo/' + : `${match}(?=$|\\/$)` + ], - len = MAX_MATCH - (strend - scan); - scan = strend - MAX_MATCH; + // trailing wildcard + [ + /(\^|\\\/)?\\\*$/, + (_, p1) => { + const prefix = p1 + // '\^': + // '/*' does not match EMPTY + // '/*' does not match everything - if (len > best_len) { - s.match_start = cur_match; - best_len = len; - if (len >= nice_match) { - break; - } - scan_end1 = _win[scan + best_len - 1]; - scan_end = _win[scan + best_len]; - } - } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0); + // '\\\/': + // 'abc/*' does not match 'abc/' + ? `${p1}[^/]+` - if (best_len <= s.lookahead) { - return best_len; - } - return s.lookahead; -} + // 'a*' matches 'a' + // 'a*' matches 'aa' + : '[^/]*' + return `${prefix}(?=$|\\/$)` + } + ], +] -/* =========================================================================== - * Fill the window when the lookahead becomes insufficient. - * Updates strstart and lookahead. - * - * IN assertion: lookahead < MIN_LOOKAHEAD - * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD - * At least one byte has been read, or avail_in == 0; reads are - * performed for at least two bytes (required for the zip translate_eol - * option -- not supported here). - */ -function fill_window(s) { - var _w_size = s.w_size; - var p, n, m, more, str; - - //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); - - do { - more = s.window_size - s.lookahead - s.strstart; +// A simple cache, because an ignore rule only has only one certain meaning +const regexCache = Object.create(null) - // JS ints have 32 bit, block below not needed - /* Deal with !@#$% 64K limit: */ - //if (sizeof(int) <= 2) { - // if (more == 0 && s->strstart == 0 && s->lookahead == 0) { - // more = wsize; - // - // } else if (more == (unsigned)(-1)) { - // /* Very unlikely, but possible on 16 bit machine if - // * strstart == 0 && lookahead == 1 (input done a byte at time) - // */ - // more--; - // } - //} +// @param {pattern} +const makeRegex = (pattern, ignoreCase) => { + let source = regexCache[pattern] + if (!source) { + source = REPLACERS.reduce( + (prev, current) => prev.replace(current[0], current[1].bind(pattern)), + pattern + ) + regexCache[pattern] = source + } - /* If the window is almost full and there is insufficient lookahead, - * move the upper half to the lower one to make room in the upper half. - */ - if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) { + return ignoreCase + ? 
new RegExp(source, 'i') + : new RegExp(source) +} - utils.arraySet(s.window, s.window, _w_size, _w_size, 0); - s.match_start -= _w_size; - s.strstart -= _w_size; - /* we now have strstart >= MAX_DIST */ - s.block_start -= _w_size; +const isString = subject => typeof subject === 'string' - /* Slide the hash table (could be avoided with 32 bit values - at the expense of memory usage). We slide even when level == 0 - to keep the hash table consistent if we switch back to level > 0 - later. (Using level 0 permanently is not an optimal usage of - zlib, so we don't care about this pathological case.) - */ +// > A blank line matches no files, so it can serve as a separator for readability. +const checkPattern = pattern => pattern + && isString(pattern) + && !REGEX_TEST_BLANK_LINE.test(pattern) - n = s.hash_size; - p = n; - do { - m = s.head[--p]; - s.head[p] = (m >= _w_size ? m - _w_size : 0); - } while (--n); + // > A line starting with # serves as a comment. + && pattern.indexOf('#') !== 0 - n = _w_size; - p = n; - do { - m = s.prev[--p]; - s.prev[p] = (m >= _w_size ? m - _w_size : 0); - /* If n is not on any hash chain, prev[n] is garbage but - * its value will never be used. - */ - } while (--n); +const splitPattern = pattern => pattern.split(REGEX_SPLITALL_CRLF) - more += _w_size; - } - if (s.strm.avail_in === 0) { - break; - } +class IgnoreRule { + constructor ( + origin, + pattern, + negative, + regex + ) { + this.origin = origin + this.pattern = pattern + this.negative = negative + this.regex = regex + } +} - /* If there was no sliding: - * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && - * more == window_size - lookahead - strstart - * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) - * => more >= window_size - 2*WSIZE + 2 - * In the BIG_MEM or MMAP case (not yet supported), - * window_size == input_size + MIN_LOOKAHEAD && - * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. - * Otherwise, window_size == 2*WSIZE so more >= 2. - * If there was sliding, more >= WSIZE. So in all cases, more >= 2. - */ - //Assert(more >= 2, "more < 2"); - n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more); - s.lookahead += n; +const createRule = (pattern, ignoreCase) => { + const origin = pattern + let negative = false - /* Initialize the hash value now that we have some input: */ - if (s.lookahead + s.insert >= MIN_MATCH) { - str = s.strstart - s.insert; - s.ins_h = s.window[str]; + // > An optional prefix "!" which negates the pattern; + if (pattern.indexOf('!') === 0) { + negative = true + pattern = pattern.substr(1) + } - /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask; -//#if MIN_MATCH != 3 -// Call update_hash() MIN_MATCH-3 more times -//#endif - while (s.insert) { - /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; + pattern = pattern + // > Put a backslash ("\") in front of the first "!" for patterns that + // > begin with a literal "!", for example, `"\!important!.txt"`. + .replace(REGEX_REPLACE_LEADING_EXCAPED_EXCLAMATION, '!') + // > Put a backslash ("\") in front of the first hash for patterns that + // > begin with a hash. 
+ .replace(REGEX_REPLACE_LEADING_EXCAPED_HASH, '#') - s.prev[str & s.w_mask] = s.head[s.ins_h]; - s.head[s.ins_h] = str; - str++; - s.insert--; - if (s.lookahead + s.insert < MIN_MATCH) { - break; - } - } - } - /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, - * but this is not important since only literal bytes will be emitted. - */ + const regex = makeRegex(pattern, ignoreCase) - } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0); + return new IgnoreRule( + origin, + pattern, + negative, + regex + ) +} - /* If the WIN_INIT bytes after the end of the current data have never been - * written, then zero those bytes in order to avoid memory check reports of - * the use of uninitialized (or uninitialised as Julian writes) bytes by - * the longest match routines. Update the high water mark for the next - * time through here. WIN_INIT is set to MAX_MATCH since the longest match - * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. - */ -// if (s.high_water < s.window_size) { -// var curr = s.strstart + s.lookahead; -// var init = 0; -// -// if (s.high_water < curr) { -// /* Previous high water mark below current data -- zero WIN_INIT -// * bytes or up to end of window, whichever is less. -// */ -// init = s.window_size - curr; -// if (init > WIN_INIT) -// init = WIN_INIT; -// zmemzero(s->window + curr, (unsigned)init); -// s->high_water = curr + init; -// } -// else if (s->high_water < (ulg)curr + WIN_INIT) { -// /* High water mark at or above current data, but below current data -// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up -// * to end of window, whichever is less. -// */ -// init = (ulg)curr + WIN_INIT - s->high_water; -// if (init > s->window_size - s->high_water) -// init = s->window_size - s->high_water; -// zmemzero(s->window + s->high_water, (unsigned)init); -// s->high_water += init; -// } -// } -// -// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, -// "not enough room for search"); +const throwError = (message, Ctor) => { + throw new Ctor(message) } -/* =========================================================================== - * Copy without compression as much as possible from the input stream, return - * the current block state. - * This function does not insert new strings in the dictionary since - * uncompressible data is probably not useful. This function is used - * only for the level=0 compression option. - * NOTE: this function should be optimized to avoid extra copying from - * window to pending_buf. 
- */ -function deflate_stored(s, flush) { - /* Stored blocks are limited to 0xffff bytes, pending_buf is limited - * to pending_buf_size, and each stored block has a 5 byte header: - */ - var max_block_size = 0xffff; +const checkPath = (path, originalPath, doThrow) => { + if (!isString(path)) { + return doThrow( + `path must be a string, but got \`${originalPath}\``, + TypeError + ) + } - if (max_block_size > s.pending_buf_size - 5) { - max_block_size = s.pending_buf_size - 5; + // We don't know if we should ignore EMPTY, so throw + if (!path) { + return doThrow(`path must not be empty`, TypeError) } - /* Copy as much as possible from input to output: */ - for (;;) { - /* Fill the window as much as possible: */ - if (s.lookahead <= 1) { + // Check if it is a relative path + if (checkPath.isNotRelative(path)) { + const r = '`path.relative()`d' + return doThrow( + `path should be a ${r} string, but got "${originalPath}"`, + RangeError + ) + } - //Assert(s->strstart < s->w_size+MAX_DIST(s) || - // s->block_start >= (long)s->w_size, "slide too late"); -// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) || -// s.block_start >= s.w_size)) { -// throw new Error("slide too late"); -// } + return true +} - fill_window(s); - if (s.lookahead === 0 && flush === Z_NO_FLUSH) { - return BS_NEED_MORE; - } +const isNotRelative = path => REGEX_TEST_INVALID_PATH.test(path) - if (s.lookahead === 0) { - break; - } - /* flush the current block */ - } - //Assert(s->block_start >= 0L, "block gone"); -// if (s.block_start < 0) throw new Error("block gone"); - - s.strstart += s.lookahead; - s.lookahead = 0; +checkPath.isNotRelative = isNotRelative +checkPath.convert = p => p - /* Emit a stored block if pending_buf will be full: */ - var max_start = s.block_start + max_block_size; +class Ignore { + constructor ({ + ignorecase = true, + ignoreCase = ignorecase, + allowRelativePaths = false + } = {}) { + define(this, KEY_IGNORE, true) - if (s.strstart === 0 || s.strstart >= max_start) { - /* strstart == 0 is possible when wraparound on 16-bit machine */ - s.lookahead = s.strstart - max_start; - s.strstart = max_start; - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ + this._rules = [] + this._ignoreCase = ignoreCase + this._allowRelativePaths = allowRelativePaths + this._initCache() + } + _initCache () { + this._ignoreCache = Object.create(null) + this._testCache = Object.create(null) + } + _addPattern (pattern) { + // #32 + if (pattern && pattern[KEY_IGNORE]) { + this._rules = this._rules.concat(pattern._rules) + this._added = true + return } - /* Flush if we may have to slide, otherwise block_start may become - * negative and the data will be gone: - */ - if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ + + if (checkPattern(pattern)) { + const rule = createRule(pattern, this._ignoreCase) + this._added = true + this._rules.push(rule) } } - s.insert = 0; + // @param {Array | string | Ignore} pattern + add (pattern) { + this._added = false - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true); - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED; + makeArray( + isString(pattern) + ? splitPattern(pattern) + : pattern + ).forEach(this._addPattern, this) + + // Some rules have just added to the ignore, + // making the behavior changed. 
+ if (this._added) { + this._initCache() } - /***/ - return BS_FINISH_DONE; + + return this } - if (s.strstart > s.block_start) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ + // legacy + addPattern (pattern) { + return this.add(pattern) } - return BS_NEED_MORE; -} + // | ignored : unignored + // negative | 0:0 | 0:1 | 1:0 | 1:1 + // -------- | ------- | ------- | ------- | -------- + // 0 | TEST | TEST | SKIP | X + // 1 | TESTIF | SKIP | TEST | X -/* =========================================================================== - * Compress as much as possible from the input stream, return the current - * block state. - * This function does not perform lazy evaluation of matches and inserts - * new strings in the dictionary only for unmatched strings or for short - * matches. It is used only for the fast compression options. - */ -function deflate_fast(s, flush) { - var hash_head; /* head of the hash chain */ - var bflush; /* set if current block must be flushed */ + // - SKIP: always skip + // - TEST: always test + // - TESTIF: only test if checkUnignored + // - X: that never happen - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the next match, plus MIN_MATCH bytes to insert the - * string following the next match. - */ - if (s.lookahead < MIN_LOOKAHEAD) { - fill_window(s); - if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { - return BS_NEED_MORE; + // @param {boolean} whether should check if the path is unignored, + // setting `checkUnignored` to `false` could reduce additional + // path matching. + + // @returns {TestResult} true if a file is ignored + _testOne (path, checkUnignored) { + let ignored = false + let unignored = false + + this._rules.forEach(rule => { + const {negative} = rule + if ( + unignored === negative && ignored !== unignored + || negative && !ignored && !unignored && !checkUnignored + ) { + return } - if (s.lookahead === 0) { - break; /* flush the current block */ + + const matched = rule.regex.test(path) + + if (matched) { + ignored = !negative + unignored = negative } + }) + + return { + ignored, + unignored } + } - /* Insert the string window[strstart .. strstart+2] in the - * dictionary, and set hash_head to the head of the hash chain: - */ - hash_head = 0/*NIL*/; - if (s.lookahead >= MIN_MATCH) { - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; - s.head[s.ins_h] = s.strstart; - /***/ + // @returns {TestResult} + _test (originalPath, cache, checkUnignored, slices) { + const path = originalPath + // Supports nullable path + && checkPath.convert(originalPath) + + checkPath( + path, + originalPath, + this._allowRelativePaths + ? RETURN_FALSE + : throwError + ) + + return this._t(path, cache, checkUnignored, slices) + } + + _t (path, cache, checkUnignored, slices) { + if (path in cache) { + return cache[path] } - /* Find the longest match, discarding those <= prev_length. - * At this point we have always match_length < MIN_MATCH - */ - if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) { - /* To simplify the code, we prevent matches with the string - * of window index 0 (in particular we have to avoid a match - * of the string with itself at the start of the input file). 
- */ - s.match_length = longest_match(s, hash_head); - /* longest_match() sets match_start */ + if (!slices) { + // path/to/a.js + // ['path', 'to', 'a.js'] + slices = path.split(SLASH) } - if (s.match_length >= MIN_MATCH) { - // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only - /*** _tr_tally_dist(s, s.strstart - s.match_start, - s.match_length - MIN_MATCH, bflush); ***/ - bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH); + slices.pop() - s.lookahead -= s.match_length; + // If the path has no parent directory, just test it + if (!slices.length) { + return cache[path] = this._testOne(path, checkUnignored) + } - /* Insert new strings in the hash table only if the match length - * is not too large. This saves time but degrades compression. - */ - if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) { - s.match_length--; /* string at strstart already in table */ - do { - s.strstart++; - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; - s.head[s.ins_h] = s.strstart; - /***/ - /* strstart never exceeds WSIZE-MAX_MATCH, so there are - * always MIN_MATCH bytes ahead. - */ - } while (--s.match_length !== 0); - s.strstart++; - } else - { - s.strstart += s.match_length; - s.match_length = 0; - s.ins_h = s.window[s.strstart]; - /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask; + const parent = this._t( + slices.join(SLASH) + SLASH, + cache, + checkUnignored, + slices + ) -//#if MIN_MATCH != 3 -// Call UPDATE_HASH() MIN_MATCH-3 more times -//#endif - /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not - * matter since it will be recomputed at next deflate call. - */ - } - } else { - /* No match, output a literal byte */ - //Tracevv((stderr,"%c", s.window[s.strstart])); - /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ - bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + // If the path contains a parent directory, check the parent first + return cache[path] = parent.ignored + // > It is not possible to re-include a file if a parent directory of + // > that file is excluded. + ? parent + : this._testOne(path, checkUnignored) + } - s.lookahead--; - s.strstart++; - } - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ - } + ignores (path) { + return this._test(path, this._ignoreCache, false).ignored } - s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1); - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true); - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED; - } - /***/ - return BS_FINISH_DONE; + + createFilter () { + return path => !this.ignores(path) } - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ + + filter (paths) { + return makeArray(paths).filter(this.createFilter()) + } + + // @returns {TestResult} + test (path) { + return this._test(path, this._testCache, true) } - return BS_BLOCK_DONE; } -/* =========================================================================== - * Same as above, but achieves better compression. 
We use a lazy - * evaluation for matches: a match is finally adopted only if there is - * no better match at the next window position. - */ -function deflate_slow(s, flush) { - var hash_head; /* head of hash chain */ - var bflush; /* set if current block must be flushed */ +const factory = options => new Ignore(options) - var max_insert; +const isPathValid = path => + checkPath(path && checkPath.convert(path), path, RETURN_FALSE) - /* Process the input block. */ - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the next match, plus MIN_MATCH bytes to insert the - * string following the next match. - */ - if (s.lookahead < MIN_LOOKAHEAD) { - fill_window(s); - if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { - return BS_NEED_MORE; - } - if (s.lookahead === 0) { break; } /* flush the current block */ - } +factory.isPathValid = isPathValid - /* Insert the string window[strstart .. strstart+2] in the - * dictionary, and set hash_head to the head of the hash chain: - */ - hash_head = 0/*NIL*/; - if (s.lookahead >= MIN_MATCH) { - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; - s.head[s.ins_h] = s.strstart; - /***/ - } +// Fixes typescript +factory.default = factory - /* Find the longest match, discarding those <= prev_length. - */ - s.prev_length = s.match_length; - s.prev_match = s.match_start; - s.match_length = MIN_MATCH - 1; +module.exports = factory - if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match && - s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) { - /* To simplify the code, we prevent matches with the string - * of window index 0 (in particular we have to avoid a match - * of the string with itself at the start of the input file). - */ - s.match_length = longest_match(s, hash_head); - /* longest_match() sets match_start */ +// Windows +// -------------------------------------------------------------- +/* istanbul ignore if */ +if ( + // Detect `process` so that it can run in browsers. + typeof process !== 'undefined' + && ( + process.env && process.env.IGNORE_TEST_WIN32 + || process.platform === 'win32' + ) +) { + /* eslint no-control-regex: "off" */ + const makePosix = str => /^\\\\\?\\/.test(str) + || /["<>|\u0000-\u001F]+/u.test(str) + ? str + : str.replace(/\\/g, '/') - if (s.match_length <= 5 && - (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) { + checkPath.convert = makePosix - /* If prev_match is also MIN_MATCH, match_start is garbage - * but we will ignore the current match anyway. - */ - s.match_length = MIN_MATCH - 1; - } - } - /* If there was a match at the previous step and the current - * match is not better, output the previous match: - */ - if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) { - max_insert = s.strstart + s.lookahead - MIN_MATCH; - /* Do not insert strings in hash table beyond this. 
*/ + // 'C:\\foo' <- 'C:\\foo' has been converted to 'C:/' + // 'd:\\foo' + const REGIX_IS_WINDOWS_PATH_ABSOLUTE = /^[a-z]:\//i + checkPath.isNotRelative = path => + REGIX_IS_WINDOWS_PATH_ABSOLUTE.test(path) + || isNotRelative(path) +} - //check_match(s, s.strstart-1, s.prev_match, s.prev_length); - /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match, - s.prev_length - MIN_MATCH, bflush);***/ - bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH); - /* Insert in hash table all strings up to the end of the match. - * strstart-1 and strstart are already inserted. If there is not - * enough lookahead, the last two strings are not inserted in - * the hash table. - */ - s.lookahead -= s.prev_length - 1; - s.prev_length -= 2; - do { - if (++s.strstart <= max_insert) { - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; - s.head[s.ins_h] = s.strstart; - /***/ - } - } while (--s.prev_length !== 0); - s.match_available = 0; - s.match_length = MIN_MATCH - 1; - s.strstart++; +/***/ }), - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ - } +/***/ 4124: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - } else if (s.match_available) { - /* If there was no match at the previous position, output a - * single literal. If there was a match but the current match - * is longer, truncate the previous match to a single literal. - */ - //Tracevv((stderr,"%c", s->window[s->strstart-1])); - /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ - bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); +try { + var util = __nccwpck_require__(3837); + /* istanbul ignore next */ + if (typeof util.inherits !== 'function') throw ''; + module.exports = util.inherits; +} catch (e) { + /* istanbul ignore next */ + module.exports = __nccwpck_require__(8544); +} - if (bflush) { - /*** FLUSH_BLOCK_ONLY(s, 0) ***/ - flush_block_only(s, false); - /***/ - } - s.strstart++; - s.lookahead--; - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - } else { - /* There is no previous match to compare with, wait for - * the next step to decide. - */ - s.match_available = 1; - s.strstart++; - s.lookahead--; - } - } - //Assert (flush != Z_NO_FLUSH, "no flush?"); - if (s.match_available) { - //Tracevv((stderr,"%c", s->window[s->strstart-1])); - /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ - bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); - s.match_available = 0; - } - s.insert = s.strstart < MIN_MATCH - 1 ? 
s.strstart : MIN_MATCH - 1; - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true); - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED; +/***/ }), + +/***/ 8544: +/***/ ((module) => { + +if (typeof Object.create === 'function') { + // implementation from standard node.js 'util' module + module.exports = function inherits(ctor, superCtor) { + if (superCtor) { + ctor.super_ = superCtor + ctor.prototype = Object.create(superCtor.prototype, { + constructor: { + value: ctor, + enumerable: false, + writable: true, + configurable: true + } + }) } - /***/ - return BS_FINISH_DONE; - } - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; + }; +} else { + // old school shim for old browsers + module.exports = function inherits(ctor, superCtor) { + if (superCtor) { + ctor.super_ = superCtor + var TempCtor = function () {} + TempCtor.prototype = superCtor.prototype + ctor.prototype = new TempCtor() + ctor.prototype.constructor = ctor } - /***/ } - - return BS_BLOCK_DONE; } -/* =========================================================================== - * For Z_RLE, simply look for runs of bytes, generate matches only of distance - * one. Do not maintain a hash table. (It will be regenerated if this run of - * deflate switches away from Z_RLE.) - */ -function deflate_rle(s, flush) { - var bflush; /* set if current block must be flushed */ - var prev; /* byte at distance one to match */ - var scan, strend; /* scan goes up to strend for length of run */ +/***/ }), - var _win = s.window; +/***/ 6435: +/***/ ((module) => { - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the longest run, plus one for the unrolled loop. - */ - if (s.lookahead <= MAX_MATCH) { - fill_window(s); - if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) { - return BS_NEED_MORE; +/*! + * is-extglob + * + * Copyright (c) 2014-2016, Jon Schlinkert. + * Licensed under the MIT License. + */ + +module.exports = function isExtglob(str) { + if (typeof str !== 'string' || str === '') { + return false; + } + + var match; + while ((match = /(\\).|([@?!+*]\(.*\))/g.exec(str))) { + if (match[2]) return true; + str = str.slice(match.index + match[0].length); + } + + return false; +}; + + +/***/ }), + +/***/ 4466: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/*! + * is-glob + * + * Copyright (c) 2014-2017, Jon Schlinkert. + * Released under the MIT License. + */ + +var isExtglob = __nccwpck_require__(6435); +var chars = { '{': '}', '(': ')', '[': ']'}; +var strictCheck = function(str) { + if (str[0] === '!') { + return true; + } + var index = 0; + var pipeIndex = -2; + var closeSquareIndex = -2; + var closeCurlyIndex = -2; + var closeParenIndex = -2; + var backSlashIndex = -2; + while (index < str.length) { + if (str[index] === '*') { + return true; + } + + if (str[index + 1] === '?' 
&& /[\].+)]/.test(str[index])) { + return true; + } + + if (closeSquareIndex !== -1 && str[index] === '[' && str[index + 1] !== ']') { + if (closeSquareIndex < index) { + closeSquareIndex = str.indexOf(']', index); + } + if (closeSquareIndex > index) { + if (backSlashIndex === -1 || backSlashIndex > closeSquareIndex) { + return true; + } + backSlashIndex = str.indexOf('\\', index); + if (backSlashIndex === -1 || backSlashIndex > closeSquareIndex) { + return true; + } } - if (s.lookahead === 0) { break; } /* flush the current block */ } - /* See how many times the previous byte repeats */ - s.match_length = 0; - if (s.lookahead >= MIN_MATCH && s.strstart > 0) { - scan = s.strstart - 1; - prev = _win[scan]; - if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) { - strend = s.strstart + MAX_MATCH; - do { - /*jshint noempty:false*/ - } while (prev === _win[++scan] && prev === _win[++scan] && - prev === _win[++scan] && prev === _win[++scan] && - prev === _win[++scan] && prev === _win[++scan] && - prev === _win[++scan] && prev === _win[++scan] && - scan < strend); - s.match_length = MAX_MATCH - (strend - scan); - if (s.match_length > s.lookahead) { - s.match_length = s.lookahead; + if (closeCurlyIndex !== -1 && str[index] === '{' && str[index + 1] !== '}') { + closeCurlyIndex = str.indexOf('}', index); + if (closeCurlyIndex > index) { + backSlashIndex = str.indexOf('\\', index); + if (backSlashIndex === -1 || backSlashIndex > closeCurlyIndex) { + return true; } } - //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); } - /* Emit match if have run of MIN_MATCH or longer, else emit literal */ - if (s.match_length >= MIN_MATCH) { - //check_match(s, s.strstart, s.strstart - 1, s.match_length); + if (closeParenIndex !== -1 && str[index] === '(' && str[index + 1] === '?' 
&& /[:!=]/.test(str[index + 2]) && str[index + 3] !== ')') { + closeParenIndex = str.indexOf(')', index); + if (closeParenIndex > index) { + backSlashIndex = str.indexOf('\\', index); + if (backSlashIndex === -1 || backSlashIndex > closeParenIndex) { + return true; + } + } + } - /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/ - bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH); + if (pipeIndex !== -1 && str[index] === '(' && str[index + 1] !== '|') { + if (pipeIndex < index) { + pipeIndex = str.indexOf('|', index); + } + if (pipeIndex !== -1 && str[pipeIndex + 1] !== ')') { + closeParenIndex = str.indexOf(')', pipeIndex); + if (closeParenIndex > pipeIndex) { + backSlashIndex = str.indexOf('\\', pipeIndex); + if (backSlashIndex === -1 || backSlashIndex > closeParenIndex) { + return true; + } + } + } + } - s.lookahead -= s.match_length; - s.strstart += s.match_length; - s.match_length = 0; - } else { - /* No match, output a literal byte */ - //Tracevv((stderr,"%c", s->window[s->strstart])); - /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ - bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + if (str[index] === '\\') { + var open = str[index + 1]; + index += 2; + var close = chars[open]; - s.lookahead--; - s.strstart++; - } - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; + if (close) { + var n = str.indexOf(close, index); + if (n !== -1) { + index = n + 1; + } } - /***/ + + if (str[index] === '!') { + return true; + } + } else { + index++; } } - s.insert = 0; - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true); - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED; - } - /***/ - return BS_FINISH_DONE; + return false; +}; + +var relaxedCheck = function(str) { + if (str[0] === '!') { + return true; } - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; + var index = 0; + while (index < str.length) { + if (/[*?{}()[\]]/.test(str[index])) { + return true; } - /***/ - } - return BS_BLOCK_DONE; -} -/* =========================================================================== - * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. - * (It will be regenerated if this run of deflate switches away from Huffman.) - */ -function deflate_huff(s, flush) { - var bflush; /* set if current block must be flushed */ + if (str[index] === '\\') { + var open = str[index + 1]; + index += 2; + var close = chars[open]; - for (;;) { - /* Make sure that we have a literal to write. 
*/ - if (s.lookahead === 0) { - fill_window(s); - if (s.lookahead === 0) { - if (flush === Z_NO_FLUSH) { - return BS_NEED_MORE; + if (close) { + var n = str.indexOf(close, index); + if (n !== -1) { + index = n + 1; } - break; /* flush the current block */ } - } - /* Output a literal byte */ - s.match_length = 0; - //Tracevv((stderr,"%c", s->window[s->strstart])); - /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ - bflush = trees._tr_tally(s, 0, s.window[s.strstart]); - s.lookahead--; - s.strstart++; - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; + if (str[index] === '!') { + return true; } - /***/ - } - } - s.insert = 0; - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true); - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED; + } else { + index++; } - /***/ - return BS_FINISH_DONE; } - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false); - if (s.strm.avail_out === 0) { - return BS_NEED_MORE; - } - /***/ + return false; +}; + +module.exports = function isGlob(str, options) { + if (typeof str !== 'string' || str === '') { + return false; } - return BS_BLOCK_DONE; -} -/* Values for max_lazy_match, good_match and max_chain_length, depending on - * the desired pack level (0..9). The values given below have been tuned to - * exclude worst case performance for pathological files. Better values may be - * found for specific files. - */ -function Config(good_length, max_lazy, nice_length, max_chain, func) { - this.good_length = good_length; - this.max_lazy = max_lazy; - this.nice_length = nice_length; - this.max_chain = max_chain; - this.func = func; -} + if (isExtglob(str)) { + return true; + } -var configuration_table; + var check = strictCheck; -configuration_table = [ - /* good lazy nice chain */ - new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */ - new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */ - new Config(4, 5, 16, 8, deflate_fast), /* 2 */ - new Config(4, 6, 32, 32, deflate_fast), /* 3 */ + // optionally relax check + if (options && options.strict === false) { + check = relaxedCheck; + } - new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */ - new Config(8, 16, 32, 32, deflate_slow), /* 5 */ - new Config(8, 16, 128, 128, deflate_slow), /* 6 */ - new Config(8, 32, 128, 256, deflate_slow), /* 7 */ - new Config(32, 128, 258, 1024, deflate_slow), /* 8 */ - new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */ -]; + return check(str); +}; -/* =========================================================================== - * Initialize the "longest match" routines for a new zlib stream +/***/ }), + +/***/ 5680: +/***/ ((module) => { + +"use strict"; +/*! + * is-number + * + * Copyright (c) 2014-present, Jon Schlinkert. + * Released under the MIT License. 
*/ -function lm_init(s) { - s.window_size = 2 * s.w_size; - /*** CLEAR_HASH(s); ***/ - zero(s.head); // Fill with NIL (= 0); - /* Set the default configuration parameters: - */ - s.max_lazy_match = configuration_table[s.level].max_lazy; - s.good_match = configuration_table[s.level].good_length; - s.nice_match = configuration_table[s.level].nice_length; - s.max_chain_length = configuration_table[s.level].max_chain; - s.strstart = 0; - s.block_start = 0; - s.lookahead = 0; - s.insert = 0; - s.match_length = s.prev_length = MIN_MATCH - 1; - s.match_available = 0; - s.ins_h = 0; -} +module.exports = function(num) { + if (typeof num === 'number') { + return num - num === 0; + } + if (typeof num === 'string' && num.trim() !== '') { + return Number.isFinite ? Number.isFinite(+num) : isFinite(+num); + } + return false; +}; -function DeflateState() { - this.strm = null; /* pointer back to this zlib stream */ - this.status = 0; /* as the name implies */ - this.pending_buf = null; /* output still pending */ - this.pending_buf_size = 0; /* size of pending_buf */ - this.pending_out = 0; /* next pending byte to output to the stream */ - this.pending = 0; /* nb of bytes in the pending buffer */ - this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ - this.gzhead = null; /* gzip header information to write */ - this.gzindex = 0; /* where in extra, name, or comment */ - this.method = Z_DEFLATED; /* can only be DEFLATED */ - this.last_flush = -1; /* value of flush param for previous deflate call */ +/***/ }), - this.w_size = 0; /* LZ77 window size (32K by default) */ - this.w_bits = 0; /* log2(w_size) (8..16) */ - this.w_mask = 0; /* w_size - 1 */ +/***/ 44: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - this.window = null; - /* Sliding window. Input bytes are read into the second half of the window, - * and move to the first half later to keep a dictionary of at least wSize - * bytes. With this organization, matches are limited to a distance of - * wSize-MAX_MATCH bytes, but this ensures that IO is always - * performed with a length multiple of the block size. - */ +"use strict"; - this.window_size = 0; - /* Actual size of window: 2*wSize, except when the user input buffer - * is directly used as sliding window. - */ - this.prev = null; - /* Link to older string with same hash index. To limit the size of this - * array to 64K, this link is maintained only for the last 32K strings. - * An index in this array is thus a window index modulo 32K. - */ +// Dependencies +var protocols = __nccwpck_require__(9217); - this.head = null; /* Heads of the hash chains or NIL. */ +/** + * isSsh + * Checks if an input value is a ssh url or not. + * + * @name isSsh + * @function + * @param {String|Array} input The input url or an array of protocols. + * @return {Boolean} `true` if the input is a ssh url, `false` otherwise. + */ +function isSsh(input) { - this.ins_h = 0; /* hash index of string to be inserted */ - this.hash_size = 0; /* number of elements in hash table */ - this.hash_bits = 0; /* log2(hash_size) */ - this.hash_mask = 0; /* hash_size-1 */ + if (Array.isArray(input)) { + return input.indexOf("ssh") !== -1 || input.indexOf("rsync") !== -1; + } - this.hash_shift = 0; - /* Number of bits by which ins_h must be shifted at each input - * step. 
It must be such that after MIN_MATCH steps, the oldest - * byte no longer takes part in the hash key, that is: - * hash_shift * MIN_MATCH >= hash_bits - */ + if (typeof input !== "string") { + return false; + } - this.block_start = 0; - /* Window position at the beginning of the current output block. Gets - * negative when the window is moved backwards. - */ + var prots = protocols(input); + input = input.substring(input.indexOf("://") + 3); + if (isSsh(prots)) { + return true; + } - this.match_length = 0; /* length of best match */ - this.prev_match = 0; /* previous match */ - this.match_available = 0; /* set if previous match exists */ - this.strstart = 0; /* start of string to insert */ - this.match_start = 0; /* start of matching string */ - this.lookahead = 0; /* number of valid bytes ahead in window */ + // TODO This probably could be improved :) + var urlPortPattern = new RegExp('\.([a-zA-Z\\d]+):(\\d+)\/'); + return !input.match(urlPortPattern) && input.indexOf("@") < input.indexOf(":"); +} - this.prev_length = 0; - /* Length of the best match at previous step. Matches not greater than this - * are discarded. This is used in the lazy match evaluation. - */ +module.exports = isSsh; - this.max_chain_length = 0; - /* To speed up deflation, hash chains are never searched beyond this - * length. A higher limit improves compression ratio but degrades the - * speed. - */ +/***/ }), - this.max_lazy_match = 0; - /* Attempt to find a better match only when the current match is strictly - * smaller than this value. This mechanism is used only for compression - * levels >= 4. - */ - // That's alias to max_lazy_match, don't use directly - //this.max_insert_length = 0; - /* Insert new strings in the hash table only if the match length is not - * greater than this length. This saves time but degrades compression. - * max_insert_length is used only for compression levels <= 3. - */ +/***/ 2578: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - this.level = 0; /* compression level (1..9) */ - this.strategy = 0; /* favor or force Huffman coding*/ +"use strict"; - this.good_match = 0; - /* Use a faster search when the previous match is longer than this */ +/* + * merge2 + * https://github.com/teambition/merge2 + * + * Copyright (c) 2014-2020 Teambition + * Licensed under the MIT license. 
+ */ +const Stream = __nccwpck_require__(2781) +const PassThrough = Stream.PassThrough +const slice = Array.prototype.slice - this.nice_match = 0; /* Stop searching when current match exceeds this */ +module.exports = merge2 - /* used by trees.c: */ +function merge2 () { + const streamsQueue = [] + const args = slice.call(arguments) + let merging = false + let options = args[args.length - 1] - /* Didn't use ct_data typedef below to suppress compiler warning */ + if (options && !Array.isArray(options) && options.pipe == null) { + args.pop() + } else { + options = {} + } - // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ - // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ - // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + const doEnd = options.end !== false + const doPipeError = options.pipeError === true + if (options.objectMode == null) { + options.objectMode = true + } + if (options.highWaterMark == null) { + options.highWaterMark = 64 * 1024 + } + const mergedStream = PassThrough(options) - // Use flat array of DOUBLE size, with interleaved fata, - // because JS does not support effective - this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2); - this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2); - this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2); - zero(this.dyn_ltree); - zero(this.dyn_dtree); - zero(this.bl_tree); + function addStream () { + for (let i = 0, len = arguments.length; i < len; i++) { + streamsQueue.push(pauseStreams(arguments[i], options)) + } + mergeStream() + return this + } - this.l_desc = null; /* desc. for literal tree */ - this.d_desc = null; /* desc. for distance tree */ - this.bl_desc = null; /* desc. for bit length tree */ + function mergeStream () { + if (merging) { + return + } + merging = true - //ush bl_count[MAX_BITS+1]; - this.bl_count = new utils.Buf16(MAX_BITS + 1); - /* number of codes at each bit length for an optimal tree */ + let streams = streamsQueue.shift() + if (!streams) { + process.nextTick(endStream) + return + } + if (!Array.isArray(streams)) { + streams = [streams] + } - //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ - this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */ - zero(this.heap); + let pipesCount = streams.length + 1 - this.heap_len = 0; /* number of elements in the heap */ - this.heap_max = 0; /* element of largest frequency */ - /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. - * The same heap array is used to build all trees. - */ + function next () { + if (--pipesCount > 0) { + return + } + merging = false + mergeStream() + } - this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1]; - zero(this.depth); - /* Depth of each subtree used as tie breaker for trees of equal frequency - */ + function pipe (stream) { + function onend () { + stream.removeListener('merge2UnpipeEnd', onend) + stream.removeListener('end', onend) + if (doPipeError) { + stream.removeListener('error', onerror) + } + next() + } + function onerror (err) { + mergedStream.emit('error', err) + } + // skip ended stream + if (stream._readableState.endEmitted) { + return next() + } - this.l_buf = 0; /* buffer index for literals or lengths */ + stream.on('merge2UnpipeEnd', onend) + stream.on('end', onend) - this.lit_bufsize = 0; - /* Size of match buffer for literals/lengths. 
There are 4 reasons for - * limiting lit_bufsize to 64K: - * - frequencies can be kept in 16 bit counters - * - if compression is not successful for the first block, all input - * data is still in the window so we can still emit a stored block even - * when input comes from standard input. (This can also be done for - * all blocks if lit_bufsize is not greater than 32K.) - * - if compression is not successful for a file smaller than 64K, we can - * even emit a stored file instead of a stored block (saving 5 bytes). - * This is applicable only for zip (not gzip or zlib). - * - creating new Huffman trees less frequently may not provide fast - * adaptation to changes in the input data statistics. (Take for - * example a binary file with poorly compressible code followed by - * a highly compressible string table.) Smaller buffer sizes give - * fast adaptation but have of course the overhead of transmitting - * trees more frequently. - * - I can't count above 4 - */ + if (doPipeError) { + stream.on('error', onerror) + } - this.last_lit = 0; /* running index in l_buf */ + stream.pipe(mergedStream, { end: false }) + // compatible for old stream + stream.resume() + } - this.d_buf = 0; - /* Buffer index for distances. To simplify the code, d_buf and l_buf have - * the same number of elements. To use different lengths, an extra flag - * array would be necessary. - */ + for (let i = 0; i < streams.length; i++) { + pipe(streams[i]) + } - this.opt_len = 0; /* bit length of current block with optimal trees */ - this.static_len = 0; /* bit length of current block with static trees */ - this.matches = 0; /* number of string matches in current block */ - this.insert = 0; /* bytes at end of window left to insert */ + next() + } + function endStream () { + merging = false + // emit 'queueDrain' when all streams merged. + mergedStream.emit('queueDrain') + if (doEnd) { + mergedStream.end() + } + } - this.bi_buf = 0; - /* Output buffer. bits are inserted starting at the bottom (least - * significant bits). - */ - this.bi_valid = 0; - /* Number of valid bits in bi_buf. All bits above the last valid bit - * are always zero. - */ + mergedStream.setMaxListeners(0) + mergedStream.add = addStream + mergedStream.on('unpipe', function (stream) { + stream.emit('merge2UnpipeEnd') + }) - // Used for window memory init. We safely ignore it for JS. That makes - // sense only for pointers and memory check tools. - //this.high_water = 0; - /* High water mark offset in window for initialized bytes -- bytes above - * this are set to zero in order to avoid memory check warnings when - * longest match routines access bytes past the input. This is then - * updated to the new high water mark. - */ + if (args.length) { + addStream.apply(null, args) + } + return mergedStream } - -function deflateResetKeep(strm) { - var s; - - if (!strm || !strm.state) { - return err(strm, Z_STREAM_ERROR); +// check and pause streams for pipe. 
+function pauseStreams (streams, options) { + if (!Array.isArray(streams)) { + // Backwards-compat with old-style streams + if (!streams._readableState && streams.pipe) { + streams = streams.pipe(PassThrough(options)) + } + if (!streams._readableState || !streams.pause || !streams.pipe) { + throw new Error('Only readable stream can be merged.') + } + streams.pause() + } else { + for (let i = 0, len = streams.length; i < len; i++) { + streams[i] = pauseStreams(streams[i], options) + } } + return streams +} - strm.total_in = strm.total_out = 0; - strm.data_type = Z_UNKNOWN; - s = strm.state; - s.pending = 0; - s.pending_out = 0; +/***/ }), - if (s.wrap < 0) { - s.wrap = -s.wrap; - /* was made negative by deflate(..., Z_FINISH); */ - } - s.status = (s.wrap ? INIT_STATE : BUSY_STATE); - strm.adler = (s.wrap === 2) ? - 0 // crc32(0, Z_NULL, 0) - : - 1; // adler32(0, Z_NULL, 0) - s.last_flush = Z_NO_FLUSH; - trees._tr_init(s); - return Z_OK; -} +/***/ 6228: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { +"use strict"; -function deflateReset(strm) { - var ret = deflateResetKeep(strm); - if (ret === Z_OK) { - lm_init(strm.state); - } - return ret; -} +const util = __nccwpck_require__(3837); +const braces = __nccwpck_require__(610); +const picomatch = __nccwpck_require__(8569); +const utils = __nccwpck_require__(479); +const isEmptyString = val => val === '' || val === './'; -function deflateSetHeader(strm, head) { - if (!strm || !strm.state) { return Z_STREAM_ERROR; } - if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; } - strm.state.gzhead = head; - return Z_OK; -} +/** + * Returns an array of strings that match one or more glob patterns. + * + * ```js + * const mm = require('micromatch'); + * // mm(list, patterns[, options]); + * + * console.log(mm(['a.js', 'a.txt'], ['*.js'])); + * //=> [ 'a.js' ] + * ``` + * @param {String|Array} `list` List of strings to match. + * @param {String|Array} `patterns` One or more glob patterns to use for matching. + * @param {Object} `options` See available [options](#options) + * @return {Array} Returns an array of matches + * @summary false + * @api public + */ +const micromatch = (list, patterns, options) => { + patterns = [].concat(patterns); + list = [].concat(list); -function deflateInit2(strm, level, method, windowBits, memLevel, strategy) { - if (!strm) { // === Z_NULL - return Z_STREAM_ERROR; - } - var wrap = 1; + let omit = new Set(); + let keep = new Set(); + let items = new Set(); + let negatives = 0; - if (level === Z_DEFAULT_COMPRESSION) { - level = 6; - } + let onResult = state => { + items.add(state.output); + if (options && options.onResult) { + options.onResult(state); + } + }; - if (windowBits < 0) { /* suppress zlib wrapper */ - wrap = 0; - windowBits = -windowBits; - } + for (let i = 0; i < patterns.length; i++) { + let isMatch = picomatch(String(patterns[i]), { ...options, onResult }, true); + let negated = isMatch.state.negated || isMatch.state.negatedExtglob; + if (negated) negatives++; - else if (windowBits > 15) { - wrap = 2; /* write gzip wrapper instead */ - windowBits -= 16; - } + for (let item of list) { + let matched = isMatch(item, true); + let match = negated ? 
!matched.isMatch : matched.isMatch; + if (!match) continue; - if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED || - windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || - strategy < 0 || strategy > Z_FIXED) { - return err(strm, Z_STREAM_ERROR); + if (negated) { + omit.add(matched.output); + } else { + omit.delete(matched.output); + keep.add(matched.output); + } + } } + let result = negatives === patterns.length ? [...items] : [...keep]; + let matches = result.filter(item => !omit.has(item)); - if (windowBits === 8) { - windowBits = 9; + if (options && matches.length === 0) { + if (options.failglob === true) { + throw new Error(`No matches found for "${patterns.join(', ')}"`); + } + + if (options.nonull === true || options.nullglob === true) { + return options.unescape ? patterns.map(p => p.replace(/\\/g, '')) : patterns; + } } - /* until 256-byte window bug fixed */ - var s = new DeflateState(); + return matches; +}; - strm.state = s; - s.strm = strm; +/** + * Backwards compatibility + */ - s.wrap = wrap; - s.gzhead = null; - s.w_bits = windowBits; - s.w_size = 1 << s.w_bits; - s.w_mask = s.w_size - 1; +micromatch.match = micromatch; - s.hash_bits = memLevel + 7; - s.hash_size = 1 << s.hash_bits; - s.hash_mask = s.hash_size - 1; - s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH); - - s.window = new utils.Buf8(s.w_size * 2); - s.head = new utils.Buf16(s.hash_size); - s.prev = new utils.Buf16(s.w_size); - - // Don't need mem init magic for JS. - //s.high_water = 0; /* nothing written to s->window yet */ - - s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ +/** + * Returns a matcher function from the given glob `pattern` and `options`. + * The returned function takes a string to match as its only argument and returns + * true if the string is a match. + * + * ```js + * const mm = require('micromatch'); + * // mm.matcher(pattern[, options]); + * + * const isMatch = mm.matcher('*.!(*a)'); + * console.log(isMatch('a.a')); //=> false + * console.log(isMatch('a.b')); //=> true + * ``` + * @param {String} `pattern` Glob pattern + * @param {Object} `options` + * @return {Function} Returns a matcher function. + * @api public + */ - s.pending_buf_size = s.lit_bufsize * 4; +micromatch.matcher = (pattern, options) => picomatch(pattern, options); - //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); - //s->pending_buf = (uchf *) overlay; - s.pending_buf = new utils.Buf8(s.pending_buf_size); +/** + * Returns true if **any** of the given glob `patterns` match the specified `string`. + * + * ```js + * const mm = require('micromatch'); + * // mm.isMatch(string, patterns[, options]); + * + * console.log(mm.isMatch('a.a', ['b.*', '*.a'])); //=> true + * console.log(mm.isMatch('a.a', 'b.*')); //=> false + * ``` + * @param {String} `str` The string to test. + * @param {String|Array} `patterns` One or more glob patterns to use for matching. + * @param {Object} `[options]` See available [options](#options). 
+ * @return {Boolean} Returns true if any patterns match `str` + * @api public + */ - // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) - //s->d_buf = overlay + s->lit_bufsize/sizeof(ush); - s.d_buf = 1 * s.lit_bufsize; +micromatch.isMatch = (str, patterns, options) => picomatch(patterns, options)(str); - //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; - s.l_buf = (1 + 2) * s.lit_bufsize; +/** + * Backwards compatibility + */ - s.level = level; - s.strategy = strategy; - s.method = method; +micromatch.any = micromatch.isMatch; - return deflateReset(strm); -} +/** + * Returns a list of strings that _**do not match any**_ of the given `patterns`. + * + * ```js + * const mm = require('micromatch'); + * // mm.not(list, patterns[, options]); + * + * console.log(mm.not(['a.a', 'b.b', 'c.c'], '*.a')); + * //=> ['b.b', 'c.c'] + * ``` + * @param {Array} `list` Array of strings to match. + * @param {String|Array} `patterns` One or more glob pattern to use for matching. + * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Array} Returns an array of strings that **do not match** the given patterns. + * @api public + */ -function deflateInit(strm, level) { - return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); -} +micromatch.not = (list, patterns, options = {}) => { + patterns = [].concat(patterns).map(String); + let result = new Set(); + let items = []; + let onResult = state => { + if (options.onResult) options.onResult(state); + items.push(state.output); + }; -function deflate(strm, flush) { - var old_flush, s; - var beg, val; // for gzip header write only + let matches = micromatch(list, patterns, { ...options, onResult }); - if (!strm || !strm.state || - flush > Z_BLOCK || flush < 0) { - return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR; + for (let item of items) { + if (!matches.includes(item)) { + result.add(item); + } } + return [...result]; +}; - s = strm.state; +/** + * Returns true if the given `string` contains the given pattern. Similar + * to [.isMatch](#isMatch) but the pattern can match any part of the string. + * + * ```js + * var mm = require('micromatch'); + * // mm.contains(string, pattern[, options]); + * + * console.log(mm.contains('aa/bb/cc', '*b')); + * //=> true + * console.log(mm.contains('aa/bb/cc', '*d')); + * //=> false + * ``` + * @param {String} `str` The string to match. + * @param {String|Array} `patterns` Glob pattern to use for matching. + * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Boolean} Returns true if any of the patterns matches any part of `str`. + * @api public + */ - if (!strm.output || - (!strm.input && strm.avail_in !== 0) || - (s.status === FINISH_STATE && flush !== Z_FINISH)) { - return err(strm, (strm.avail_out === 0) ? 
Z_BUF_ERROR : Z_STREAM_ERROR); +micromatch.contains = (str, pattern, options) => { + if (typeof str !== 'string') { + throw new TypeError(`Expected a string: "${util.inspect(str)}"`); } - s.strm = strm; /* just in case */ - old_flush = s.last_flush; - s.last_flush = flush; + if (Array.isArray(pattern)) { + return pattern.some(p => micromatch.contains(str, p, options)); + } - /* Write the header */ - if (s.status === INIT_STATE) { + if (typeof pattern === 'string') { + if (isEmptyString(str) || isEmptyString(pattern)) { + return false; + } - if (s.wrap === 2) { // GZIP header - strm.adler = 0; //crc32(0L, Z_NULL, 0); - put_byte(s, 31); - put_byte(s, 139); - put_byte(s, 8); - if (!s.gzhead) { // s->gzhead == Z_NULL - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, 0); - put_byte(s, s.level === 9 ? 2 : - (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? - 4 : 0)); - put_byte(s, OS_CODE); - s.status = BUSY_STATE; - } - else { - put_byte(s, (s.gzhead.text ? 1 : 0) + - (s.gzhead.hcrc ? 2 : 0) + - (!s.gzhead.extra ? 0 : 4) + - (!s.gzhead.name ? 0 : 8) + - (!s.gzhead.comment ? 0 : 16) - ); - put_byte(s, s.gzhead.time & 0xff); - put_byte(s, (s.gzhead.time >> 8) & 0xff); - put_byte(s, (s.gzhead.time >> 16) & 0xff); - put_byte(s, (s.gzhead.time >> 24) & 0xff); - put_byte(s, s.level === 9 ? 2 : - (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? - 4 : 0)); - put_byte(s, s.gzhead.os & 0xff); - if (s.gzhead.extra && s.gzhead.extra.length) { - put_byte(s, s.gzhead.extra.length & 0xff); - put_byte(s, (s.gzhead.extra.length >> 8) & 0xff); - } - if (s.gzhead.hcrc) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0); - } - s.gzindex = 0; - s.status = EXTRA_STATE; - } + if (str.includes(pattern) || (str.startsWith('./') && str.slice(2).includes(pattern))) { + return true; } - else // DEFLATE header - { - var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8; - var level_flags = -1; + } - if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { - level_flags = 0; - } else if (s.level < 6) { - level_flags = 1; - } else if (s.level === 6) { - level_flags = 2; - } else { - level_flags = 3; - } - header |= (level_flags << 6); - if (s.strstart !== 0) { header |= PRESET_DICT; } - header += 31 - (header % 31); + return micromatch.isMatch(str, pattern, { ...options, contains: true }); +}; - s.status = BUSY_STATE; - putShortMSB(s, header); +/** + * Filter the keys of the given object with the given `glob` pattern + * and `options`. Does not attempt to match nested keys. If you need this feature, + * use [glob-object][] instead. + * + * ```js + * const mm = require('micromatch'); + * // mm.matchKeys(object, patterns[, options]); + * + * const obj = { aa: 'a', ab: 'b', ac: 'c' }; + * console.log(mm.matchKeys(obj, '*b')); + * //=> { ab: 'b' } + * ``` + * @param {Object} `object` The object with keys to filter. + * @param {String|Array} `patterns` One or more glob patterns to use for matching. + * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Object} Returns an object with only keys that match the given patterns. 
+ * @api public + */ - /* Save the adler32 of the preset dictionary: */ - if (s.strstart !== 0) { - putShortMSB(s, strm.adler >>> 16); - putShortMSB(s, strm.adler & 0xffff); - } - strm.adler = 1; // adler32(0L, Z_NULL, 0); - } +micromatch.matchKeys = (obj, patterns, options) => { + if (!utils.isObject(obj)) { + throw new TypeError('Expected the first argument to be an object'); } + let keys = micromatch(Object.keys(obj), patterns, options); + let res = {}; + for (let key of keys) res[key] = obj[key]; + return res; +}; -//#ifdef GZIP - if (s.status === EXTRA_STATE) { - if (s.gzhead.extra/* != Z_NULL*/) { - beg = s.pending; /* start of bytes to update crc */ - - while (s.gzindex < (s.gzhead.extra.length & 0xffff)) { - if (s.pending === s.pending_buf_size) { - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); - } - flush_pending(strm); - beg = s.pending; - if (s.pending === s.pending_buf_size) { - break; - } - } - put_byte(s, s.gzhead.extra[s.gzindex] & 0xff); - s.gzindex++; - } - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); - } - if (s.gzindex === s.gzhead.extra.length) { - s.gzindex = 0; - s.status = NAME_STATE; - } - } - else { - s.status = NAME_STATE; - } - } - if (s.status === NAME_STATE) { - if (s.gzhead.name/* != Z_NULL*/) { - beg = s.pending; /* start of bytes to update crc */ - //int val; +/** + * Returns true if some of the strings in the given `list` match any of the given glob `patterns`. + * + * ```js + * const mm = require('micromatch'); + * // mm.some(list, patterns[, options]); + * + * console.log(mm.some(['foo.js', 'bar.js'], ['*.js', '!foo.js'])); + * // true + * console.log(mm.some(['foo.js'], ['*.js', '!foo.js'])); + * // false + * ``` + * @param {String|Array} `list` The string or array of strings to test. Returns as soon as the first match is found. + * @param {String|Array} `patterns` One or more glob patterns to use for matching. 
+ * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Boolean} Returns true if any `patterns` matches any of the strings in `list` + * @api public + */ - do { - if (s.pending === s.pending_buf_size) { - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); - } - flush_pending(strm); - beg = s.pending; - if (s.pending === s.pending_buf_size) { - val = 1; - break; - } - } - // JS specific: little magic to add zero terminator to end of string - if (s.gzindex < s.gzhead.name.length) { - val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff; - } else { - val = 0; - } - put_byte(s, val); - } while (val !== 0); +micromatch.some = (list, patterns, options) => { + let items = [].concat(list); - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); - } - if (val === 0) { - s.gzindex = 0; - s.status = COMMENT_STATE; - } - } - else { - s.status = COMMENT_STATE; + for (let pattern of [].concat(patterns)) { + let isMatch = picomatch(String(pattern), options); + if (items.some(item => isMatch(item))) { + return true; } } - if (s.status === COMMENT_STATE) { - if (s.gzhead.comment/* != Z_NULL*/) { - beg = s.pending; /* start of bytes to update crc */ - //int val; + return false; +}; - do { - if (s.pending === s.pending_buf_size) { - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); - } - flush_pending(strm); - beg = s.pending; - if (s.pending === s.pending_buf_size) { - val = 1; - break; - } - } - // JS specific: little magic to add zero terminator to end of string - if (s.gzindex < s.gzhead.comment.length) { - val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff; - } else { - val = 0; - } - put_byte(s, val); - } while (val !== 0); +/** + * Returns true if every string in the given `list` matches + * any of the given glob `patterns`. + * + * ```js + * const mm = require('micromatch'); + * // mm.every(list, patterns[, options]); + * + * console.log(mm.every('foo.js', ['foo.js'])); + * // true + * console.log(mm.every(['foo.js', 'bar.js'], ['*.js'])); + * // true + * console.log(mm.every(['foo.js', 'bar.js'], ['*.js', '!foo.js'])); + * // false + * console.log(mm.every(['foo.js'], ['*.js', '!foo.js'])); + * // false + * ``` + * @param {String|Array} `list` The string or array of strings to test. + * @param {String|Array} `patterns` One or more glob patterns to use for matching. 
+ * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Boolean} Returns true if all `patterns` matches all of the strings in `list` + * @api public + */ - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); - } - if (val === 0) { - s.status = HCRC_STATE; - } - } - else { - s.status = HCRC_STATE; - } - } - if (s.status === HCRC_STATE) { - if (s.gzhead.hcrc) { - if (s.pending + 2 > s.pending_buf_size) { - flush_pending(strm); - } - if (s.pending + 2 <= s.pending_buf_size) { - put_byte(s, strm.adler & 0xff); - put_byte(s, (strm.adler >> 8) & 0xff); - strm.adler = 0; //crc32(0L, Z_NULL, 0); - s.status = BUSY_STATE; - } - } - else { - s.status = BUSY_STATE; +micromatch.every = (list, patterns, options) => { + let items = [].concat(list); + + for (let pattern of [].concat(patterns)) { + let isMatch = picomatch(String(pattern), options); + if (!items.every(item => isMatch(item))) { + return false; } } -//#endif + return true; +}; - /* Flush as much pending output as possible */ - if (s.pending !== 0) { - flush_pending(strm); - if (strm.avail_out === 0) { - /* Since avail_out is 0, deflate will be called again with - * more output space, but possibly with both pending and - * avail_in equal to zero. There won't be anything to do, - * but this is not an error situation so make sure we - * return OK instead of BUF_ERROR at next call of deflate: - */ - s.last_flush = -1; - return Z_OK; - } +/** + * Returns true if **all** of the given `patterns` match + * the specified string. + * + * ```js + * const mm = require('micromatch'); + * // mm.all(string, patterns[, options]); + * + * console.log(mm.all('foo.js', ['foo.js'])); + * // true + * + * console.log(mm.all('foo.js', ['*.js', '!foo.js'])); + * // false + * + * console.log(mm.all('foo.js', ['*.js', 'foo.js'])); + * // true + * + * console.log(mm.all('foo.js', ['*.js', 'f*', '*o*', '*o.js'])); + * // true + * ``` + * @param {String|Array} `str` The string to test. + * @param {String|Array} `patterns` One or more glob patterns to use for matching. + * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Boolean} Returns true if any patterns match `str` + * @api public + */ - /* Make sure there is something to do and avoid duplicate consecutive - * flushes. For repeated and useless calls with Z_FINISH, we keep - * returning Z_STREAM_END instead of Z_BUF_ERROR. - */ - } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && - flush !== Z_FINISH) { - return err(strm, Z_BUF_ERROR); +micromatch.all = (str, patterns, options) => { + if (typeof str !== 'string') { + throw new TypeError(`Expected a string: "${util.inspect(str)}"`); } - /* User must not provide more input after the first FINISH: */ - if (s.status === FINISH_STATE && strm.avail_in !== 0) { - return err(strm, Z_BUF_ERROR); - } + return [].concat(patterns).every(p => picomatch(p, options)(str)); +}; - /* Start a new block or continue the current one. - */ - if (strm.avail_in !== 0 || s.lookahead !== 0 || - (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) { - var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) : - (s.strategy === Z_RLE ? deflate_rle(s, flush) : - configuration_table[s.level].func(s, flush)); +/** + * Returns an array of matches captured by `pattern` in `string, or `null` if the pattern did not match. 
+ * + * ```js + * const mm = require('micromatch'); + * // mm.capture(pattern, string[, options]); + * + * console.log(mm.capture('test/*.js', 'test/foo.js')); + * //=> ['foo'] + * console.log(mm.capture('test/*.js', 'foo/bar.css')); + * //=> null + * ``` + * @param {String} `glob` Glob pattern to use for matching. + * @param {String} `input` String to match + * @param {Object} `options` See available [options](#options) for changing how matches are performed + * @return {Array|null} Returns an array of captures if the input matches the glob pattern, otherwise `null`. + * @api public + */ - if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { - s.status = FINISH_STATE; - } - if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { - if (strm.avail_out === 0) { - s.last_flush = -1; - /* avoid BUF_ERROR next call, see above */ - } - return Z_OK; - /* If flush != Z_NO_FLUSH && avail_out == 0, the next call - * of deflate should use the same flush parameter to make sure - * that the flush is complete. So we don't have to output an - * empty block here, this will be done at next call. This also - * ensures that for a very small output buffer, we emit at most - * one empty block. - */ - } - if (bstate === BS_BLOCK_DONE) { - if (flush === Z_PARTIAL_FLUSH) { - trees._tr_align(s); - } - else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ +micromatch.capture = (glob, input, options) => { + let posix = utils.isWindows(options); + let regex = picomatch.makeRe(String(glob), { ...options, capture: true }); + let match = regex.exec(posix ? utils.toPosixSlashes(input) : input); - trees._tr_stored_block(s, 0, 0, false); - /* For a full flush, this empty block will be recognized - * as a special marker by inflate_sync(). - */ - if (flush === Z_FULL_FLUSH) { - /*** CLEAR_HASH(s); ***/ /* forget history */ - zero(s.head); // Fill with NIL (= 0); + if (match) { + return match.slice(1).map(v => v === void 0 ? '' : v); + } +}; - if (s.lookahead === 0) { - s.strstart = 0; - s.block_start = 0; - s.insert = 0; - } - } - } - flush_pending(strm); - if (strm.avail_out === 0) { - s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ - return Z_OK; - } +/** + * Create a regular expression from the given glob `pattern`. + * + * ```js + * const mm = require('micromatch'); + * // mm.makeRe(pattern[, options]); + * + * console.log(mm.makeRe('*.js')); + * //=> /^(?:(\.[\\\/])?(?!\.)(?=.)[^\/]*?\.js)$/ + * ``` + * @param {String} `pattern` A glob pattern to convert to regex. + * @param {Object} `options` + * @return {RegExp} Returns a regex created from the given pattern. + * @api public + */ + +micromatch.makeRe = (...args) => picomatch.makeRe(...args); + +/** + * Scan a glob pattern to separate the pattern into segments. Used + * by the [split](#split) method. + * + * ```js + * const mm = require('micromatch'); + * const state = mm.scan(pattern[, options]); + * ``` + * @param {String} `pattern` + * @param {Object} `options` + * @return {Object} Returns an object with + * @api public + */ + +micromatch.scan = (...args) => picomatch.scan(...args); + +/** + * Parse a glob pattern to create the source string for a regular + * expression. + * + * ```js + * const mm = require('micromatch'); + * const state = mm(pattern[, options]); + * ``` + * @param {String} `glob` + * @param {Object} `options` + * @return {Object} Returns an object with useful properties and output to be used as regex source string. 
+ * @api public + */ + +micromatch.parse = (patterns, options) => { + let res = []; + for (let pattern of [].concat(patterns || [])) { + for (let str of braces(String(pattern), options)) { + res.push(picomatch.parse(str, options)); } } - //Assert(strm->avail_out > 0, "bug2"); - //if (strm.avail_out <= 0) { throw new Error("bug2");} + return res; +}; - if (flush !== Z_FINISH) { return Z_OK; } - if (s.wrap <= 0) { return Z_STREAM_END; } +/** + * Process the given brace `pattern`. + * + * ```js + * const { braces } = require('micromatch'); + * console.log(braces('foo/{a,b,c}/bar')); + * //=> [ 'foo/(a|b|c)/bar' ] + * + * console.log(braces('foo/{a,b,c}/bar', { expand: true })); + * //=> [ 'foo/a/bar', 'foo/b/bar', 'foo/c/bar' ] + * ``` + * @param {String} `pattern` String with brace pattern to process. + * @param {Object} `options` Any [options](#options) to change how expansion is performed. See the [braces][] library for all available options. + * @return {Array} + * @api public + */ - /* Write the trailer */ - if (s.wrap === 2) { - put_byte(s, strm.adler & 0xff); - put_byte(s, (strm.adler >> 8) & 0xff); - put_byte(s, (strm.adler >> 16) & 0xff); - put_byte(s, (strm.adler >> 24) & 0xff); - put_byte(s, strm.total_in & 0xff); - put_byte(s, (strm.total_in >> 8) & 0xff); - put_byte(s, (strm.total_in >> 16) & 0xff); - put_byte(s, (strm.total_in >> 24) & 0xff); - } - else - { - putShortMSB(s, strm.adler >>> 16); - putShortMSB(s, strm.adler & 0xffff); +micromatch.braces = (pattern, options) => { + if (typeof pattern !== 'string') throw new TypeError('Expected a string'); + if ((options && options.nobrace === true) || !/\{.*\}/.test(pattern)) { + return [pattern]; } + return braces(pattern, options); +}; - flush_pending(strm); - /* If avail_out is zero, the application will call deflate again - * to flush the rest. - */ - if (s.wrap > 0) { s.wrap = -s.wrap; } - /* write the trailer only once! */ - return s.pending !== 0 ? Z_OK : Z_STREAM_END; -} +/** + * Expand braces + */ -function deflateEnd(strm) { - var status; +micromatch.braceExpand = (pattern, options) => { + if (typeof pattern !== 'string') throw new TypeError('Expected a string'); + return micromatch.braces(pattern, { ...options, expand: true }); +}; - if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { - return Z_STREAM_ERROR; - } +/** + * Expose micromatch + */ - status = strm.state.status; - if (status !== INIT_STATE && - status !== EXTRA_STATE && - status !== NAME_STATE && - status !== COMMENT_STATE && - status !== HCRC_STATE && - status !== BUSY_STATE && - status !== FINISH_STATE - ) { - return err(strm, Z_STREAM_ERROR); - } +module.exports = micromatch; - strm.state = null; - return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK; -} +/***/ }), +/***/ 1726: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -/* ========================================================================= - * Initializes the compression dictionary from the given byte - * sequence without producing any compressed output. 
- */ -function deflateSetDictionary(strm, dictionary) { - var dictLength = dictionary.length; +"use strict"; +// Top level file is just a mixin of submodules & constants - var s; - var str, n; - var wrap; - var avail; - var next; - var input; - var tmpDict; - if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { - return Z_STREAM_ERROR; - } +var assign = (__nccwpck_require__(5483).assign); - s = strm.state; - wrap = s.wrap; +var deflate = __nccwpck_require__(7265); +var inflate = __nccwpck_require__(6522); +var constants = __nccwpck_require__(8282); - if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { - return Z_STREAM_ERROR; - } +var pako = {}; - /* when using zlib wrappers, compute Adler-32 for provided dictionary */ - if (wrap === 1) { - /* adler32(strm->adler, dictionary, dictLength); */ - strm.adler = adler32(strm.adler, dictionary, dictLength, 0); - } +assign(pako, deflate, inflate, constants); - s.wrap = 0; /* avoid computing Adler-32 in read_buf */ +module.exports = pako; - /* if dictionary would fill window, just replace the history */ - if (dictLength >= s.w_size) { - if (wrap === 0) { /* already empty otherwise */ - /*** CLEAR_HASH(s); ***/ - zero(s.head); // Fill with NIL (= 0); - s.strstart = 0; - s.block_start = 0; - s.insert = 0; - } - /* use the tail */ - // dictionary = dictionary.slice(dictLength - s.w_size); - tmpDict = new utils.Buf8(s.w_size); - utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0); - dictionary = tmpDict; - dictLength = s.w_size; - } - /* insert dictionary into window and hash */ - avail = strm.avail_in; - next = strm.next_in; - input = strm.input; - strm.avail_in = dictLength; - strm.next_in = 0; - strm.input = dictionary; - fill_window(s); - while (s.lookahead >= MIN_MATCH) { - str = s.strstart; - n = s.lookahead - (MIN_MATCH - 1); - do { - /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ - s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; - s.prev[str & s.w_mask] = s.head[s.ins_h]; +/***/ }), - s.head[s.ins_h] = str; - str++; - } while (--n); - s.strstart = str; - s.lookahead = MIN_MATCH - 1; - fill_window(s); - } - s.strstart += s.lookahead; - s.block_start = s.strstart; - s.insert = s.lookahead; - s.lookahead = 0; - s.match_length = s.prev_length = MIN_MATCH - 1; - s.match_available = 0; - strm.next_in = next; - strm.input = input; - strm.avail_in = avail; - s.wrap = wrap; - return Z_OK; -} +/***/ 7265: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +"use strict"; -exports.deflateInit = deflateInit; -exports.deflateInit2 = deflateInit2; -exports.deflateReset = deflateReset; -exports.deflateResetKeep = deflateResetKeep; -exports.deflateSetHeader = deflateSetHeader; -exports.deflate = deflate; -exports.deflateEnd = deflateEnd; -exports.deflateSetDictionary = deflateSetDictionary; -exports.deflateInfo = 'pako deflate (from Nodeca project)'; -/* Not implemented -exports.deflateBound = deflateBound; -exports.deflateCopy = deflateCopy; -exports.deflateParams = deflateParams; -exports.deflatePending = deflatePending; -exports.deflatePrime = deflatePrime; -exports.deflateTune = deflateTune; -*/ +var zlib_deflate = __nccwpck_require__(978); +var utils = __nccwpck_require__(5483); +var strings = __nccwpck_require__(2380); +var msg = __nccwpck_require__(1890); +var ZStream = __nccwpck_require__(6442); -/***/ }), +var toString = Object.prototype.toString; -/***/ 382: -/***/ (function(module, __unusedexports, __webpack_require__) { +/* Public 
constants ==========================================================*/ +/* ===========================================================================*/ -"use strict"; +var Z_NO_FLUSH = 0; +var Z_FINISH = 4; +var Z_OK = 0; +var Z_STREAM_END = 1; +var Z_SYNC_FLUSH = 2; -const utils = __webpack_require__(225); +var Z_DEFAULT_COMPRESSION = -1; -module.exports = (ast, options = {}) => { - let stringify = (node, parent = {}) => { - let invalidBlock = options.escapeInvalid && utils.isInvalidBrace(parent); - let invalidNode = node.invalid === true && options.escapeInvalid === true; - let output = ''; +var Z_DEFAULT_STRATEGY = 0; - if (node.value) { - if ((invalidBlock || invalidNode) && utils.isOpenOrClose(node)) { - return '\\' + node.value; - } - return node.value; - } +var Z_DEFLATED = 8; - if (node.value) { - return node.value; - } +/* ===========================================================================*/ - if (node.nodes) { - for (let child of node.nodes) { - output += stringify(child); - } - } - return output; - }; - return stringify(ast); -}; +/** + * class Deflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[deflate]], + * [[deflateRaw]] and [[gzip]]. + **/ +/* internal + * Deflate.chunks -> Array + * + * Chunks of output data, if [[Deflate#onData]] not overridden. + **/ +/** + * Deflate.result -> Uint8Array|Array + * + * Compressed result, generated by default [[Deflate#onData]] + * and [[Deflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you + * push a chunk with explicit flush (call [[Deflate#push]] with + * `Z_SYNC_FLUSH` param). + **/ -/***/ }), +/** + * Deflate.err -> Number + * + * Error code after deflate finished. 0 (Z_OK) on success. + * You will not need it in real life, because deflate errors + * are possible only on wrong options or bad `onData` / `onEnd` + * custom handlers. + **/ -/***/ 384: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/** + * Deflate.msg -> String + * + * Error message, if [[Deflate.err]] != 0 + **/ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.convertPatternGroupToTask = exports.convertPatternGroupsToTasks = exports.groupPatternsByBaseDirectory = exports.getNegativePatternsAsPositive = exports.getPositivePatterns = exports.convertPatternsToTasks = exports.generate = void 0; -const utils = __webpack_require__(444); -function generate(patterns, settings) { - const positivePatterns = getPositivePatterns(patterns); - const negativePatterns = getNegativePatternsAsPositive(patterns, settings.ignore); - const staticPatterns = positivePatterns.filter((pattern) => utils.pattern.isStaticPattern(pattern, settings)); - const dynamicPatterns = positivePatterns.filter((pattern) => utils.pattern.isDynamicPattern(pattern, settings)); - const staticTasks = convertPatternsToTasks(staticPatterns, negativePatterns, /* dynamic */ false); - const dynamicTasks = convertPatternsToTasks(dynamicPatterns, negativePatterns, /* dynamic */ true); - return staticTasks.concat(dynamicTasks); -} -exports.generate = generate; /** - * Returns tasks grouped by basic pattern directories. + * new Deflate(options) + * - options (Object): zlib deflate options. * - * Patterns that can be found inside (`./`) and outside (`../`) the current directory are handled separately. - * This is necessary because directory traversal starts at the base directory and goes deeper. 
- */ -function convertPatternsToTasks(positive, negative, dynamic) { - const tasks = []; - const patternsOutsideCurrentDirectory = utils.pattern.getPatternsOutsideCurrentDirectory(positive); - const patternsInsideCurrentDirectory = utils.pattern.getPatternsInsideCurrentDirectory(positive); - const outsideCurrentDirectoryGroup = groupPatternsByBaseDirectory(patternsOutsideCurrentDirectory); - const insideCurrentDirectoryGroup = groupPatternsByBaseDirectory(patternsInsideCurrentDirectory); - tasks.push(...convertPatternGroupsToTasks(outsideCurrentDirectoryGroup, negative, dynamic)); - /* - * For the sake of reducing future accesses to the file system, we merge all tasks within the current directory - * into a global task, if at least one pattern refers to the root (`.`). In this case, the global task covers the rest. - */ - if ('.' in insideCurrentDirectoryGroup) { - tasks.push(convertPatternGroupToTask('.', patternsInsideCurrentDirectory, negative, dynamic)); - } - else { - tasks.push(...convertPatternGroupsToTasks(insideCurrentDirectoryGroup, negative, dynamic)); - } - return tasks; -} -exports.convertPatternsToTasks = convertPatternsToTasks; -function getPositivePatterns(patterns) { - return utils.pattern.getPositivePatterns(patterns); -} -exports.getPositivePatterns = getPositivePatterns; -function getNegativePatternsAsPositive(patterns, ignore) { - const negative = utils.pattern.getNegativePatterns(patterns).concat(ignore); - const positive = negative.map(utils.pattern.convertToPositivePattern); - return positive; -} -exports.getNegativePatternsAsPositive = getNegativePatternsAsPositive; -function groupPatternsByBaseDirectory(patterns) { - const group = {}; - return patterns.reduce((collection, pattern) => { - const base = utils.pattern.getBaseDirectory(pattern); - if (base in collection) { - collection[base].push(pattern); - } - else { - collection[base] = [pattern]; - } - return collection; - }, group); -} -exports.groupPatternsByBaseDirectory = groupPatternsByBaseDirectory; -function convertPatternGroupsToTasks(positive, negative, dynamic) { - return Object.keys(positive).map((base) => { - return convertPatternGroupToTask(base, positive[base], negative, dynamic); - }); -} -exports.convertPatternGroupsToTasks = convertPatternGroupsToTasks; -function convertPatternGroupToTask(base, positive, negative, dynamic) { - return { - dynamic, - positive, - negative, - base, - patterns: [].concat(positive, negative.map(utils.pattern.convertToNegativePattern)) - }; -} -exports.convertPatternGroupToTask = convertPatternGroupToTask; + * Creates new deflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `level` + * - `windowBits` + * - `memLevel` + * - `strategy` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. 
+ * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw deflate + * - `gzip` (Boolean) - create gzip wrapper + * - `to` (String) - if equal to 'string', then result will be "binary string" + * (each char code [0..255]) + * - `header` (Object) - custom header for gzip + * - `text` (Boolean) - true if compressed data believed to be text + * - `time` (Number) - modification time, unix timestamp + * - `os` (Number) - operation system code + * - `extra` (Array) - array of bytes with extra data (max 65536) + * - `name` (String) - file name (binary string) + * - `comment` (String) - comment (binary string) + * - `hcrc` (Boolean) - true if header crc should be added + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * var deflate = new pako.Deflate({ level: 3}); + * + * deflate.push(chunk1, false); + * deflate.push(chunk2, true); // true -> last chunk + * + * if (deflate.err) { throw new Error(deflate.err); } + * + * console.log(deflate.result); + * ``` + **/ +function Deflate(options) { + if (!(this instanceof Deflate)) return new Deflate(options); + this.options = utils.assign({ + level: Z_DEFAULT_COMPRESSION, + method: Z_DEFLATED, + chunkSize: 16384, + windowBits: 15, + memLevel: 8, + strategy: Z_DEFAULT_STRATEGY, + to: '' + }, options || {}); -/***/ }), + var opt = this.options; -/***/ 394: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + if (opt.raw && (opt.windowBits > 0)) { + opt.windowBits = -opt.windowBits; + } -"use strict"; + else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { + opt.windowBits += 16; + } -Object.defineProperty(exports, "__esModule", { value: true }); -const fsScandir = __webpack_require__(661); -const common = __webpack_require__(617); -const reader_1 = __webpack_require__(962); -class SyncReader extends reader_1.default { - constructor() { - super(...arguments); - this._scandir = fsScandir.scandirSync; - this._storage = []; - this._queue = new Set(); - } - read() { - this._pushToQueue(this._root, this._settings.basePath); - this._handleQueue(); - return this._storage; - } - _pushToQueue(directory, base) { - this._queue.add({ directory, base }); - } - _handleQueue() { - for (const item of this._queue.values()) { - this._handleDirectory(item.directory, item.base); - } - } - _handleDirectory(directory, base) { - try { - const entries = this._scandir(directory, this._settings.fsScandirSettings); - for (const entry of entries) { - this._handleEntry(entry, base); - } - } - catch (error) { - this._handleError(error); - } - } - _handleError(error) { - if (!common.isFatalError(this._settings, error)) { - return; - } - throw error; - } - _handleEntry(entry, base) { - const fullpath = entry.path; - if (base !== undefined) { - entry.path = common.joinPathSegments(base, entry.name, this._settings.pathSegmentSeparator); - } - if (common.isAppliedFilter(this._settings.entryFilter, entry)) { - this._pushToStorage(entry); - } - if (entry.dirent.isDirectory() && common.isAppliedFilter(this._settings.deepFilter, entry)) { - this._pushToQueue(fullpath, base === undefined ? 
undefined : entry.path); - } - } - _pushToStorage(entry) { - this._storage.push(entry); - } -} -exports.default = SyncReader; - - -/***/ }), - -/***/ 396: -/***/ (function(module) { + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data -// A simple implementation of make-array -function makeArray (subject) { - return Array.isArray(subject) - ? subject - : [subject] -} + this.strm = new ZStream(); + this.strm.avail_out = 0; -const EMPTY = '' -const SPACE = ' ' -const ESCAPE = '\\' -const REGEX_TEST_BLANK_LINE = /^\s+$/ -const REGEX_REPLACE_LEADING_EXCAPED_EXCLAMATION = /^\\!/ -const REGEX_REPLACE_LEADING_EXCAPED_HASH = /^\\#/ -const REGEX_SPLITALL_CRLF = /\r?\n/g -// /foo, -// ./foo, -// ../foo, -// . -// .. -const REGEX_TEST_INVALID_PATH = /^\.*\/|^\.+$/ + var status = zlib_deflate.deflateInit2( + this.strm, + opt.level, + opt.method, + opt.windowBits, + opt.memLevel, + opt.strategy + ); -const SLASH = '/' -const KEY_IGNORE = typeof Symbol !== 'undefined' - ? Symbol.for('node-ignore') - /* istanbul ignore next */ - : 'node-ignore' + if (status !== Z_OK) { + throw new Error(msg[status]); + } -const define = (object, key, value) => - Object.defineProperty(object, key, {value}) + if (opt.header) { + zlib_deflate.deflateSetHeader(this.strm, opt.header); + } -const REGEX_REGEXP_RANGE = /([0-z])-([0-z])/g + if (opt.dictionary) { + var dict; + // Convert data if needed + if (typeof opt.dictionary === 'string') { + // If we need to compress text, change encoding to utf8. + dict = strings.string2buf(opt.dictionary); + } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { + dict = new Uint8Array(opt.dictionary); + } else { + dict = opt.dictionary; + } -const RETURN_FALSE = () => false + status = zlib_deflate.deflateSetDictionary(this.strm, dict); -// Sanitize the range of a regular expression -// The cases are complicated, see test cases for details -const sanitizeRange = range => range.replace( - REGEX_REGEXP_RANGE, - (match, from, to) => from.charCodeAt(0) <= to.charCodeAt(0) - ? match - // Invalid range (out of order) which is ok for gitignore rules but - // fatal for JavaScript regular expression, so eliminate it. - : EMPTY -) + if (status !== Z_OK) { + throw new Error(msg[status]); + } -// See fixtures #59 -const cleanRangeBackSlash = slashes => { - const {length} = slashes - return slashes.slice(0, length - length % 2) + this._dict_set = true; + } } -// > If the pattern ends with a slash, -// > it is removed for the purpose of the following description, -// > but it would only find a match with a directory. -// > In other words, foo/ will match a directory foo and paths underneath it, -// > but will not match a regular file or a symbolic link foo -// > (this is consistent with the way how pathspec works in general in Git). -// '`foo/`' will not match regular file '`foo`' or symbolic link '`foo`' -// -> ignore-rules will not deal with it, because it costs extra `fs.stat` call -// you could use option `mark: true` with `glob` +/** + * Deflate#push(data[, mode]) -> Boolean + * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be + * converted to utf8 byte sequence. + * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. 
+ * + * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with + * new compressed chunks. Returns `true` on success. The last data block must have + * mode Z_FINISH (or `true`). That will flush internal pending buffers and call + * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you + * can use mode Z_SYNC_FLUSH, keeping the compression context. + * + * On fail call [[Deflate#onEnd]] with error code and return false. + * + * We strongly recommend to use `Uint8Array` on input for best speed (output + * array format is detected automatically). Also, don't skip last param and always + * use the same type in your code (boolean or number). That will improve JS speed. + * + * For regular `Array`-s make sure all elements are [0..255]. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Deflate.prototype.push = function (data, mode) { + var strm = this.strm; + var chunkSize = this.options.chunkSize; + var status, _mode; -// '`foo/`' should not continue with the '`..`' -const REPLACERS = [ + if (this.ended) { return false; } - // > Trailing spaces are ignored unless they are quoted with backslash ("\") - [ - // (a\ ) -> (a ) - // (a ) -> (a) - // (a \ ) -> (a ) - /\\?\s+$/, - match => match.indexOf('\\') === 0 - ? SPACE - : EMPTY - ], + _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH); - // replace (\ ) with ' ' - [ - /\\\s/g, - () => SPACE - ], + // Convert data if needed + if (typeof data === 'string') { + // If we need to compress text, change encoding to utf8. + strm.input = strings.string2buf(data); + } else if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } - // Escape metacharacters - // which is written down by users but means special for regular expressions. + strm.next_in = 0; + strm.avail_in = strm.input.length; - // > There are 12 characters with special meanings: - // > - the backslash \, - // > - the caret ^, - // > - the dollar sign $, - // > - the period or dot ., - // > - the vertical bar or pipe symbol |, - // > - the question mark ?, - // > - the asterisk or star *, - // > - the plus sign +, - // > - the opening parenthesis (, - // > - the closing parenthesis ), - // > - and the opening square bracket [, - // > - the opening curly brace {, - // > These special characters are often called "metacharacters". - [ - /[\\$.|*+(){^]/g, - match => `\\${match}` - ], + do { + if (strm.avail_out === 0) { + strm.output = new utils.Buf8(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + status = zlib_deflate.deflate(strm, _mode); /* no bad return value */ - [ - // > a question mark (?) matches a single character - /(?!\\)\?/g, - () => '[^/]' - ], + if (status !== Z_STREAM_END && status !== Z_OK) { + this.onEnd(status); + this.ended = true; + return false; + } + if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) { + if (this.options.to === 'string') { + this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out))); + } else { + this.onData(utils.shrinkBuf(strm.output, strm.next_out)); + } + } + } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END); - // leading slash - [ + // Finalize on the last chunk. 
+ if (_mode === Z_FINISH) { + status = zlib_deflate.deflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === Z_OK; + } - // > A leading slash matches the beginning of the pathname. - // > For example, "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". - // A leading slash matches the beginning of the pathname - /^\//, - () => '^' - ], + // callback interim results if Z_SYNC_FLUSH. + if (_mode === Z_SYNC_FLUSH) { + this.onEnd(Z_OK); + strm.avail_out = 0; + return true; + } - // replace special metacharacter slash after the leading slash - [ - /\//g, - () => '\\/' - ], + return true; +}; - [ - // > A leading "**" followed by a slash means match in all directories. - // > For example, "**/foo" matches file or directory "foo" anywhere, - // > the same as pattern "foo". - // > "**/foo/bar" matches file or directory "bar" anywhere that is directly - // > under directory "foo". - // Notice that the '*'s have been replaced as '\\*' - /^\^*\\\*\\\*\\\//, - - // '**/foo' <-> 'foo' - () => '^(?:.*\\/)?' - ], - // starting - [ - // there will be no leading '/' - // (which has been replaced by section "leading slash") - // If starts with '**', adding a '^' to the regular expression also works - /^(?=[^^])/, - function startingReplacer () { - // If has a slash `/` at the beginning or middle - return !/\/(?!$)/.test(this) - // > Prior to 2.22.1 - // > If the pattern does not contain a slash /, - // > Git treats it as a shell glob pattern - // Actually, if there is only a trailing slash, - // git also treats it as a shell glob pattern +/** + * Deflate#onData(chunk) -> Void + * - chunk (Uint8Array|Array|String): output data. Type of array depends + * on js engine support. When string output requested, each chunk + * will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Deflate.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; - // After 2.22.1 (compatible but clearer) - // > If there is a separator at the beginning or middle (or both) - // > of the pattern, then the pattern is relative to the directory - // > level of the particular .gitignore file itself. - // > Otherwise the pattern may also match at any level below - // > the .gitignore level. - ? '(?:^|\\/)' - // > Otherwise, Git treats the pattern as a shell glob suitable for - // > consumption by fnmatch(3) - : '^' +/** + * Deflate#onEnd(status) -> Void + * - status (Number): deflate status. 0 (Z_OK) on success, + * other if not. + * + * Called once after you tell deflate that the input stream is + * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) + * or if an error happened. By default - join collected chunks, + * free memory and fill `results` / `err` properties. + **/ +Deflate.prototype.onEnd = function (status) { + // On success - join + if (status === Z_OK) { + if (this.options.to === 'string') { + this.result = this.chunks.join(''); + } else { + this.result = utils.flattenChunks(this.chunks); } - ], + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; - // two globstars - [ - // Use lookahead assertions so that we could match more than one `'/**'` - /\\\/\\\*\\\*(?=\\\/|$)/g, - // Zero, one or several directories - // should not use '*', or it will be replaced by the next replacer +/** + * deflate(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. 
+ * - options (Object): zlib deflate options. + * + * Compress `data` with deflate algorithm and `options`. + * + * Supported options are: + * + * - level + * - windowBits + * - memLevel + * - strategy + * - dictionary + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be "binary string" + * (each char code [0..255]) + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , data = Uint8Array([1,2,3,4,5,6,7,8,9]); + * + * console.log(pako.deflate(data)); + * ``` + **/ +function deflate(input, options) { + var deflator = new Deflate(options); - // Check if it is not the last `'/**'` - (_, index, str) => index + 6 < str.length + deflator.push(input, true); - // case: /**/ - // > A slash followed by two consecutive asterisks then a slash matches - // > zero or more directories. - // > For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on. - // '/**/' - ? '(?:\\/[^\\/]+)*' + // That will never happens, if you don't cheat with options :) + if (deflator.err) { throw deflator.msg || msg[deflator.err]; } - // case: /** - // > A trailing `"/**"` matches everything inside. + return deflator.result; +} - // #21: everything inside but it should not include the current folder - : '\\/.+' - ], - // intermediate wildcards - [ - // Never replace escaped '*' - // ignore rule '\*' will match the path '*' +/** + * deflateRaw(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but creates raw data, without wrapper + * (header and adler32 crc). + **/ +function deflateRaw(input, options) { + options = options || {}; + options.raw = true; + return deflate(input, options); +} - // 'abc.*/' -> go - // 'abc.*' -> skip this rule - /(^|[^\\]+)\\\*(?=.+)/g, - // '*.js' matches '.js' - // '*.js' doesn't match 'abc' - (_, p1) => `${p1}[^\\/]*` - ], +/** + * gzip(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but create gzip wrapper instead of + * deflate one. + **/ +function gzip(input, options) { + options = options || {}; + options.gzip = true; + return deflate(input, options); +} - [ - // unescape, revert step 3 except for back slash - // For example, if a user escape a '\\*', - // after step 3, the result will be '\\\\\\*' - /\\\\\\(?=[$.|*+(){^])/g, - () => ESCAPE - ], - [ - // '\\\\' -> '\\' - /\\\\/g, - () => ESCAPE - ], +exports.Deflate = Deflate; +exports.deflate = deflate; +exports.deflateRaw = deflateRaw; +exports.gzip = gzip; - [ - // > The range notation, e.g. [a-zA-Z], - // > can be used to match one of the characters in a range. - // `\` is escaped by step 3 - /(\\)?\[([^\]/]*?)(\\*)($|\])/g, - (match, leadEscape, range, endEscape, close) => leadEscape === ESCAPE - // '\\[bar]' -> '\\\\[bar\\]' - ? `\\[${range}${cleanRangeBackSlash(endEscape)}${close}` - : close === ']' - ? endEscape.length % 2 === 0 - // A normal case, and it is a range notation - // '[bar]' - // '[bar\\\\]' - ? 
`[${sanitizeRange(range)}${endEscape}]` - // Invalid range notaton - // '[bar\\]' -> '[bar\\\\]' - : '[]' - : '[]' - ], +/***/ }), - // ending - [ - // 'js' will not match 'js.' - // 'ab' will not match 'abc' - /(?:[^*])$/, +/***/ 6522: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // WTF! - // https://git-scm.com/docs/gitignore - // changes in [2.22.1](https://git-scm.com/docs/gitignore/2.22.1) - // which re-fixes #24, #38 +"use strict"; - // > If there is a separator at the end of the pattern then the pattern - // > will only match directories, otherwise the pattern can match both - // > files and directories. - // 'js*' will not match 'a.js' - // 'js/' will not match 'a.js' - // 'js' will match 'a.js' and 'a.js/' - match => /\/$/.test(match) - // foo/ will not match 'foo' - ? `${match}$` - // foo matches 'foo' and 'foo/' - : `${match}(?=$|\\/$)` - ], - // trailing wildcard - [ - /(\^|\\\/)?\\\*$/, - (_, p1) => { - const prefix = p1 - // '\^': - // '/*' does not match EMPTY - // '/*' does not match everything +var zlib_inflate = __nccwpck_require__(409); +var utils = __nccwpck_require__(5483); +var strings = __nccwpck_require__(2380); +var c = __nccwpck_require__(8282); +var msg = __nccwpck_require__(1890); +var ZStream = __nccwpck_require__(6442); +var GZheader = __nccwpck_require__(5105); - // '\\\/': - // 'abc/*' does not match 'abc/' - ? `${p1}[^/]+` +var toString = Object.prototype.toString; - // 'a*' matches 'a' - // 'a*' matches 'aa' - : '[^/]*' +/** + * class Inflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[inflate]] + * and [[inflateRaw]]. + **/ - return `${prefix}(?=$|\\/$)` - } - ], -] +/* internal + * inflate.chunks -> Array + * + * Chunks of output data, if [[Inflate#onData]] not overridden. + **/ -// A simple cache, because an ignore rule only has only one certain meaning -const regexCache = Object.create(null) - -// @param {pattern} -const makeRegex = (pattern, ignoreCase) => { - let source = regexCache[pattern] - - if (!source) { - source = REPLACERS.reduce( - (prev, current) => prev.replace(current[0], current[1].bind(pattern)), - pattern - ) - regexCache[pattern] = source - } - - return ignoreCase - ? new RegExp(source, 'i') - : new RegExp(source) -} - -const isString = subject => typeof subject === 'string' - -// > A blank line matches no files, so it can serve as a separator for readability. -const checkPattern = pattern => pattern - && isString(pattern) - && !REGEX_TEST_BLANK_LINE.test(pattern) - - // > A line starting with # serves as a comment. - && pattern.indexOf('#') !== 0 - -const splitPattern = pattern => pattern.split(REGEX_SPLITALL_CRLF) - -class IgnoreRule { - constructor ( - origin, - pattern, - negative, - regex - ) { - this.origin = origin - this.pattern = pattern - this.negative = negative - this.regex = regex - } -} +/** + * Inflate.result -> Uint8Array|Array|String + * + * Uncompressed result, generated by default [[Inflate#onData]] + * and [[Inflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you + * push a chunk with explicit flush (call [[Inflate#push]] with + * `Z_SYNC_FLUSH` param). + **/ -const createRule = (pattern, ignoreCase) => { - const origin = pattern - let negative = false +/** + * Inflate.err -> Number + * + * Error code after inflate finished. 0 (Z_OK) on success. + * Should be checked if broken data possible. + **/ - // > An optional prefix "!" 
which negates the pattern; - if (pattern.indexOf('!') === 0) { - negative = true - pattern = pattern.substr(1) - } +/** + * Inflate.msg -> String + * + * Error message, if [[Inflate.err]] != 0 + **/ - pattern = pattern - // > Put a backslash ("\") in front of the first "!" for patterns that - // > begin with a literal "!", for example, `"\!important!.txt"`. - .replace(REGEX_REPLACE_LEADING_EXCAPED_EXCLAMATION, '!') - // > Put a backslash ("\") in front of the first hash for patterns that - // > begin with a hash. - .replace(REGEX_REPLACE_LEADING_EXCAPED_HASH, '#') - const regex = makeRegex(pattern, ignoreCase) +/** + * new Inflate(options) + * - options (Object): zlib inflate options. + * + * Creates new inflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `windowBits` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw inflate + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * By default, when no options set, autodetect deflate/gzip data format via + * wrapper header. + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * var inflate = new pako.Inflate({ level: 3}); + * + * inflate.push(chunk1, false); + * inflate.push(chunk2, true); // true -> last chunk + * + * if (inflate.err) { throw new Error(inflate.err); } + * + * console.log(inflate.result); + * ``` + **/ +function Inflate(options) { + if (!(this instanceof Inflate)) return new Inflate(options); - return new IgnoreRule( - origin, - pattern, - negative, - regex - ) -} + this.options = utils.assign({ + chunkSize: 16384, + windowBits: 0, + to: '' + }, options || {}); -const throwError = (message, Ctor) => { - throw new Ctor(message) -} + var opt = this.options; -const checkPath = (path, originalPath, doThrow) => { - if (!isString(path)) { - return doThrow( - `path must be a string, but got \`${originalPath}\``, - TypeError - ) + // Force window size for `raw` data, if not set directly, + // because we have no header for autodetect. + if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) { + opt.windowBits = -opt.windowBits; + if (opt.windowBits === 0) { opt.windowBits = -15; } } - // We don't know if we should ignore EMPTY, so throw - if (!path) { - return doThrow(`path must not be empty`, TypeError) + // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate + if ((opt.windowBits >= 0) && (opt.windowBits < 16) && + !(options && options.windowBits)) { + opt.windowBits += 32; } - // Check if it is a relative path - if (checkPath.isNotRelative(path)) { - const r = '`path.relative()`d' - return doThrow( - `path should be a ${r} string, but got "${originalPath}"`, - RangeError - ) + // Gzip header has no info about windows size, we can do autodetect only + // for deflate. 
So, if window size not set, force it to max when gzip possible + if ((opt.windowBits > 15) && (opt.windowBits < 48)) { + // bit 3 (16) -> gzipped data + // bit 4 (32) -> autodetect gzip/deflate + if ((opt.windowBits & 15) === 0) { + opt.windowBits |= 15; + } } - return true -} - -const isNotRelative = path => REGEX_TEST_INVALID_PATH.test(path) + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data -checkPath.isNotRelative = isNotRelative -checkPath.convert = p => p + this.strm = new ZStream(); + this.strm.avail_out = 0; -class Ignore { - constructor ({ - ignorecase = true, - ignoreCase = ignorecase, - allowRelativePaths = false - } = {}) { - define(this, KEY_IGNORE, true) + var status = zlib_inflate.inflateInit2( + this.strm, + opt.windowBits + ); - this._rules = [] - this._ignoreCase = ignoreCase - this._allowRelativePaths = allowRelativePaths - this._initCache() + if (status !== c.Z_OK) { + throw new Error(msg[status]); } - _initCache () { - this._ignoreCache = Object.create(null) - this._testCache = Object.create(null) - } + this.header = new GZheader(); - _addPattern (pattern) { - // #32 - if (pattern && pattern[KEY_IGNORE]) { - this._rules = this._rules.concat(pattern._rules) - this._added = true - return - } + zlib_inflate.inflateGetHeader(this.strm, this.header); - if (checkPattern(pattern)) { - const rule = createRule(pattern, this._ignoreCase) - this._added = true - this._rules.push(rule) + // Setup dictionary + if (opt.dictionary) { + // Convert data if needed + if (typeof opt.dictionary === 'string') { + opt.dictionary = strings.string2buf(opt.dictionary); + } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { + opt.dictionary = new Uint8Array(opt.dictionary); } - } - - // @param {Array | string | Ignore} pattern - add (pattern) { - this._added = false - - makeArray( - isString(pattern) - ? splitPattern(pattern) - : pattern - ).forEach(this._addPattern, this) - - // Some rules have just added to the ignore, - // making the behavior changed. - if (this._added) { - this._initCache() + if (opt.raw) { //In raw mode we need to set the dictionary early + status = zlib_inflate.inflateSetDictionary(this.strm, opt.dictionary); + if (status !== c.Z_OK) { + throw new Error(msg[status]); + } } - - return this - } - - // legacy - addPattern (pattern) { - return this.add(pattern) } +} - // | ignored : unignored - // negative | 0:0 | 0:1 | 1:0 | 1:1 - // -------- | ------- | ------- | ------- | -------- - // 0 | TEST | TEST | SKIP | X - // 1 | TESTIF | SKIP | TEST | X +/** + * Inflate#push(data[, mode]) -> Boolean + * - data (Uint8Array|Array|ArrayBuffer|String): input data + * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. + * + * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with + * new output chunks. Returns `true` on success. The last data block must have + * mode Z_FINISH (or `true`). That will flush internal pending buffers and call + * [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you + * can use mode Z_SYNC_FLUSH, keeping the decompression context. + * + * On fail call [[Inflate#onEnd]] with error code and return false. + * + * We strongly recommend to use `Uint8Array` on input for best speed (output + * format is detected automatically). 
Also, don't skip last param and always + * use the same type in your code (boolean or number). That will improve JS speed. + * + * For regular `Array`-s make sure all elements are [0..255]. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Inflate.prototype.push = function (data, mode) { + var strm = this.strm; + var chunkSize = this.options.chunkSize; + var dictionary = this.options.dictionary; + var status, _mode; + var next_out_utf8, tail, utf8str; - // - SKIP: always skip - // - TEST: always test - // - TESTIF: only test if checkUnignored - // - X: that never happen + // Flag to properly process Z_BUF_ERROR on testing inflate call + // when we check that all output data was flushed. + var allowBufError = false; - // @param {boolean} whether should check if the path is unignored, - // setting `checkUnignored` to `false` could reduce additional - // path matching. + if (this.ended) { return false; } + _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH); - // @returns {TestResult} true if a file is ignored - _testOne (path, checkUnignored) { - let ignored = false - let unignored = false + // Convert data if needed + if (typeof data === 'string') { + // Only binary strings can be decompressed on practice + strm.input = strings.binstring2buf(data); + } else if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } - this._rules.forEach(rule => { - const {negative} = rule - if ( - unignored === negative && ignored !== unignored - || negative && !ignored && !unignored && !checkUnignored - ) { - return - } + strm.next_in = 0; + strm.avail_in = strm.input.length; - const matched = rule.regex.test(path) + do { + if (strm.avail_out === 0) { + strm.output = new utils.Buf8(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } - if (matched) { - ignored = !negative - unignored = negative - } - }) + status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */ - return { - ignored, - unignored + if (status === c.Z_NEED_DICT && dictionary) { + status = zlib_inflate.inflateSetDictionary(this.strm, dictionary); } - } - // @returns {TestResult} - _test (originalPath, cache, checkUnignored, slices) { - const path = originalPath - // Supports nullable path - && checkPath.convert(originalPath) + if (status === c.Z_BUF_ERROR && allowBufError === true) { + status = c.Z_OK; + allowBufError = false; + } - checkPath( - path, - originalPath, - this._allowRelativePaths - ? 
RETURN_FALSE - : throwError - ) + if (status !== c.Z_STREAM_END && status !== c.Z_OK) { + this.onEnd(status); + this.ended = true; + return false; + } - return this._t(path, cache, checkUnignored, slices) - } + if (strm.next_out) { + if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) { - _t (path, cache, checkUnignored, slices) { - if (path in cache) { - return cache[path] - } + if (this.options.to === 'string') { - if (!slices) { - // path/to/a.js - // ['path', 'to', 'a.js'] - slices = path.split(SLASH) - } + next_out_utf8 = strings.utf8border(strm.output, strm.next_out); - slices.pop() + tail = strm.next_out - next_out_utf8; + utf8str = strings.buf2string(strm.output, next_out_utf8); - // If the path has no parent directory, just test it - if (!slices.length) { - return cache[path] = this._testOne(path, checkUnignored) - } + // move tail + strm.next_out = tail; + strm.avail_out = chunkSize - tail; + if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); } - const parent = this._t( - slices.join(SLASH) + SLASH, - cache, - checkUnignored, - slices - ) + this.onData(utf8str); - // If the path contains a parent directory, check the parent first - return cache[path] = parent.ignored - // > It is not possible to re-include a file if a parent directory of - // > that file is excluded. - ? parent - : this._testOne(path, checkUnignored) - } + } else { + this.onData(utils.shrinkBuf(strm.output, strm.next_out)); + } + } + } - ignores (path) { - return this._test(path, this._ignoreCache, false).ignored - } + // When no more input data, we should check that internal inflate buffers + // are flushed. The only way to do it when avail_out = 0 - run one more + // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR. + // Here we set flag to process this error properly. + // + // NOTE. Deflate does not return error in this case and does not needs such + // logic. + if (strm.avail_in === 0 && strm.avail_out === 0) { + allowBufError = true; + } - createFilter () { - return path => !this.ignores(path) + } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END); + + if (status === c.Z_STREAM_END) { + _mode = c.Z_FINISH; } - filter (paths) { - return makeArray(paths).filter(this.createFilter()) + // Finalize on the last chunk. + if (_mode === c.Z_FINISH) { + status = zlib_inflate.inflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === c.Z_OK; } - // @returns {TestResult} - test (path) { - return this._test(path, this._testCache, true) + // callback interim results if Z_SYNC_FLUSH. + if (_mode === c.Z_SYNC_FLUSH) { + this.onEnd(c.Z_OK); + strm.avail_out = 0; + return true; } -} -const factory = options => new Ignore(options) + return true; +}; -const isPathValid = path => - checkPath(path && checkPath.convert(path), path, RETURN_FALSE) -factory.isPathValid = isPathValid +/** + * Inflate#onData(chunk) -> Void + * - chunk (Uint8Array|Array|String): output data. Type of array depends + * on js engine support. When string output requested, each chunk + * will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. 
+ **/ +Inflate.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; -// Fixes typescript -factory.default = factory -module.exports = factory +/** + * Inflate#onEnd(status) -> Void + * - status (Number): inflate status. 0 (Z_OK) on success, + * other if not. + * + * Called either after you tell inflate that the input stream is + * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) + * or if an error happened. By default - join collected chunks, + * free memory and fill `results` / `err` properties. + **/ +Inflate.prototype.onEnd = function (status) { + // On success - join + if (status === c.Z_OK) { + if (this.options.to === 'string') { + // Glue & convert here, until we teach pako to send + // utf8 aligned strings to onData + this.result = this.chunks.join(''); + } else { + this.result = utils.flattenChunks(this.chunks); + } + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; -// Windows -// -------------------------------------------------------------- -/* istanbul ignore if */ -if ( - // Detect `process` so that it can run in browsers. - typeof process !== 'undefined' - && ( - process.env && process.env.IGNORE_TEST_WIN32 - || process.platform === 'win32' - ) -) { - /* eslint no-control-regex: "off" */ - const makePosix = str => /^\\\\\?\\/.test(str) - || /["<>|\u0000-\u001F]+/u.test(str) - ? str - : str.replace(/\\/g, '/') - checkPath.convert = makePosix +/** + * inflate(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * Decompress `data` with inflate/ungzip and `options`. Autodetect + * format via wrapper header by default. That's why we don't provide + * separate `ungzip` method. + * + * Supported options are: + * + * - windowBits + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , input = pako.deflate([1,2,3,4,5,6,7,8,9]) + * , output; + * + * try { + * output = pako.inflate(input); + * } catch (err) + * console.log(err); + * } + * ``` + **/ +function inflate(input, options) { + var inflator = new Inflate(options); - // 'C:\\foo' <- 'C:\\foo' has been converted to 'C:/' - // 'd:\\foo' - const REGIX_IS_WINDOWS_PATH_ABSOLUTE = /^[a-z]:\//i - checkPath.isNotRelative = path => - REGIX_IS_WINDOWS_PATH_ABSOLUTE.test(path) - || isNotRelative(path) + inflator.push(input, true); + + // That will never happens, if you don't cheat with options :) + if (inflator.err) { throw inflator.msg || msg[inflator.err]; } + + return inflator.result; } -/***/ }), +/** + * inflateRaw(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * The same as [[inflate]], but creates raw data, without wrapper + * (header and adler32 crc). 
+ **/ +function inflateRaw(input, options) { + options = options || {}; + options.raw = true; + return inflate(input, options); +} -/***/ 401: -/***/ (function(__unusedmodule, exports, __webpack_require__) { -"use strict"; +/** + * ungzip(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * Just shortcut to [[inflate]], because it autodetects format + * by header.content. Done for convenience. + **/ -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. +exports.Inflate = Inflate; +exports.inflate = inflate; +exports.inflateRaw = inflateRaw; +exports.ungzip = inflate; -var utils = __webpack_require__(999); -var adler32 = __webpack_require__(141); -var crc32 = __webpack_require__(613); -var inflate_fast = __webpack_require__(181); -var inflate_table = __webpack_require__(685); -var CODES = 0; -var LENS = 1; -var DISTS = 2; +/***/ }), -/* Public constants ==========================================================*/ -/* ===========================================================================*/ +/***/ 5483: +/***/ ((__unused_webpack_module, exports) => { +"use strict"; -/* Allowed flush values; see deflate() and inflate() below for details */ -//var Z_NO_FLUSH = 0; -//var Z_PARTIAL_FLUSH = 1; -//var Z_SYNC_FLUSH = 2; -//var Z_FULL_FLUSH = 3; -var Z_FINISH = 4; -var Z_BLOCK = 5; -var Z_TREES = 6; -/* Return codes for the compression/decompression functions. Negative values - * are errors, positive values are used for special but normal events. 
- */ -var Z_OK = 0; -var Z_STREAM_END = 1; -var Z_NEED_DICT = 2; -//var Z_ERRNO = -1; -var Z_STREAM_ERROR = -2; -var Z_DATA_ERROR = -3; -var Z_MEM_ERROR = -4; -var Z_BUF_ERROR = -5; -//var Z_VERSION_ERROR = -6; +var TYPED_OK = (typeof Uint8Array !== 'undefined') && + (typeof Uint16Array !== 'undefined') && + (typeof Int32Array !== 'undefined'); -/* The deflate compression method */ -var Z_DEFLATED = 8; +function _has(obj, key) { + return Object.prototype.hasOwnProperty.call(obj, key); +} +exports.assign = function (obj /*from1, from2, from3, ...*/) { + var sources = Array.prototype.slice.call(arguments, 1); + while (sources.length) { + var source = sources.shift(); + if (!source) { continue; } -/* STATES ====================================================================*/ -/* ===========================================================================*/ + if (typeof source !== 'object') { + throw new TypeError(source + 'must be non-object'); + } + for (var p in source) { + if (_has(source, p)) { + obj[p] = source[p]; + } + } + } -var HEAD = 1; /* i: waiting for magic header */ -var FLAGS = 2; /* i: waiting for method and flags (gzip) */ -var TIME = 3; /* i: waiting for modification time (gzip) */ -var OS = 4; /* i: waiting for extra flags and operating system (gzip) */ -var EXLEN = 5; /* i: waiting for extra length (gzip) */ -var EXTRA = 6; /* i: waiting for extra bytes (gzip) */ -var NAME = 7; /* i: waiting for end of file name (gzip) */ -var COMMENT = 8; /* i: waiting for end of comment (gzip) */ -var HCRC = 9; /* i: waiting for header crc (gzip) */ -var DICTID = 10; /* i: waiting for dictionary check value */ -var DICT = 11; /* waiting for inflateSetDictionary() call */ -var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ -var TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */ -var STORED = 14; /* i: waiting for stored size (length and complement) */ -var COPY_ = 15; /* i/o: same as COPY below, but only first time in */ -var COPY = 16; /* i/o: waiting for input or output to copy stored block */ -var TABLE = 17; /* i: waiting for dynamic block table lengths */ -var LENLENS = 18; /* i: waiting for code length code lengths */ -var CODELENS = 19; /* i: waiting for length/lit and distance code lengths */ -var LEN_ = 20; /* i: same as LEN below, but only first time in */ -var LEN = 21; /* i: waiting for length/lit/eob code */ -var LENEXT = 22; /* i: waiting for length extra bits */ -var DIST = 23; /* i: waiting for distance code */ -var DISTEXT = 24; /* i: waiting for distance extra bits */ -var MATCH = 25; /* o: waiting for output space to copy string */ -var LIT = 26; /* o: waiting for output space to write literal */ -var CHECK = 27; /* i: waiting for 32-bit check value */ -var LENGTH = 28; /* i: waiting for 32-bit length (gzip) */ -var DONE = 29; /* finished check, done -- remain here until reset */ -var BAD = 30; /* got a data error -- remain here until reset */ -var MEM = 31; /* got an inflate() memory error -- remain here until reset */ -var SYNC = 32; /* looking for synchronization bytes to restart inflate() */ + return obj; +}; -/* ===========================================================================*/ +// reduce buffer size, avoiding mem copy +exports.shrinkBuf = function (buf, size) { + if (buf.length === size) { return buf; } + if (buf.subarray) { return buf.subarray(0, size); } + buf.length = size; + return buf; +}; -var ENOUGH_LENS = 852; -var ENOUGH_DISTS = 592; -//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); +var fnTyped = { + 
arraySet: function (dest, src, src_offs, len, dest_offs) { + if (src.subarray && dest.subarray) { + dest.set(src.subarray(src_offs, src_offs + len), dest_offs); + return; + } + // Fallback to ordinary array + for (var i = 0; i < len; i++) { + dest[dest_offs + i] = src[src_offs + i]; + } + }, + // Join array of chunks to single array. + flattenChunks: function (chunks) { + var i, l, len, pos, chunk, result; -var MAX_WBITS = 15; -/* 32K LZ77 window */ -var DEF_WBITS = MAX_WBITS; + // calculate data length + len = 0; + for (i = 0, l = chunks.length; i < l; i++) { + len += chunks[i].length; + } + // join chunks + result = new Uint8Array(len); + pos = 0; + for (i = 0, l = chunks.length; i < l; i++) { + chunk = chunks[i]; + result.set(chunk, pos); + pos += chunk.length; + } -function zswap32(q) { - return (((q >>> 24) & 0xff) + - ((q >>> 8) & 0xff00) + - ((q & 0xff00) << 8) + - ((q & 0xff) << 24)); -} + return result; + } +}; +var fnUntyped = { + arraySet: function (dest, src, src_offs, len, dest_offs) { + for (var i = 0; i < len; i++) { + dest[dest_offs + i] = src[src_offs + i]; + } + }, + // Join array of chunks to single array. + flattenChunks: function (chunks) { + return [].concat.apply([], chunks); + } +}; -function InflateState() { - this.mode = 0; /* current inflate mode */ - this.last = false; /* true if processing last block */ - this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ - this.havedict = false; /* true if dictionary provided */ - this.flags = 0; /* gzip header method and flags (0 if zlib) */ - this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */ - this.check = 0; /* protected copy of check value */ - this.total = 0; /* protected copy of output count */ - // TODO: may be {} - this.head = null; /* where to save gzip header information */ - /* sliding window */ - this.wbits = 0; /* log base 2 of requested window size */ - this.wsize = 0; /* window size or zero if not using window */ - this.whave = 0; /* valid bytes in the window */ - this.wnext = 0; /* window write index */ - this.window = null; /* allocated sliding window, if needed */ +// Enable/Disable typed arrays use, for testing +// +exports.setTyped = function (on) { + if (on) { + exports.Buf8 = Uint8Array; + exports.Buf16 = Uint16Array; + exports.Buf32 = Int32Array; + exports.assign(exports, fnTyped); + } else { + exports.Buf8 = Array; + exports.Buf16 = Array; + exports.Buf32 = Array; + exports.assign(exports, fnUntyped); + } +}; - /* bit accumulator */ - this.hold = 0; /* input bit accumulator */ - this.bits = 0; /* number of bits in "in" */ +exports.setTyped(TYPED_OK); - /* for string and stored block copying */ - this.length = 0; /* literal or length of data to copy */ - this.offset = 0; /* distance back to copy string from */ - /* for table and code decoding */ - this.extra = 0; /* extra bits needed */ +/***/ }), - /* fixed and dynamic code tables */ - this.lencode = null; /* starting table for length/literal codes */ - this.distcode = null; /* starting table for distance codes */ - this.lenbits = 0; /* index bits for lencode */ - this.distbits = 0; /* index bits for distcode */ +/***/ 2380: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - /* dynamic table building */ - this.ncode = 0; /* number of code length code lengths */ - this.nlen = 0; /* number of length code lengths */ - this.ndist = 0; /* number of distance code lengths */ - this.have = 0; /* number of code lengths in lens[] */ - this.next = null; /* next available space in codes[] */ +"use strict"; +// String 
encode/decode helpers - this.lens = new utils.Buf16(320); /* temporary storage for code lengths */ - this.work = new utils.Buf16(288); /* work area for code table building */ - /* - because we don't have pointers in js, we use lencode and distcode directly - as buffers so we don't need codes - */ - //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */ - this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */ - this.distdyn = null; /* dynamic table for distance codes (JS specific) */ - this.sane = 0; /* if false, allow invalid distance too far */ - this.back = 0; /* bits back of last unprocessed length/lit */ - this.was = 0; /* initial length of match */ -} -function inflateResetKeep(strm) { - var state; +var utils = __nccwpck_require__(5483); - if (!strm || !strm.state) { return Z_STREAM_ERROR; } - state = strm.state; - strm.total_in = strm.total_out = state.total = 0; - strm.msg = ''; /*Z_NULL*/ - if (state.wrap) { /* to support ill-conceived Java test suite */ - strm.adler = state.wrap & 1; - } - state.mode = HEAD; - state.last = 0; - state.havedict = 0; - state.dmax = 32768; - state.head = null/*Z_NULL*/; - state.hold = 0; - state.bits = 0; - //state.lencode = state.distcode = state.next = state.codes; - state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS); - state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS); - state.sane = 1; - state.back = -1; - //Tracev((stderr, "inflate: reset\n")); - return Z_OK; -} +// Quick check if we can use fast array to bin string conversion +// +// - apply(Array) can fail on Android 2.2 +// - apply(Uint8Array) can fail on iOS 5.1 Safari +// +var STR_APPLY_OK = true; +var STR_APPLY_UIA_OK = true; -function inflateReset(strm) { - var state; +try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; } +try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; } - if (!strm || !strm.state) { return Z_STREAM_ERROR; } - state = strm.state; - state.wsize = 0; - state.whave = 0; - state.wnext = 0; - return inflateResetKeep(strm); +// Table with utf8 lengths (calculated by first byte of sequence) +// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, +// because max possible codepoint is 0x10ffff +var _utf8len = new utils.Buf8(256); +for (var q = 0; q < 256; q++) { + _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1); } +_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start -function inflateReset2(strm, windowBits) { - var wrap; - var state; - /* get the state */ - if (!strm || !strm.state) { return Z_STREAM_ERROR; } - state = strm.state; +// convert string to array (typed, when possible) +exports.string2buf = function (str) { + var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; - /* extract wrap request from windowBits parameter */ - if (windowBits < 0) { - wrap = 0; - windowBits = -windowBits; - } - else { - wrap = (windowBits >> 4) + 1; - if (windowBits < 48) { - windowBits &= 15; + // count binary size + for (m_pos = 0; m_pos < str_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } } + buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
3 : 4; } - /* set number of window bits, free window if different */ - if (windowBits && (windowBits < 8 || windowBits > 15)) { - return Z_STREAM_ERROR; + // allocate buffer + buf = new utils.Buf8(buf_len); + + // convert + for (i = 0, m_pos = 0; i < buf_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + if (c < 0x80) { + /* one byte */ + buf[i++] = c; + } else if (c < 0x800) { + /* two bytes */ + buf[i++] = 0xC0 | (c >>> 6); + buf[i++] = 0x80 | (c & 0x3f); + } else if (c < 0x10000) { + /* three bytes */ + buf[i++] = 0xE0 | (c >>> 12); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } else { + /* four bytes */ + buf[i++] = 0xf0 | (c >>> 18); + buf[i++] = 0x80 | (c >>> 12 & 0x3f); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } } - if (state.window !== null && state.wbits !== windowBits) { - state.window = null; + + return buf; +}; + +// Helper (used in 2 places) +function buf2binstring(buf, len) { + // On Chrome, the arguments in a function call that are allowed is `65534`. + // If the length of the buffer is smaller than that, we can use this optimization, + // otherwise we will take a slower path. + if (len < 65534) { + if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) { + return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len)); + } } - /* update state and reset the rest of it */ - state.wrap = wrap; - state.wbits = windowBits; - return inflateReset(strm); + var result = ''; + for (var i = 0; i < len; i++) { + result += String.fromCharCode(buf[i]); + } + return result; } -function inflateInit2(strm, windowBits) { - var ret; - var state; - if (!strm) { return Z_STREAM_ERROR; } - //strm.msg = Z_NULL; /* in case we return an error */ +// Convert byte array to binary string +exports.buf2binstring = function (buf) { + return buf2binstring(buf, buf.length); +}; - state = new InflateState(); - //if (state === Z_NULL) return Z_MEM_ERROR; - //Tracev((stderr, "inflate: allocated\n")); - strm.state = state; - state.window = null/*Z_NULL*/; - ret = inflateReset2(strm, windowBits); - if (ret !== Z_OK) { - strm.state = null/*Z_NULL*/; +// Convert binary string (typed, when possible) +exports.binstring2buf = function (str) { + var buf = new utils.Buf8(str.length); + for (var i = 0, len = buf.length; i < len; i++) { + buf[i] = str.charCodeAt(i); } - return ret; -} - -function inflateInit(strm) { - return inflateInit2(strm, DEF_WBITS); -} + return buf; +}; -/* - Return state with length and distance decoding tables and index sizes set to - fixed code decoding. Normally this returns fixed tables from inffixed.h. - If BUILDFIXED is defined, then instead this routine builds the tables the - first time it's called, and returns those tables the first time and - thereafter. This reduces the size of the code by about 2K bytes, in - exchange for a little execution time. However, BUILDFIXED should not be - used for threaded applications, since the rewriting of the tables and virgin - may not be thread-safe. 
- */ -var virgin = true; +// convert array to string +exports.buf2string = function (buf, max) { + var i, out, c, c_len; + var len = max || buf.length; -var lenfix, distfix; // We have no pointers in JS, so keep tables separate + // Reserve max possible length (2 words per char) + // NB: by unknown reasons, Array is significantly faster for + // String.fromCharCode.apply than Uint16Array. + var utf16buf = new Array(len * 2); -function fixedtables(state) { - /* build fixed huffman tables if first call (may not be thread safe) */ - if (virgin) { - var sym; + for (out = 0, i = 0; i < len;) { + c = buf[i++]; + // quick process ascii + if (c < 0x80) { utf16buf[out++] = c; continue; } - lenfix = new utils.Buf32(512); - distfix = new utils.Buf32(32); + c_len = _utf8len[c]; + // skip 5 & 6 byte codes + if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; } - /* literal/length table */ - sym = 0; - while (sym < 144) { state.lens[sym++] = 8; } - while (sym < 256) { state.lens[sym++] = 9; } - while (sym < 280) { state.lens[sym++] = 7; } - while (sym < 288) { state.lens[sym++] = 8; } + // apply mask on first byte + c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; + // join the rest + while (c_len > 1 && i < len) { + c = (c << 6) | (buf[i++] & 0x3f); + c_len--; + } - inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 }); + // terminated by end of string? + if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } - /* distance table */ - sym = 0; - while (sym < 32) { state.lens[sym++] = 5; } + if (c < 0x10000) { + utf16buf[out++] = c; + } else { + c -= 0x10000; + utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); + utf16buf[out++] = 0xdc00 | (c & 0x3ff); + } + } - inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 }); + return buf2binstring(utf16buf, out); +}; - /* do this just once */ - virgin = false; - } - state.lencode = lenfix; - state.lenbits = 9; - state.distcode = distfix; - state.distbits = 5; -} +// Calculate max possible position in utf8 buffer, +// that will not break sequence. If that's not possible +// - (very small limits) return max size as is. +// +// buf[] - utf8 bytes array +// max - length limit (mandatory); +exports.utf8border = function (buf, max) { + var pos; + max = max || buf.length; + if (max > buf.length) { max = buf.length; } -/* - Update the window with the last wsize (normally 32K) bytes written before - returning. If window does not exist yet, create it. This is only called - when a window is already in use, or when output has been written during this - inflate call, but the end of the deflate stream has not been reached yet. - It is also called to create a window for dictionary data when a dictionary - is loaded. + // go back from last position, until start of sequence found + pos = max - 1; + while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } - Providing output buffers larger than 32K to inflate() should provide a speed - advantage, since only the last 32K of output is copied to the sliding window - upon return from inflate(), and since all distances after the first 32K of - output will fall in the output data, making match copies simpler and faster. - The advantage may be dependent on the size of the processor's data caches. - */ -function updatewindow(strm, src, end, copy) { - var dist; - var state = strm.state; + // Very small and broken sequence, + // return max, because we should return something anyway. 
+ if (pos < 0) { return max; } - /* if it hasn't been done already, allocate space for the window */ - if (state.window === null) { - state.wsize = 1 << state.wbits; - state.wnext = 0; - state.whave = 0; + // If we came to start of buffer - that means buffer is too small, + // return max too. + if (pos === 0) { return max; } - state.window = new utils.Buf8(state.wsize); - } + return (pos + _utf8len[buf[pos]] > max) ? pos : max; +}; - /* copy state->wsize or less output bytes into the circular window */ - if (copy >= state.wsize) { - utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0); - state.wnext = 0; - state.whave = state.wsize; - } - else { - dist = state.wsize - state.wnext; - if (dist > copy) { - dist = copy; - } - //zmemcpy(state->window + state->wnext, end - copy, dist); - utils.arraySet(state.window, src, end - copy, dist, state.wnext); - copy -= dist; - if (copy) { - //zmemcpy(state->window, end - copy, copy); - utils.arraySet(state.window, src, end - copy, copy, 0); - state.wnext = copy; - state.whave = state.wsize; - } - else { - state.wnext += dist; - if (state.wnext === state.wsize) { state.wnext = 0; } - if (state.whave < state.wsize) { state.whave += dist; } - } + +/***/ }), + +/***/ 6924: +/***/ ((module) => { + +"use strict"; + + +// Note: adler32 takes 12% for level 0 and 2% for level 6. +// It isn't worth it to make additional optimizations as in original. +// Small size is preferable. + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +function adler32(adler, buf, len, pos) { + var s1 = (adler & 0xffff) |0, + s2 = ((adler >>> 16) & 0xffff) |0, + n = 0; + + while (len !== 0) { + // Set limit ~ twice less than 5552, to keep + // s2 in 31-bits, because we force signed ints. + // in other case %= will fail. + n = len > 2000 ? 
2000 : len; + len -= n; + + do { + s1 = (s1 + buf[pos++]) |0; + s2 = (s2 + s1) |0; + } while (--n); + + s1 %= 65521; + s2 %= 65521; } - return 0; + + return (s1 | (s2 << 16)) |0; } -function inflate(strm, flush) { - var state; - var input, output; // input/output buffers - var next; /* next input INDEX */ - var put; /* next output INDEX */ - var have, left; /* available input and output */ - var hold; /* bit buffer */ - var bits; /* bits in bit buffer */ - var _in, _out; /* save starting available input and output */ - var copy; /* number of stored or match bytes to copy */ - var from; /* where to copy match bytes from */ - var from_source; - var here = 0; /* current decoding table entry */ - var here_bits, here_op, here_val; // paked "here" denormalized (JS specific) - //var last; /* parent table entry */ - var last_bits, last_op, last_val; // paked "last" denormalized (JS specific) - var len; /* length to copy for repeats, bits to drop */ - var ret; /* return code */ - var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */ - var opts; - var n; // temporary var for NEED_BITS +module.exports = adler32; - var order = /* permutation of code lengths */ - [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; +/***/ }), - if (!strm || !strm.state || !strm.output || - (!strm.input && strm.avail_in !== 0)) { - return Z_STREAM_ERROR; - } +/***/ 8282: +/***/ ((module) => { - state = strm.state; - if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */ +"use strict"; - //--- LOAD() --- - put = strm.next_out; - output = strm.output; - left = strm.avail_out; - next = strm.next_in; - input = strm.input; - have = strm.avail_in; - hold = state.hold; - bits = state.bits; - //--- +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
- _in = have; - _out = left; - ret = Z_OK; +module.exports = { - inf_leave: // goto emulation - for (;;) { - switch (state.mode) { - case HEAD: - if (state.wrap === 0) { - state.mode = TYPEDO; - break; - } - //=== NEEDBITS(16); - while (bits < 16) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */ - state.check = 0/*crc32(0L, Z_NULL, 0)*/; - //=== CRC2(state.check, hold); - hbuf[0] = hold & 0xff; - hbuf[1] = (hold >>> 8) & 0xff; - state.check = crc32(state.check, hbuf, 2, 0); - //===// + /* Allowed flush values; see deflate() and inflate() below for details */ + Z_NO_FLUSH: 0, + Z_PARTIAL_FLUSH: 1, + Z_SYNC_FLUSH: 2, + Z_FULL_FLUSH: 3, + Z_FINISH: 4, + Z_BLOCK: 5, + Z_TREES: 6, - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - state.mode = FLAGS; - break; - } - state.flags = 0; /* expect zlib header */ - if (state.head) { - state.head.done = false; - } - if (!(state.wrap & 1) || /* check if zlib header allowed */ - (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) { - strm.msg = 'incorrect header check'; - state.mode = BAD; - break; - } - if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) { - strm.msg = 'unknown compression method'; - state.mode = BAD; - break; - } - //--- DROPBITS(4) ---// - hold >>>= 4; - bits -= 4; - //---// - len = (hold & 0x0f)/*BITS(4)*/ + 8; - if (state.wbits === 0) { - state.wbits = len; - } - else if (len > state.wbits) { - strm.msg = 'invalid window size'; - state.mode = BAD; - break; - } - state.dmax = 1 << len; - //Tracev((stderr, "inflate: zlib header ok\n")); - strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; - state.mode = hold & 0x200 ? DICTID : TYPE; - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - break; - case FLAGS: - //=== NEEDBITS(16); */ - while (bits < 16) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.flags = hold; - if ((state.flags & 0xff) !== Z_DEFLATED) { - strm.msg = 'unknown compression method'; - state.mode = BAD; - break; - } - if (state.flags & 0xe000) { - strm.msg = 'unknown header flags set'; - state.mode = BAD; - break; - } - if (state.head) { - state.head.text = ((hold >> 8) & 1); - } - if (state.flags & 0x0200) { - //=== CRC2(state.check, hold); - hbuf[0] = hold & 0xff; - hbuf[1] = (hold >>> 8) & 0xff; - state.check = crc32(state.check, hbuf, 2, 0); - //===// - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - state.mode = TIME; - /* falls through */ - case TIME: - //=== NEEDBITS(32); */ - while (bits < 32) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - if (state.head) { - state.head.time = hold; - } - if (state.flags & 0x0200) { - //=== CRC4(state.check, hold) - hbuf[0] = hold & 0xff; - hbuf[1] = (hold >>> 8) & 0xff; - hbuf[2] = (hold >>> 16) & 0xff; - hbuf[3] = (hold >>> 24) & 0xff; - state.check = crc32(state.check, hbuf, 4, 0); - //=== - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - state.mode = OS; - /* falls through */ - case OS: - //=== NEEDBITS(16); */ - while (bits < 16) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - if (state.head) { - state.head.xflags = (hold & 0xff); - state.head.os = (hold >> 8); - } - if (state.flags & 0x0200) { - //=== CRC2(state.check, hold); - hbuf[0] = hold & 0xff; - hbuf[1] = (hold >>> 8) & 0xff; - state.check = crc32(state.check, hbuf, 2, 
0); - //===// - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - state.mode = EXLEN; - /* falls through */ - case EXLEN: - if (state.flags & 0x0400) { - //=== NEEDBITS(16); */ - while (bits < 16) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.length = hold; - if (state.head) { - state.head.extra_len = hold; - } - if (state.flags & 0x0200) { - //=== CRC2(state.check, hold); - hbuf[0] = hold & 0xff; - hbuf[1] = (hold >>> 8) & 0xff; - state.check = crc32(state.check, hbuf, 2, 0); - //===// - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - } - else if (state.head) { - state.head.extra = null/*Z_NULL*/; - } - state.mode = EXTRA; - /* falls through */ - case EXTRA: - if (state.flags & 0x0400) { - copy = state.length; - if (copy > have) { copy = have; } - if (copy) { - if (state.head) { - len = state.head.extra_len - state.length; - if (!state.head.extra) { - // Use untyped array for more convenient processing later - state.head.extra = new Array(state.head.extra_len); - } - utils.arraySet( - state.head.extra, - input, - next, - // extra field is limited to 65536 bytes - // - no need for additional size check - copy, - /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/ - len - ); - //zmemcpy(state.head.extra + len, next, - // len + copy > state.head.extra_max ? - // state.head.extra_max - len : copy); - } - if (state.flags & 0x0200) { - state.check = crc32(state.check, input, copy, next); - } - have -= copy; - next += copy; - state.length -= copy; - } - if (state.length) { break inf_leave; } - } - state.length = 0; - state.mode = NAME; - /* falls through */ - case NAME: - if (state.flags & 0x0800) { - if (have === 0) { break inf_leave; } - copy = 0; - do { - // TODO: 2 or 1 bytes? - len = input[next + copy++]; - /* use constant limit because in js we should not preallocate memory */ - if (state.head && len && - (state.length < 65536 /*state.head.name_max*/)) { - state.head.name += String.fromCharCode(len); - } - } while (len && copy < have); + /* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ + Z_OK: 0, + Z_STREAM_END: 1, + Z_NEED_DICT: 2, + Z_ERRNO: -1, + Z_STREAM_ERROR: -2, + Z_DATA_ERROR: -3, + //Z_MEM_ERROR: -4, + Z_BUF_ERROR: -5, + //Z_VERSION_ERROR: -6, - if (state.flags & 0x0200) { - state.check = crc32(state.check, input, copy, next); - } - have -= copy; - next += copy; - if (len) { break inf_leave; } - } - else if (state.head) { - state.head.name = null; - } - state.length = 0; - state.mode = COMMENT; - /* falls through */ - case COMMENT: - if (state.flags & 0x1000) { - if (have === 0) { break inf_leave; } - copy = 0; - do { - len = input[next + copy++]; - /* use constant limit because in js we should not preallocate memory */ - if (state.head && len && - (state.length < 65536 /*state.head.comm_max*/)) { - state.head.comment += String.fromCharCode(len); - } - } while (len && copy < have); - if (state.flags & 0x0200) { - state.check = crc32(state.check, input, copy, next); - } - have -= copy; - next += copy; - if (len) { break inf_leave; } - } - else if (state.head) { - state.head.comment = null; - } - state.mode = HCRC; - /* falls through */ - case HCRC: - if (state.flags & 0x0200) { - //=== NEEDBITS(16); */ - while (bits < 16) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - if (hold !== (state.check & 0xffff)) { - strm.msg = 'header crc mismatch'; - state.mode = BAD; - break; - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - } - if (state.head) { - state.head.hcrc = ((state.flags >> 9) & 1); - state.head.done = true; - } - strm.adler = state.check = 0; - state.mode = TYPE; - break; - case DICTID: - //=== NEEDBITS(32); */ - while (bits < 32) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - strm.adler = state.check = zswap32(hold); - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - state.mode = DICT; - /* falls through */ - case DICT: - if (state.havedict === 0) { - //--- RESTORE() --- - strm.next_out = put; - strm.avail_out = left; - strm.next_in = next; - strm.avail_in = have; - state.hold = hold; - state.bits = bits; - //--- - return Z_NEED_DICT; - } - strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; - state.mode = TYPE; - /* falls through */ - case TYPE: - if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; } - /* falls through */ - case TYPEDO: - if (state.last) { - //--- BYTEBITS() ---// - hold >>>= bits & 7; - bits -= bits & 7; - //---// - state.mode = CHECK; - break; - } - //=== NEEDBITS(3); */ - while (bits < 3) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.last = (hold & 0x01)/*BITS(1)*/; - //--- DROPBITS(1) ---// - hold >>>= 1; - bits -= 1; - //---// + /* compression levels */ + Z_NO_COMPRESSION: 0, + Z_BEST_SPEED: 1, + Z_BEST_COMPRESSION: 9, + Z_DEFAULT_COMPRESSION: -1, - switch ((hold & 0x03)/*BITS(2)*/) { - case 0: /* stored block */ - //Tracev((stderr, "inflate: stored block%s\n", - // state.last ? " (last)" : "")); - state.mode = STORED; - break; - case 1: /* fixed block */ - fixedtables(state); - //Tracev((stderr, "inflate: fixed codes block%s\n", - // state.last ? " (last)" : "")); - state.mode = LEN_; /* decode codes */ - if (flush === Z_TREES) { - //--- DROPBITS(2) ---// - hold >>>= 2; - bits -= 2; - //---// - break inf_leave; - } - break; - case 2: /* dynamic block */ - //Tracev((stderr, "inflate: dynamic codes block%s\n", - // state.last ? 
" (last)" : "")); - state.mode = TABLE; - break; - case 3: - strm.msg = 'invalid block type'; - state.mode = BAD; - } - //--- DROPBITS(2) ---// - hold >>>= 2; - bits -= 2; - //---// - break; - case STORED: - //--- BYTEBITS() ---// /* go to byte boundary */ - hold >>>= bits & 7; - bits -= bits & 7; - //---// - //=== NEEDBITS(32); */ - while (bits < 32) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) { - strm.msg = 'invalid stored block lengths'; - state.mode = BAD; - break; - } - state.length = hold & 0xffff; - //Tracev((stderr, "inflate: stored length %u\n", - // state.length)); - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - state.mode = COPY_; - if (flush === Z_TREES) { break inf_leave; } - /* falls through */ - case COPY_: - state.mode = COPY; - /* falls through */ - case COPY: - copy = state.length; - if (copy) { - if (copy > have) { copy = have; } - if (copy > left) { copy = left; } - if (copy === 0) { break inf_leave; } - //--- zmemcpy(put, next, copy); --- - utils.arraySet(output, input, next, copy, put); - //---// - have -= copy; - next += copy; - left -= copy; - put += copy; - state.length -= copy; - break; - } - //Tracev((stderr, "inflate: stored end\n")); - state.mode = TYPE; - break; - case TABLE: - //=== NEEDBITS(14); */ - while (bits < 14) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257; - //--- DROPBITS(5) ---// - hold >>>= 5; - bits -= 5; - //---// - state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1; - //--- DROPBITS(5) ---// - hold >>>= 5; - bits -= 5; - //---// - state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4; - //--- DROPBITS(4) ---// - hold >>>= 4; - bits -= 4; - //---// -//#ifndef PKZIP_BUG_WORKAROUND - if (state.nlen > 286 || state.ndist > 30) { - strm.msg = 'too many length or distance symbols'; - state.mode = BAD; - break; - } -//#endif - //Tracev((stderr, "inflate: table sizes ok\n")); - state.have = 0; - state.mode = LENLENS; - /* falls through */ - case LENLENS: - while (state.have < state.ncode) { - //=== NEEDBITS(3); - while (bits < 3) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.lens[order[state.have++]] = (hold & 0x07);//BITS(3); - //--- DROPBITS(3) ---// - hold >>>= 3; - bits -= 3; - //---// - } - while (state.have < 19) { - state.lens[order[state.have++]] = 0; - } - // We have separate tables & no pointers. 2 commented lines below not needed. 
- //state.next = state.codes; - //state.lencode = state.next; - // Switch to use dynamic table - state.lencode = state.lendyn; - state.lenbits = 7; - opts = { bits: state.lenbits }; - ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts); - state.lenbits = opts.bits; + Z_FILTERED: 1, + Z_HUFFMAN_ONLY: 2, + Z_RLE: 3, + Z_FIXED: 4, + Z_DEFAULT_STRATEGY: 0, - if (ret) { - strm.msg = 'invalid code lengths set'; - state.mode = BAD; - break; - } - //Tracev((stderr, "inflate: code lengths ok\n")); - state.have = 0; - state.mode = CODELENS; - /* falls through */ - case CODELENS: - while (state.have < state.nlen + state.ndist) { - for (;;) { - here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/ - here_bits = here >>> 24; - here_op = (here >>> 16) & 0xff; - here_val = here & 0xffff; + /* Possible values of the data_type field (though see inflate()) */ + Z_BINARY: 0, + Z_TEXT: 1, + //Z_ASCII: 1, // = Z_TEXT (deprecated) + Z_UNKNOWN: 2, - if ((here_bits) <= bits) { break; } - //--- PULLBYTE() ---// - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - //---// - } - if (here_val < 16) { - //--- DROPBITS(here.bits) ---// - hold >>>= here_bits; - bits -= here_bits; - //---// - state.lens[state.have++] = here_val; - } - else { - if (here_val === 16) { - //=== NEEDBITS(here.bits + 2); - n = here_bits + 2; - while (bits < n) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - //--- DROPBITS(here.bits) ---// - hold >>>= here_bits; - bits -= here_bits; - //---// - if (state.have === 0) { - strm.msg = 'invalid bit length repeat'; - state.mode = BAD; - break; - } - len = state.lens[state.have - 1]; - copy = 3 + (hold & 0x03);//BITS(2); - //--- DROPBITS(2) ---// - hold >>>= 2; - bits -= 2; - //---// - } - else if (here_val === 17) { - //=== NEEDBITS(here.bits + 3); - n = here_bits + 3; - while (bits < n) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - //--- DROPBITS(here.bits) ---// - hold >>>= here_bits; - bits -= here_bits; - //---// - len = 0; - copy = 3 + (hold & 0x07);//BITS(3); - //--- DROPBITS(3) ---// - hold >>>= 3; - bits -= 3; - //---// - } - else { - //=== NEEDBITS(here.bits + 7); - n = here_bits + 7; - while (bits < n) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - //--- DROPBITS(here.bits) ---// - hold >>>= here_bits; - bits -= here_bits; - //---// - len = 0; - copy = 11 + (hold & 0x7f);//BITS(7); - //--- DROPBITS(7) ---// - hold >>>= 7; - bits -= 7; - //---// - } - if (state.have + copy > state.nlen + state.ndist) { - strm.msg = 'invalid bit length repeat'; - state.mode = BAD; - break; - } - while (copy--) { - state.lens[state.have++] = len; - } - } - } + /* The deflate compression method */ + Z_DEFLATED: 8 + //Z_NULL: null // Use -1 or null inline, depending on var type +}; - /* handle error breaks in while */ - if (state.mode === BAD) { break; } - /* check for end-of-block code (better have one) */ - if (state.lens[256] === 0) { - strm.msg = 'invalid code -- missing end-of-block'; - state.mode = BAD; - break; - } +/***/ }), - /* build code tables -- note: do not change the lenbits or distbits - values here (9 and 6) without reading the comments in inftrees.h - concerning the ENOUGH constants, which depend on those values */ - state.lenbits = 9; +/***/ 7242: +/***/ ((module) => { - opts = { bits: state.lenbits 
}; - ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts); - // We have separate tables & no pointers. 2 commented lines below not needed. - // state.next_index = opts.table_index; - state.lenbits = opts.bits; - // state.lencode = state.next; +"use strict"; - if (ret) { - strm.msg = 'invalid literal/lengths set'; - state.mode = BAD; - break; - } - state.distbits = 6; - //state.distcode.copy(state.codes); - // Switch to use dynamic table - state.distcode = state.distdyn; - opts = { bits: state.distbits }; - ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts); - // We have separate tables & no pointers. 2 commented lines below not needed. - // state.next_index = opts.table_index; - state.distbits = opts.bits; - // state.distcode = state.next; +// Note: we can't get significant speed boost here. +// So write code to minimize size - no pregenerated tables +// and array tools dependencies. - if (ret) { - strm.msg = 'invalid distances set'; - state.mode = BAD; - break; - } - //Tracev((stderr, 'inflate: codes ok\n')); - state.mode = LEN_; - if (flush === Z_TREES) { break inf_leave; } - /* falls through */ - case LEN_: - state.mode = LEN; - /* falls through */ - case LEN: - if (have >= 6 && left >= 258) { - //--- RESTORE() --- - strm.next_out = put; - strm.avail_out = left; - strm.next_in = next; - strm.avail_in = have; - state.hold = hold; - state.bits = bits; - //--- - inflate_fast(strm, _out); - //--- LOAD() --- - put = strm.next_out; - output = strm.output; - left = strm.avail_out; - next = strm.next_in; - input = strm.input; - have = strm.avail_in; - hold = state.hold; - bits = state.bits; - //--- +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
- if (state.mode === TYPE) { - state.back = -1; - } - break; - } - state.back = 0; - for (;;) { - here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/ - here_bits = here >>> 24; - here_op = (here >>> 16) & 0xff; - here_val = here & 0xffff; +// Use ordinary array, since untyped makes no boost here +function makeTable() { + var c, table = []; - if (here_bits <= bits) { break; } - //--- PULLBYTE() ---// - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - //---// - } - if (here_op && (here_op & 0xf0) === 0) { - last_bits = here_bits; - last_op = here_op; - last_val = here_val; - for (;;) { - here = state.lencode[last_val + - ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; - here_bits = here >>> 24; - here_op = (here >>> 16) & 0xff; - here_val = here & 0xffff; + for (var n = 0; n < 256; n++) { + c = n; + for (var k = 0; k < 8; k++) { + c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); + } + table[n] = c; + } - if ((last_bits + here_bits) <= bits) { break; } - //--- PULLBYTE() ---// - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - //---// - } - //--- DROPBITS(last.bits) ---// - hold >>>= last_bits; - bits -= last_bits; - //---// - state.back += last_bits; - } - //--- DROPBITS(here.bits) ---// - hold >>>= here_bits; - bits -= here_bits; - //---// - state.back += here_bits; - state.length = here_val; - if (here_op === 0) { - //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? - // "inflate: literal '%c'\n" : - // "inflate: literal 0x%02x\n", here.val)); - state.mode = LIT; - break; - } - if (here_op & 32) { - //Tracevv((stderr, "inflate: end of block\n")); - state.back = -1; - state.mode = TYPE; - break; - } - if (here_op & 64) { - strm.msg = 'invalid literal/length code'; - state.mode = BAD; - break; - } - state.extra = here_op & 15; - state.mode = LENEXT; - /* falls through */ - case LENEXT: - if (state.extra) { - //=== NEEDBITS(state.extra); - n = state.extra; - while (bits < n) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; - //--- DROPBITS(state.extra) ---// - hold >>>= state.extra; - bits -= state.extra; - //---// - state.back += state.extra; - } - //Tracevv((stderr, "inflate: length %u\n", state.length)); - state.was = state.length; - state.mode = DIST; - /* falls through */ - case DIST: - for (;;) { - here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/ - here_bits = here >>> 24; - here_op = (here >>> 16) & 0xff; - here_val = here & 0xffff; + return table; +} - if ((here_bits) <= bits) { break; } - //--- PULLBYTE() ---// - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - //---// - } - if ((here_op & 0xf0) === 0) { - last_bits = here_bits; - last_op = here_op; - last_val = here_val; - for (;;) { - here = state.distcode[last_val + - ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; - here_bits = here >>> 24; - here_op = (here >>> 16) & 0xff; - here_val = here & 0xffff; +// Create table on load. Just 255 signed longs. Not a problem. 
+var crcTable = makeTable(); - if ((last_bits + here_bits) <= bits) { break; } - //--- PULLBYTE() ---// - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - //---// - } - //--- DROPBITS(last.bits) ---// - hold >>>= last_bits; - bits -= last_bits; - //---// - state.back += last_bits; - } - //--- DROPBITS(here.bits) ---// - hold >>>= here_bits; - bits -= here_bits; - //---// - state.back += here_bits; - if (here_op & 64) { - strm.msg = 'invalid distance code'; - state.mode = BAD; - break; - } - state.offset = here_val; - state.extra = (here_op) & 15; - state.mode = DISTEXT; - /* falls through */ - case DISTEXT: - if (state.extra) { - //=== NEEDBITS(state.extra); - n = state.extra; - while (bits < n) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; - //--- DROPBITS(state.extra) ---// - hold >>>= state.extra; - bits -= state.extra; - //---// - state.back += state.extra; - } -//#ifdef INFLATE_STRICT - if (state.offset > state.dmax) { - strm.msg = 'invalid distance too far back'; - state.mode = BAD; - break; - } -//#endif - //Tracevv((stderr, "inflate: distance %u\n", state.offset)); - state.mode = MATCH; - /* falls through */ - case MATCH: - if (left === 0) { break inf_leave; } - copy = _out - left; - if (state.offset > copy) { /* copy from window */ - copy = state.offset - copy; - if (copy > state.whave) { - if (state.sane) { - strm.msg = 'invalid distance too far back'; - state.mode = BAD; - break; - } -// (!) This block is disabled in zlib defaults, -// don't enable it for binary compatibility -//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR -// Trace((stderr, "inflate.c too far\n")); -// copy -= state.whave; -// if (copy > state.length) { copy = state.length; } -// if (copy > left) { copy = left; } -// left -= copy; -// state.length -= copy; -// do { -// output[put++] = 0; -// } while (--copy); -// if (state.length === 0) { state.mode = LEN; } -// break; -//#endif - } - if (copy > state.wnext) { - copy -= state.wnext; - from = state.wsize - copy; - } - else { - from = state.wnext - copy; - } - if (copy > state.length) { copy = state.length; } - from_source = state.window; - } - else { /* copy from output */ - from_source = output; - from = put - state.offset; - copy = state.length; - } - if (copy > left) { copy = left; } - left -= copy; - state.length -= copy; - do { - output[put++] = from_source[from++]; - } while (--copy); - if (state.length === 0) { state.mode = LEN; } - break; - case LIT: - if (left === 0) { break inf_leave; } - output[put++] = state.length; - left--; - state.mode = LEN; - break; - case CHECK: - if (state.wrap) { - //=== NEEDBITS(32); - while (bits < 32) { - if (have === 0) { break inf_leave; } - have--; - // Use '|' instead of '+' to make sure that result is signed - hold |= input[next++] << bits; - bits += 8; - } - //===// - _out -= left; - strm.total_out += _out; - state.total += _out; - if (_out) { - strm.adler = state.check = - /*UPDATE(state.check, put - _out, _out);*/ - (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out)); - } - _out = left; - // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too - if ((state.flags ? 
hold : zswap32(hold)) !== state.check) { - strm.msg = 'incorrect data check'; - state.mode = BAD; - break; - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - //Tracev((stderr, "inflate: check matches trailer\n")); - } - state.mode = LENGTH; - /* falls through */ - case LENGTH: - if (state.wrap && state.flags) { - //=== NEEDBITS(32); - while (bits < 32) { - if (have === 0) { break inf_leave; } - have--; - hold += input[next++] << bits; - bits += 8; - } - //===// - if (hold !== (state.total & 0xffffffff)) { - strm.msg = 'incorrect length check'; - state.mode = BAD; - break; - } - //=== INITBITS(); - hold = 0; - bits = 0; - //===// - //Tracev((stderr, "inflate: length matches trailer\n")); - } - state.mode = DONE; - /* falls through */ - case DONE: - ret = Z_STREAM_END; - break inf_leave; - case BAD: - ret = Z_DATA_ERROR; - break inf_leave; - case MEM: - return Z_MEM_ERROR; - case SYNC: - /* falls through */ - default: - return Z_STREAM_ERROR; - } - } +function crc32(crc, buf, len, pos) { + var t = crcTable, + end = pos + len; - // inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave" + crc ^= -1; - /* - Return from inflate(), updating the total counts and the check value. - If there was no progress during the inflate() call, return a buffer - error. Call updatewindow() to create and/or update the window state. - Note: a memory error from inflate() is non-recoverable. - */ - - //--- RESTORE() --- - strm.next_out = put; - strm.avail_out = left; - strm.next_in = next; - strm.avail_in = have; - state.hold = hold; - state.bits = bits; - //--- - - if (state.wsize || (_out !== strm.avail_out && state.mode < BAD && - (state.mode < CHECK || flush !== Z_FINISH))) { - if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) { - state.mode = MEM; - return Z_MEM_ERROR; - } - } - _in -= strm.avail_in; - _out -= strm.avail_out; - strm.total_in += _in; - strm.total_out += _out; - state.total += _out; - if (state.wrap && _out) { - strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/ - (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out)); - } - strm.data_type = state.bits + (state.last ? 64 : 0) + - (state.mode === TYPE ? 128 : 0) + - (state.mode === LEN_ || state.mode === COPY_ ? 
256 : 0); - if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) { - ret = Z_BUF_ERROR; + for (var i = pos; i < end; i++) { + crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; } - return ret; + + return (crc ^ (-1)); // >>> 0; } -function inflateEnd(strm) { - if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) { - return Z_STREAM_ERROR; - } +module.exports = crc32; - var state = strm.state; - if (state.window) { - state.window = null; - } - strm.state = null; - return Z_OK; -} -function inflateGetHeader(strm, head) { - var state; +/***/ }), - /* check state */ - if (!strm || !strm.state) { return Z_STREAM_ERROR; } - state = strm.state; - if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; } +/***/ 978: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - /* save header structure */ - state.head = head; - head.done = false; - return Z_OK; -} +"use strict"; -function inflateSetDictionary(strm, dictionary) { - var dictLength = dictionary.length; - var state; - var dictid; - var ret; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
- /* check state */ - if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; } - state = strm.state; +var utils = __nccwpck_require__(5483); +var trees = __nccwpck_require__(8754); +var adler32 = __nccwpck_require__(6924); +var crc32 = __nccwpck_require__(7242); +var msg = __nccwpck_require__(1890); - if (state.wrap !== 0 && state.mode !== DICT) { - return Z_STREAM_ERROR; - } +/* Public constants ==========================================================*/ +/* ===========================================================================*/ - /* check for correct dictionary identifier */ - if (state.mode === DICT) { - dictid = 1; /* adler32(0, null, 0)*/ - /* dictid = adler32(dictid, dictionary, dictLength); */ - dictid = adler32(dictid, dictionary, dictLength, 0); - if (dictid !== state.check) { - return Z_DATA_ERROR; - } - } - /* copy dictionary to window using updatewindow(), which will amend the - existing dictionary if appropriate */ - ret = updatewindow(strm, dictionary, dictLength, dictLength); - if (ret) { - state.mode = MEM; - return Z_MEM_ERROR; - } - state.havedict = 1; - // Tracev((stderr, "inflate: dictionary set\n")); - return Z_OK; -} -exports.inflateReset = inflateReset; -exports.inflateReset2 = inflateReset2; -exports.inflateResetKeep = inflateResetKeep; -exports.inflateInit = inflateInit; -exports.inflateInit2 = inflateInit2; -exports.inflate = inflate; -exports.inflateEnd = inflateEnd; -exports.inflateGetHeader = inflateGetHeader; -exports.inflateSetDictionary = inflateSetDictionary; -exports.inflateInfo = 'pako inflate (from Nodeca project)'; +/* Allowed flush values; see deflate() and inflate() below for details */ +var Z_NO_FLUSH = 0; +var Z_PARTIAL_FLUSH = 1; +//var Z_SYNC_FLUSH = 2; +var Z_FULL_FLUSH = 3; +var Z_FINISH = 4; +var Z_BLOCK = 5; +//var Z_TREES = 6; -/* Not implemented -exports.inflateCopy = inflateCopy; -exports.inflateGetDictionary = inflateGetDictionary; -exports.inflateMark = inflateMark; -exports.inflatePrime = inflatePrime; -exports.inflateSync = inflateSync; -exports.inflateSyncPoint = inflateSyncPoint; -exports.inflateUndermine = inflateUndermine; -*/ + +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ +var Z_OK = 0; +var Z_STREAM_END = 1; +//var Z_NEED_DICT = 2; +//var Z_ERRNO = -1; +var Z_STREAM_ERROR = -2; +var Z_DATA_ERROR = -3; +//var Z_MEM_ERROR = -4; +var Z_BUF_ERROR = -5; +//var Z_VERSION_ERROR = -6; -/***/ }), +/* compression levels */ +//var Z_NO_COMPRESSION = 0; +//var Z_BEST_SPEED = 1; +//var Z_BEST_COMPRESSION = 9; +var Z_DEFAULT_COMPRESSION = -1; -/***/ 403: -/***/ (function(__unusedmodule, exports, __webpack_require__) { -"use strict"; +var Z_FILTERED = 1; +var Z_HUFFMAN_ONLY = 2; +var Z_RLE = 3; +var Z_FIXED = 4; +var Z_DEFAULT_STRATEGY = 0; -Object.defineProperty(exports, "__esModule", { value: true }); -const path = __webpack_require__(622); -const fsStat = __webpack_require__(231); -const fs = __webpack_require__(874); -class Settings { - constructor(_options = {}) { - this._options = _options; - this.followSymbolicLinks = this._getValue(this._options.followSymbolicLinks, false); - this.fs = fs.createFileSystemAdapter(this._options.fs); - this.pathSegmentSeparator = this._getValue(this._options.pathSegmentSeparator, path.sep); - this.stats = this._getValue(this._options.stats, false); - this.throwErrorOnBrokenSymbolicLink = this._getValue(this._options.throwErrorOnBrokenSymbolicLink, true); - this.fsStatSettings = new fsStat.Settings({ - followSymbolicLink: this.followSymbolicLinks, - fs: this.fs, - throwErrorOnBrokenSymbolicLink: this.throwErrorOnBrokenSymbolicLink - }); - } - _getValue(option, value) { - return option !== null && option !== void 0 ? option : value; - } -} -exports.default = Settings; +/* Possible values of the data_type field (though see inflate()) */ +//var Z_BINARY = 0; +//var Z_TEXT = 1; +//var Z_ASCII = 1; // = Z_TEXT +var Z_UNKNOWN = 2; -/***/ }), +/* The deflate compression method */ +var Z_DEFLATED = 8; -/***/ 406: -/***/ (function(module, __unusedexports, __webpack_require__) { +/*============================================================================*/ -"use strict"; -const taskManager = __webpack_require__(384); -const patternManager = __webpack_require__(36); -const async_1 = __webpack_require__(113); -const stream_1 = __webpack_require__(775); -const sync_1 = __webpack_require__(78); -const settings_1 = __webpack_require__(332); -const utils = __webpack_require__(444); -async function FastGlob(source, options) { - assertPatternsInput(source); - const works = getWorks(source, async_1.default, options); - const result = await Promise.all(works); - return utils.array.flatten(result); +var MAX_MEM_LEVEL = 9; +/* Maximum value for memLevel in deflateInit2 */ +var MAX_WBITS = 15; +/* 32K LZ77 window */ +var DEF_MEM_LEVEL = 8; + + +var LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ +var LITERALS = 256; +/* number of literal bytes 0..255 */ +var L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ +var D_CODES = 30; +/* number of distance codes */ +var BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ +var HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ +var MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ + +var MIN_MATCH = 3; +var MAX_MATCH = 258; +var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); + +var PRESET_DICT = 0x20; + +var INIT_STATE = 42; +var EXTRA_STATE = 69; +var NAME_STATE = 73; +var COMMENT_STATE = 91; +var HCRC_STATE = 103; +var BUSY_STATE = 113; +var FINISH_STATE = 666; + +var BS_NEED_MORE = 1; /* block not completed, need more input or more output */ +var BS_BLOCK_DONE = 2; /* 
block flush performed */ +var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */ +var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */ + +var OS_CODE = 0x03; // Unix :) . Don't detect, use this default. + +function err(strm, errorCode) { + strm.msg = msg[errorCode]; + return errorCode; } -// https://github.com/typescript-eslint/typescript-eslint/issues/60 -// eslint-disable-next-line no-redeclare -(function (FastGlob) { - function sync(source, options) { - assertPatternsInput(source); - const works = getWorks(source, sync_1.default, options); - return utils.array.flatten(works); - } - FastGlob.sync = sync; - function stream(source, options) { - assertPatternsInput(source); - const works = getWorks(source, stream_1.default, options); - /** - * The stream returned by the provider cannot work with an asynchronous iterator. - * To support asynchronous iterators, regardless of the number of tasks, we always multiplex streams. - * This affects performance (+25%). I don't see best solution right now. - */ - return utils.stream.merge(works); - } - FastGlob.stream = stream; - function generateTasks(source, options) { - assertPatternsInput(source); - const patterns = patternManager.transform([].concat(source)); - const settings = new settings_1.default(options); - return taskManager.generate(patterns, settings); - } - FastGlob.generateTasks = generateTasks; - function isDynamicPattern(source, options) { - assertPatternsInput(source); - const settings = new settings_1.default(options); - return utils.pattern.isDynamicPattern(source, settings); - } - FastGlob.isDynamicPattern = isDynamicPattern; - function escapePath(source) { - assertPatternsInput(source); - return utils.path.escape(source); - } - FastGlob.escapePath = escapePath; -})(FastGlob || (FastGlob = {})); -function getWorks(source, _Provider, options) { - const patterns = patternManager.transform([].concat(source)); - const settings = new settings_1.default(options); - const tasks = taskManager.generate(patterns, settings); - const provider = new _Provider(settings); - return tasks.map(provider.read, provider); -} -function assertPatternsInput(input) { - const source = [].concat(input); - const isValidSource = source.every((item) => utils.string.isString(item) && !utils.string.isEmpty(item)); - if (!isValidSource) { - throw new TypeError('Patterns must be a string (non empty) or an array of strings'); - } -} -module.exports = FastGlob; - -/***/ }), - -/***/ 413: -/***/ (function(module) { +function rank(f) { + return ((f) << 1) - ((f) > 4 ? 9 : 0); +} -module.exports = require("stream"); +function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } -/***/ }), -/***/ 418: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output goes + * through this function so some applications may wish to modify it + * to avoid allocating a large strm->output buffer and copying into it. + * (See also read_buf()). 
+ */ +function flush_pending(strm) { + var s = strm.state; -"use strict"; + //_tr_flush_bits(s); + var len = s.pending; + if (len > strm.avail_out) { + len = strm.avail_out; + } + if (len === 0) { return; } -Object.defineProperty(exports, "__esModule", { value: true }); -exports.removeLeadingDotSegment = exports.escape = exports.makeAbsolute = exports.unixify = void 0; -const path = __webpack_require__(622); -const LEADING_DOT_SEGMENT_CHARACTERS_COUNT = 2; // ./ or .\\ -const UNESCAPED_GLOB_SYMBOLS_RE = /(\\?)([()*?[\]{|}]|^!|[!+@](?=\())/g; -/** - * Designed to work only with simple paths: `dir\\file`. - */ -function unixify(filepath) { - return filepath.replace(/\\/g, '/'); -} -exports.unixify = unixify; -function makeAbsolute(cwd, filepath) { - return path.resolve(cwd, filepath); -} -exports.makeAbsolute = makeAbsolute; -function escape(pattern) { - return pattern.replace(UNESCAPED_GLOB_SYMBOLS_RE, '\\$2'); -} -exports.escape = escape; -function removeLeadingDotSegment(entry) { - // We do not use `startsWith` because this is 10x slower than current implementation for some cases. - // eslint-disable-next-line @typescript-eslint/prefer-string-starts-ends-with - if (entry.charAt(0) === '.') { - const secondCharactery = entry.charAt(1); - if (secondCharactery === '/' || secondCharactery === '\\') { - return entry.slice(LEADING_DOT_SEGMENT_CHARACTERS_COUNT); - } - } - return entry; + utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out); + strm.next_out += len; + s.pending_out += len; + strm.total_out += len; + strm.avail_out -= len; + s.pending -= len; + if (s.pending === 0) { + s.pending_out = 0; + } } -exports.removeLeadingDotSegment = removeLeadingDotSegment; -/***/ }), +function flush_block_only(s, last) { + trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last); + s.block_start = s.strstart; + flush_pending(s.strm); +} -/***/ 422: -/***/ (function(module, __unusedexports, __webpack_require__) { -try { - var util = __webpack_require__(669); - /* istanbul ignore next */ - if (typeof util.inherits !== 'function') throw ''; - module.exports = util.inherits; -} catch (e) { - /* istanbul ignore next */ - module.exports = __webpack_require__(315); +function put_byte(s, b) { + s.pending_buf[s.pending++] = b; } -/***/ }), - -/***/ 435: -/***/ (function(module, __unusedexports, __webpack_require__) { +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +function putShortMSB(s, b) { +// put_byte(s, (Byte)(b >> 8)); +// put_byte(s, (Byte)(b & 0xff)); + s.pending_buf[s.pending++] = (b >>> 8) & 0xff; + s.pending_buf[s.pending++] = b & 0xff; +} -"use strict"; +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->input buffer and copying from it. + * (See also flush_pending()). 
+ */ +function read_buf(strm, buf, start, size) { + var len = strm.avail_in; -const fill = __webpack_require__(730); -const utils = __webpack_require__(225); + if (len > size) { len = size; } + if (len === 0) { return 0; } -const compile = (ast, options = {}) => { - let walk = (node, parent = {}) => { - let invalidBlock = utils.isInvalidBrace(parent); - let invalidNode = node.invalid === true && options.escapeInvalid === true; - let invalid = invalidBlock === true || invalidNode === true; - let prefix = options.escapeInvalid === true ? '\\' : ''; - let output = ''; + strm.avail_in -= len; - if (node.isOpen === true) { - return prefix + node.value; - } - if (node.isClose === true) { - return prefix + node.value; - } + // zmemcpy(buf, strm->next_in, len); + utils.arraySet(buf, strm.input, strm.next_in, len, start); + if (strm.state.wrap === 1) { + strm.adler = adler32(strm.adler, buf, len, start); + } - if (node.type === 'open') { - return invalid ? (prefix + node.value) : '('; - } + else if (strm.state.wrap === 2) { + strm.adler = crc32(strm.adler, buf, len, start); + } - if (node.type === 'close') { - return invalid ? (prefix + node.value) : ')'; - } + strm.next_in += len; + strm.total_in += len; - if (node.type === 'comma') { - return node.prev.type === 'comma' ? '' : (invalid ? node.value : '|'); - } + return len; +} - if (node.value) { - return node.value; - } - if (node.nodes && node.ranges > 0) { - let args = utils.reduce(node.nodes); - let range = fill(...args, { ...options, wrap: false, toRegex: true }); +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +function longest_match(s, cur_match) { + var chain_length = s.max_chain_length; /* max hash chain length */ + var scan = s.strstart; /* current string */ + var match; /* matched string */ + var len; /* length of current match */ + var best_len = s.prev_length; /* best match length so far */ + var nice_match = s.nice_match; /* stop if match long enough */ + var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ? + s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/; - if (range.length !== 0) { - return args.length > 1 && range.length > 1 ? `(${range})` : range; - } - } + var _win = s.window; // shortcut - if (node.nodes) { - for (let child of node.nodes) { - output += walk(child, node); - } - } - return output; - }; + var wmask = s.w_mask; + var prev = s.prev; - return walk(ast); -}; + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ -module.exports = compile; + var strend = s.strstart + MAX_MATCH; + var scan_end1 = _win[scan + best_len - 1]; + var scan_end = _win[scan + best_len]; + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. 
+ */ + // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); -/***/ }), + /* Do not waste too much time if we already have a good match: */ + if (s.prev_length >= s.good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if (nice_match > s.lookahead) { nice_match = s.lookahead; } -/***/ 440: -/***/ (function(module) { + // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); -"use strict"; + do { + // Assert(cur_match < s->strstart, "no future"); + match = cur_match; + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ -function reusify (Constructor) { - var head = new Constructor() - var tail = head - - function get () { - var current = head - - if (current.next) { - head = current.next - } else { - head = new Constructor() - tail = head + if (_win[match + best_len] !== scan_end || + _win[match + best_len - 1] !== scan_end1 || + _win[match] !== _win[scan] || + _win[++match] !== _win[scan + 1]) { + continue; } - current.next = null - - return current - } - - function release (obj) { - tail.next = obj - tail = obj - } - - return { - get: get, - release: release - } -} - -module.exports = reusify - - -/***/ }), - -/***/ 441: -/***/ (function(module, __unusedexports, __webpack_require__) { - -"use strict"; - - -const fill = __webpack_require__(730); -const stringify = __webpack_require__(382); -const utils = __webpack_require__(225); + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2; + match++; + // Assert(*scan == *match, "match[2]?"); -const append = (queue = '', stash = '', enclose = false) => { - let result = []; + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + /*jshint noempty:false*/ + } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + scan < strend); - queue = [].concat(queue); - stash = [].concat(stash); + // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); - if (!stash.length) return queue; - if (!queue.length) { - return enclose ? utils.flatten(stash).map(ele => `{${ele}}`) : stash; - } + len = MAX_MATCH - (strend - scan); + scan = strend - MAX_MATCH; - for (let item of queue) { - if (Array.isArray(item)) { - for (let value of item) { - result.push(append(value, stash, enclose)); - } - } else { - for (let ele of stash) { - if (enclose === true && typeof ele === 'string') ele = `{${ele}}`; - result.push(Array.isArray(ele) ? 
append(item, ele, enclose) : (item + ele)); + if (len > best_len) { + s.match_start = cur_match; + best_len = len; + if (len >= nice_match) { + break; } + scan_end1 = _win[scan + best_len - 1]; + scan_end = _win[scan + best_len]; } + } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0); + + if (best_len <= s.lookahead) { + return best_len; } - return utils.flatten(result); -}; + return s.lookahead; +} -const expand = (ast, options = {}) => { - let rangeLimit = options.rangeLimit === void 0 ? 1000 : options.rangeLimit; - let walk = (node, parent = {}) => { - node.queue = []; +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. + * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +function fill_window(s) { + var _w_size = s.w_size; + var p, n, m, more, str; - let p = parent; - let q = parent.queue; + //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); - while (p.type !== 'brace' && p.type !== 'root' && p.parent) { - p = p.parent; - q = p.queue; - } + do { + more = s.window_size - s.lookahead - s.strstart; - if (node.invalid || node.dollar) { - q.push(append(q.pop(), stringify(node, options))); - return; - } + // JS ints have 32 bit, block below not needed + /* Deal with !@#$% 64K limit: */ + //if (sizeof(int) <= 2) { + // if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + // more = wsize; + // + // } else if (more == (unsigned)(-1)) { + // /* Very unlikely, but possible on 16 bit machine if + // * strstart == 0 && lookahead == 1 (input done a byte at time) + // */ + // more--; + // } + //} - if (node.type === 'brace' && node.invalid !== true && node.nodes.length === 2) { - q.push(append(q.pop(), ['{}'])); - return; - } - if (node.nodes && node.ranges > 0) { - let args = utils.reduce(node.nodes); + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) { - if (utils.exceedsLimit(...args, options.step, rangeLimit)) { - throw new RangeError('expanded array length exceeds range limit. Use options.rangeLimit to increase or disable the limit.'); - } + utils.arraySet(s.window, s.window, _w_size, _w_size, 0); + s.match_start -= _w_size; + s.strstart -= _w_size; + /* we now have strstart >= MAX_DIST */ + s.block_start -= _w_size; - let range = fill(...args, options); - if (range.length === 0) { - range = stringify(node, options); - } + /* Slide the hash table (could be avoided with 32 bit values + at the expense of memory usage). We slide even when level == 0 + to keep the hash table consistent if we switch back to level > 0 + later. (Using level 0 permanently is not an optimal usage of + zlib, so we don't care about this pathological case.) + */ - q.push(append(q.pop(), range)); - node.nodes = []; - return; - } + n = s.hash_size; + p = n; + do { + m = s.head[--p]; + s.head[p] = (m >= _w_size ? m - _w_size : 0); + } while (--n); - let enclose = utils.encloseBrace(node); - let queue = node.queue; - let block = node; + n = _w_size; + p = n; + do { + m = s.prev[--p]; + s.prev[p] = (m >= _w_size ? 
m - _w_size : 0); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. + */ + } while (--n); - while (block.type !== 'brace' && block.type !== 'root' && block.parent) { - block = block.parent; - queue = block.queue; + more += _w_size; + } + if (s.strm.avail_in === 0) { + break; } - for (let i = 0; i < node.nodes.length; i++) { - let child = node.nodes[i]; - - if (child.type === 'comma' && node.type === 'brace') { - if (i === 1) queue.push(''); - queue.push(''); - continue; - } + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. + */ + //Assert(more >= 2, "more < 2"); + n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more); + s.lookahead += n; - if (child.type === 'close') { - q.push(append(q.pop(), queue, enclose)); - continue; - } + /* Initialize the hash value now that we have some input: */ + if (s.lookahead + s.insert >= MIN_MATCH) { + str = s.strstart - s.insert; + s.ins_h = s.window[str]; - if (child.value && child.type !== 'open') { - queue.push(append(queue.pop(), child.value)); - continue; - } + /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask; +//#if MIN_MATCH != 3 +// Call update_hash() MIN_MATCH-3 more times +//#endif + while (s.insert) { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; - if (child.nodes) { - walk(child, node); + s.prev[str & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = str; + str++; + s.insert--; + if (s.lookahead + s.insert < MIN_MATCH) { + break; + } } } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ - return queue; - }; - - return utils.flatten(walk(ast)); -}; + } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0); -module.exports = expand; - - -/***/ }), - -/***/ 444: -/***/ (function(__unusedmodule, exports, __webpack_require__) { - -"use strict"; + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ +// if (s.high_water < s.window_size) { +// var curr = s.strstart + s.lookahead; +// var init = 0; +// +// if (s.high_water < curr) { +// /* Previous high water mark below current data -- zero WIN_INIT +// * bytes or up to end of window, whichever is less. 
+// */ +// init = s.window_size - curr; +// if (init > WIN_INIT) +// init = WIN_INIT; +// zmemzero(s->window + curr, (unsigned)init); +// s->high_water = curr + init; +// } +// else if (s->high_water < (ulg)curr + WIN_INIT) { +// /* High water mark at or above current data, but below current data +// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up +// * to end of window, whichever is less. +// */ +// init = (ulg)curr + WIN_INIT - s->high_water; +// if (init > s->window_size - s->high_water) +// init = s->window_size - s->high_water; +// zmemzero(s->window + s->high_water, (unsigned)init); +// s->high_water += init; +// } +// } +// +// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, +// "not enough room for search"); +} -Object.defineProperty(exports, "__esModule", { value: true }); -exports.string = exports.stream = exports.pattern = exports.path = exports.fs = exports.errno = exports.array = void 0; -const array = __webpack_require__(453); -exports.array = array; -const errno = __webpack_require__(115); -exports.errno = errno; -const fs = __webpack_require__(43); -exports.fs = fs; -const path = __webpack_require__(418); -exports.path = path; -const pattern = __webpack_require__(724); -exports.pattern = pattern; -const stream = __webpack_require__(42); -exports.stream = stream; -const string = __webpack_require__(884); -exports.string = string; +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * This function does not insert new strings in the dictionary since + * uncompressible data is probably not useful. This function is used + * only for the level=0 compression option. + * NOTE: this function should be optimized to avoid extra copying from + * window to pending_buf. 
+ */ +function deflate_stored(s, flush) { + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited + * to pending_buf_size, and each stored block has a 5 byte header: + */ + var max_block_size = 0xffff; + if (max_block_size > s.pending_buf_size - 5) { + max_block_size = s.pending_buf_size - 5; + } -/***/ }), + /* Copy as much as possible from input to output: */ + for (;;) { + /* Fill the window as much as possible: */ + if (s.lookahead <= 1) { -/***/ 453: -/***/ (function(__unusedmodule, exports) { + //Assert(s->strstart < s->w_size+MAX_DIST(s) || + // s->block_start >= (long)s->w_size, "slide too late"); +// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) || +// s.block_start >= s.w_size)) { +// throw new Error("slide too late"); +// } -"use strict"; + fill_window(s); + if (s.lookahead === 0 && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } -Object.defineProperty(exports, "__esModule", { value: true }); -exports.splitWhen = exports.flatten = void 0; -function flatten(items) { - return items.reduce((collection, item) => [].concat(collection, item), []); -} -exports.flatten = flatten; -function splitWhen(items, predicate) { - const result = [[]]; - let groupIndex = 0; - for (const item of items) { - if (predicate(item)) { - groupIndex++; - result[groupIndex] = []; - } - else { - result[groupIndex].push(item); - } + if (s.lookahead === 0) { + break; + } + /* flush the current block */ } - return result; -} -exports.splitWhen = splitWhen; + //Assert(s->block_start >= 0L, "block gone"); +// if (s.block_start < 0) throw new Error("block gone"); + s.strstart += s.lookahead; + s.lookahead = 0; -/***/ }), + /* Emit a stored block if pending_buf will be full: */ + var max_start = s.block_start + max_block_size; -/***/ 519: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + if (s.strstart === 0 || s.strstart >= max_start) { + /* strstart == 0 is possible when wraparound on 16-bit machine */ + s.lookahead = s.strstart - max_start; + s.strstart = max_start; + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const fsStat = __webpack_require__(231); -const fsWalk = __webpack_require__(522); -const reader_1 = __webpack_require__(949); -class ReaderSync extends reader_1.default { - constructor() { - super(...arguments); - this._walkSync = fsWalk.walkSync; - this._statSync = fsStat.statSync; - } - dynamic(root, options) { - return this._walkSync(root, options); - } - static(patterns, options) { - const entries = []; - for (const pattern of patterns) { - const filepath = this._getFullEntryPath(pattern); - const entry = this._getEntry(filepath, pattern, options); - if (entry === null || !options.entryFilter(entry)) { - continue; - } - entries.push(entry); - } - return entries; - } - _getEntry(filepath, pattern, options) { - try { - const stats = this._getStat(filepath); - return this._makeEntry(stats, pattern); - } - catch (error) { - if (options.errorFilter(error)) { - return null; - } - throw error; - } } - _getStat(filepath) { - return this._statSync(filepath, this._fsStatSettings); + /* Flush if we may have to slide, otherwise block_start may become + * negative and the data will be gone: + */ + if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ } -} -exports.default = 
ReaderSync; - - -/***/ }), - -/***/ 522: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + } -"use strict"; + s.insert = 0; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.Settings = exports.walkStream = exports.walkSync = exports.walk = void 0; -const async_1 = __webpack_require__(768); -const stream_1 = __webpack_require__(798); -const sync_1 = __webpack_require__(833); -const settings_1 = __webpack_require__(611); -exports.Settings = settings_1.default; -function walk(directory, optionsOrSettingsOrCallback, callback) { - if (typeof optionsOrSettingsOrCallback === 'function') { - new async_1.default(directory, getSettings()).read(optionsOrSettingsOrCallback); - return; - } - new async_1.default(directory, getSettings(optionsOrSettingsOrCallback)).read(callback); -} -exports.walk = walk; -function walkSync(directory, optionsOrSettings) { - const settings = getSettings(optionsOrSettings); - const provider = new sync_1.default(directory, settings); - return provider.read(); -} -exports.walkSync = walkSync; -function walkStream(directory, optionsOrSettings) { - const settings = getSettings(optionsOrSettings); - const provider = new stream_1.default(directory, settings); - return provider.read(); -} -exports.walkStream = walkStream; -function getSettings(settingsOrOptions = {}) { - if (settingsOrOptions instanceof settings_1.default) { - return settingsOrOptions; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; } - return new settings_1.default(settingsOrOptions); -} - + /***/ + return BS_FINISH_DONE; + } -/***/ }), + if (s.strstart > s.block_start) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } -/***/ 529: -/***/ (function(module, __unusedexports, __webpack_require__) { + return BS_NEED_MORE; +} -/* - * A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined - * in FIPS PUB 180-1 - * Version 2.1a Copyright Paul Johnston 2000 - 2002. - * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet - * Distributed under the BSD License - * See http://pajhome.org.uk/crypt/md5 for details. +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. */ +function deflate_fast(s, flush) { + var hash_head; /* head of the hash chain */ + var bflush; /* set if current block must be flushed */ -var inherits = __webpack_require__(422) -var Hash = __webpack_require__(61) -var Buffer = __webpack_require__(149).Buffer + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. 
+ */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { + break; /* flush the current block */ + } + } -var K = [ - 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc | 0, 0xca62c1d6 | 0 -] - -var W = new Array(80) - -function Sha1 () { - this.init() - this._w = W - - Hash.call(this, 64, 56) -} - -inherits(Sha1, Hash) + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } -Sha1.prototype.init = function () { - this._a = 0x67452301 - this._b = 0xefcdab89 - this._c = 0x98badcfe - this._d = 0x10325476 - this._e = 0xc3d2e1f0 + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + } + if (s.match_length >= MIN_MATCH) { + // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only - return this -} + /*** _tr_tally_dist(s, s.strstart - s.match_start, + s.match_length - MIN_MATCH, bflush); ***/ + bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH); -function rotl1 (num) { - return (num << 1) | (num >>> 31) -} + s.lookahead -= s.match_length; -function rotl5 (num) { - return (num << 5) | (num >>> 27) -} + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ + if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) { + s.match_length--; /* string at strstart already in table */ + do { + s.strstart++; + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s.match_length !== 0); + s.strstart++; + } else + { + s.strstart += s.match_length; + s.match_length = 0; + s.ins_h = s.window[s.strstart]; + /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask; -function rotl30 (num) { - return (num << 30) | (num >>> 2) -} +//#if MIN_MATCH != 3 +// Call UPDATE_HASH() MIN_MATCH-3 more times +//#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. 
+ */ + } + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s.window[s.strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); -function ft (s, b, c, d) { - if (s === 0) return (b & c) | ((~b) & d) - if (s === 2) return (b & c) | (b & d) | (c & d) - return b ^ c ^ d + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1); + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; } -Sha1.prototype._update = function (M) { - var W = this._w +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. + */ +function deflate_slow(s, flush) { + var hash_head; /* head of hash chain */ + var bflush; /* set if current block must be flushed */ - var a = this._a | 0 - var b = this._b | 0 - var c = this._c | 0 - var d = this._d | 0 - var e = this._e | 0 + var max_insert; - for (var i = 0; i < 16; ++i) W[i] = M.readInt32BE(i * 4) - for (; i < 80; ++i) W[i] = rotl1(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16]) + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } - for (var j = 0; j < 80; ++j) { - var s = ~~(j / 20) - var t = (rotl5(a) + ft(s, b, c, d) + e + W[j] + K[s]) | 0 + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } - e = d - d = c - c = rotl30(b) - b = a - a = t - } + /* Find the longest match, discarding those <= prev_length. + */ + s.prev_length = s.match_length; + s.prev_match = s.match_start; + s.match_length = MIN_MATCH - 1; - this._a = (a + this._a) | 0 - this._b = (b + this._b) | 0 - this._c = (c + this._c) | 0 - this._d = (d + this._d) | 0 - this._e = (e + this._e) | 0 -} + if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match && + s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). 
+ */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ -Sha1.prototype._hash = function () { - var H = Buffer.allocUnsafe(20) + if (s.match_length <= 5 && + (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) { - H.writeInt32BE(this._a | 0, 0) - H.writeInt32BE(this._b | 0, 4) - H.writeInt32BE(this._c | 0, 8) - H.writeInt32BE(this._d | 0, 12) - H.writeInt32BE(this._e | 0, 16) + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. + */ + s.match_length = MIN_MATCH - 1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) { + max_insert = s.strstart + s.lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ - return H -} + //check_match(s, s.strstart-1, s.prev_match, s.prev_length); -module.exports = Sha1 + /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match, + s.prev_length - MIN_MATCH, bflush);***/ + bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH); + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s.lookahead -= s.prev_length - 1; + s.prev_length -= 2; + do { + if (++s.strstart <= max_insert) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + } while (--s.prev_length !== 0); + s.match_available = 0; + s.match_length = MIN_MATCH - 1; + s.strstart++; + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } -/***/ }), - -/***/ 537: -/***/ (function(module, __unusedexports, __webpack_require__) { - -"use strict"; + } else if (s.match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); + if (bflush) { + /*** FLUSH_BLOCK_ONLY(s, 0) ***/ + flush_block_only(s, false); + /***/ + } + s.strstart++; + s.lookahead--; + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s.match_available = 1; + s.strstart++; + s.lookahead--; + } + } + //Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s.match_available) { + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); -const utils = __webpack_require__(265); -const { - CHAR_ASTERISK, /* * */ - CHAR_AT, /* @ */ - CHAR_BACKWARD_SLASH, /* \ */ - CHAR_COMMA, /* , */ - CHAR_DOT, /* . */ - CHAR_EXCLAMATION_MARK, /* ! */ - CHAR_FORWARD_SLASH, /* / */ - CHAR_LEFT_CURLY_BRACE, /* { */ - CHAR_LEFT_PARENTHESES, /* ( */ - CHAR_LEFT_SQUARE_BRACKET, /* [ */ - CHAR_PLUS, /* + */ - CHAR_QUESTION_MARK, /* ? 
*/ - CHAR_RIGHT_CURLY_BRACE, /* } */ - CHAR_RIGHT_PARENTHESES, /* ) */ - CHAR_RIGHT_SQUARE_BRACKET /* ] */ -} = __webpack_require__(199); + s.match_available = 0; + } + s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } -const isPathSeparator = code => { - return code === CHAR_FORWARD_SLASH || code === CHAR_BACKWARD_SLASH; -}; + return BS_BLOCK_DONE; +} -const depth = token => { - if (token.isPrefix !== true) { - token.depth = token.isGlobstar ? Infinity : 1; - } -}; -/** - * Quickly scans a glob pattern and returns an object with a handful of - * useful properties, like `isGlob`, `path` (the leading non-glob, if it exists), - * `glob` (the actual pattern), `negated` (true if the path starts with `!` but not - * with `!(`) and `negatedExtglob` (true if the path starts with `!(`). - * - * ```js - * const pm = require('picomatch'); - * console.log(pm.scan('foo/bar/*.js')); - * { isGlob: true, input: 'foo/bar/*.js', base: 'foo/bar', glob: '*.js' } - * ``` - * @param {String} `str` - * @param {Object} `options` - * @return {Object} Returns an object with tokens and regex source string. - * @api public +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) */ +function deflate_rle(s, flush) { + var bflush; /* set if current block must be flushed */ + var prev; /* byte at distance one to match */ + var scan, strend; /* scan goes up to strend for length of run */ -const scan = (input, options) => { - const opts = options || {}; + var _win = s.window; - const length = input.length - 1; - const scanToEnd = opts.parts === true || opts.scanToEnd === true; - const slashes = []; - const tokens = []; - const parts = []; + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. 
+ */ + if (s.lookahead <= MAX_MATCH) { + fill_window(s); + if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } - let str = input; - let index = -1; - let start = 0; - let lastIndex = 0; - let isBrace = false; - let isBracket = false; - let isGlob = false; - let isExtglob = false; - let isGlobstar = false; - let braceEscaped = false; - let backslashes = false; - let negated = false; - let negatedExtglob = false; - let finished = false; - let braces = 0; - let prev; - let code; - let token = { value: '', depth: 0, isGlob: false }; + /* See how many times the previous byte repeats */ + s.match_length = 0; + if (s.lookahead >= MIN_MATCH && s.strstart > 0) { + scan = s.strstart - 1; + prev = _win[scan]; + if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) { + strend = s.strstart + MAX_MATCH; + do { + /*jshint noempty:false*/ + } while (prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + scan < strend); + s.match_length = MAX_MATCH - (strend - scan); + if (s.match_length > s.lookahead) { + s.match_length = s.lookahead; + } + } + //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } - const eos = () => index >= length; - const peek = () => str.charCodeAt(index + 1); - const advance = () => { - prev = code; - return str.charCodeAt(++index); - }; + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s.match_length >= MIN_MATCH) { + //check_match(s, s.strstart, s.strstart - 1, s.match_length); - while (index < length) { - code = advance(); - let next; + /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/ + bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH); - if (code === CHAR_BACKWARD_SLASH) { - backslashes = token.backslashes = true; - code = advance(); + s.lookahead -= s.match_length; + s.strstart += s.match_length; + s.match_length = 0; + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); - if (code === CHAR_LEFT_CURLY_BRACE) { - braceEscaped = true; + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; } - continue; + /***/ } + } + s.insert = 0; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} - if (braceEscaped === true || code === CHAR_LEFT_CURLY_BRACE) { - braces++; - - while (eos() !== true && (code = advance())) { - if (code === CHAR_BACKWARD_SLASH) { - backslashes = token.backslashes = true; - advance(); - continue; - } +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) 
+ */ +function deflate_huff(s, flush) { + var bflush; /* set if current block must be flushed */ - if (code === CHAR_LEFT_CURLY_BRACE) { - braces++; - continue; + for (;;) { + /* Make sure that we have a literal to write. */ + if (s.lookahead === 0) { + fill_window(s); + if (s.lookahead === 0) { + if (flush === Z_NO_FLUSH) { + return BS_NEED_MORE; } + break; /* flush the current block */ + } + } - if (braceEscaped !== true && code === CHAR_DOT && (code = advance()) === CHAR_DOT) { - isBrace = token.isBrace = true; - isGlob = token.isGlob = true; - finished = true; - - if (scanToEnd === true) { - continue; - } - - break; - } + /* Output a literal byte */ + s.match_length = 0; + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + s.lookahead--; + s.strstart++; + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} - if (braceEscaped !== true && code === CHAR_COMMA) { - isBrace = token.isBrace = true; - isGlob = token.isGlob = true; - finished = true; +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. 
+ */ +function Config(good_length, max_lazy, nice_length, max_chain, func) { + this.good_length = good_length; + this.max_lazy = max_lazy; + this.nice_length = nice_length; + this.max_chain = max_chain; + this.func = func; +} - if (scanToEnd === true) { - continue; - } +var configuration_table; - break; - } +configuration_table = [ + /* good lazy nice chain */ + new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */ + new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */ + new Config(4, 5, 16, 8, deflate_fast), /* 2 */ + new Config(4, 6, 32, 32, deflate_fast), /* 3 */ - if (code === CHAR_RIGHT_CURLY_BRACE) { - braces--; + new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */ + new Config(8, 16, 32, 32, deflate_slow), /* 5 */ + new Config(8, 16, 128, 128, deflate_slow), /* 6 */ + new Config(8, 32, 128, 256, deflate_slow), /* 7 */ + new Config(32, 128, 258, 1024, deflate_slow), /* 8 */ + new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */ +]; - if (braces === 0) { - braceEscaped = false; - isBrace = token.isBrace = true; - finished = true; - break; - } - } - } - if (scanToEnd === true) { - continue; - } +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +function lm_init(s) { + s.window_size = 2 * s.w_size; - break; - } + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); - if (code === CHAR_FORWARD_SLASH) { - slashes.push(index); - tokens.push(token); - token = { value: '', depth: 0, isGlob: false }; + /* Set the default configuration parameters: + */ + s.max_lazy_match = configuration_table[s.level].max_lazy; + s.good_match = configuration_table[s.level].good_length; + s.nice_match = configuration_table[s.level].nice_length; + s.max_chain_length = configuration_table[s.level].max_chain; - if (finished === true) continue; - if (prev === CHAR_DOT && index === (start + 1)) { - start += 2; - continue; - } + s.strstart = 0; + s.block_start = 0; + s.lookahead = 0; + s.insert = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + s.ins_h = 0; +} - lastIndex = index + 1; - continue; - } - if (opts.noext !== true) { - const isExtglobChar = code === CHAR_PLUS - || code === CHAR_AT - || code === CHAR_ASTERISK - || code === CHAR_QUESTION_MARK - || code === CHAR_EXCLAMATION_MARK; +function DeflateState() { + this.strm = null; /* pointer back to this zlib stream */ + this.status = 0; /* as the name implies */ + this.pending_buf = null; /* output still pending */ + this.pending_buf_size = 0; /* size of pending_buf */ + this.pending_out = 0; /* next pending byte to output to the stream */ + this.pending = 0; /* nb of bytes in the pending buffer */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.gzhead = null; /* gzip header information to write */ + this.gzindex = 0; /* where in extra, name, or comment */ + this.method = Z_DEFLATED; /* can only be DEFLATED */ + this.last_flush = -1; /* value of flush param for previous deflate call */ - if (isExtglobChar === true && peek() === CHAR_LEFT_PARENTHESES) { - isGlob = token.isGlob = true; - isExtglob = token.isExtglob = true; - finished = true; - if (code === CHAR_EXCLAMATION_MARK && index === start) { - negatedExtglob = true; - } + this.w_size = 0; /* LZ77 window size (32K by default) */ + this.w_bits = 0; /* log2(w_size) (8..16) */ + this.w_mask = 0; /* w_size - 1 */ - if (scanToEnd === true) { - while (eos() !== true && (code = advance())) { - if 
(code === CHAR_BACKWARD_SLASH) { - backslashes = token.backslashes = true; - code = advance(); - continue; - } + this.window = null; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. + */ - if (code === CHAR_RIGHT_PARENTHESES) { - isGlob = token.isGlob = true; - finished = true; - break; - } - } - continue; - } - break; - } - } + this.window_size = 0; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ - if (code === CHAR_ASTERISK) { - if (prev === CHAR_ASTERISK) isGlobstar = token.isGlobstar = true; - isGlob = token.isGlob = true; - finished = true; + this.prev = null; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ - if (scanToEnd === true) { - continue; - } - break; - } + this.head = null; /* Heads of the hash chains or NIL. */ - if (code === CHAR_QUESTION_MARK) { - isGlob = token.isGlob = true; - finished = true; + this.ins_h = 0; /* hash index of string to be inserted */ + this.hash_size = 0; /* number of elements in hash table */ + this.hash_bits = 0; /* log2(hash_size) */ + this.hash_mask = 0; /* hash_size-1 */ - if (scanToEnd === true) { - continue; - } - break; - } + this.hash_shift = 0; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ - if (code === CHAR_LEFT_SQUARE_BRACKET) { - while (eos() !== true && (next = advance())) { - if (next === CHAR_BACKWARD_SLASH) { - backslashes = token.backslashes = true; - advance(); - continue; - } + this.block_start = 0; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ - if (next === CHAR_RIGHT_SQUARE_BRACKET) { - isBracket = token.isBracket = true; - isGlob = token.isGlob = true; - finished = true; - break; - } - } + this.match_length = 0; /* length of best match */ + this.prev_match = 0; /* previous match */ + this.match_available = 0; /* set if previous match exists */ + this.strstart = 0; /* start of string to insert */ + this.match_start = 0; /* start of matching string */ + this.lookahead = 0; /* number of valid bytes ahead in window */ - if (scanToEnd === true) { - continue; - } + this.prev_length = 0; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ - break; - } + this.max_chain_length = 0; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ - if (opts.nonegate !== true && code === CHAR_EXCLAMATION_MARK && index === start) { - negated = token.negated = true; - start++; - continue; - } + this.max_lazy_match = 0; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. This mechanism is used only for compression + * levels >= 4. 
+ */ + // That's alias to max_lazy_match, don't use directly + //this.max_insert_length = 0; + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. + */ - if (opts.noparen !== true && code === CHAR_LEFT_PARENTHESES) { - isGlob = token.isGlob = true; + this.level = 0; /* compression level (1..9) */ + this.strategy = 0; /* favor or force Huffman coding*/ - if (scanToEnd === true) { - while (eos() !== true && (code = advance())) { - if (code === CHAR_LEFT_PARENTHESES) { - backslashes = token.backslashes = true; - code = advance(); - continue; - } + this.good_match = 0; + /* Use a faster search when the previous match is longer than this */ - if (code === CHAR_RIGHT_PARENTHESES) { - finished = true; - break; - } - } - continue; - } - break; - } + this.nice_match = 0; /* Stop searching when current match exceeds this */ - if (isGlob === true) { - finished = true; + /* used by trees.c: */ - if (scanToEnd === true) { - continue; - } + /* Didn't use ct_data typedef below to suppress compiler warning */ - break; - } - } + // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ - if (opts.noext === true) { - isExtglob = false; - isGlob = false; - } + // Use flat array of DOUBLE size, with interleaved fata, + // because JS does not support effective + this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2); + this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2); + this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2); + zero(this.dyn_ltree); + zero(this.dyn_dtree); + zero(this.bl_tree); - let base = str; - let prefix = ''; - let glob = ''; + this.l_desc = null; /* desc. for literal tree */ + this.d_desc = null; /* desc. for distance tree */ + this.bl_desc = null; /* desc. for bit length tree */ - if (start > 0) { - prefix = str.slice(0, start); - str = str.slice(start); - lastIndex -= start; - } + //ush bl_count[MAX_BITS+1]; + this.bl_count = new utils.Buf16(MAX_BITS + 1); + /* number of codes at each bit length for an optimal tree */ - if (base && isGlob === true && lastIndex > 0) { - base = str.slice(0, lastIndex); - glob = str.slice(lastIndex); - } else if (isGlob === true) { - base = ''; - glob = str; - } else { - base = str; - } + //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */ + zero(this.heap); - if (base && base !== '' && base !== '/' && base !== str) { - if (isPathSeparator(base.charCodeAt(base.length - 1))) { - base = base.slice(0, -1); - } - } + this.heap_len = 0; /* number of elements in the heap */ + this.heap_max = 0; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. 
+ */ - if (opts.unescape === true) { - if (glob) glob = utils.removeBackslashes(glob); + this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1]; + zero(this.depth); + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ - if (base && backslashes === true) { - base = utils.removeBackslashes(base); - } - } + this.l_buf = 0; /* buffer index for literals or lengths */ - const state = { - prefix, - input, - start, - base, - glob, - isBrace, - isBracket, - isGlob, - isExtglob, - isGlobstar, - negated, - negatedExtglob - }; + this.lit_bufsize = 0; + /* Size of match buffer for literals/lengths. There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ - if (opts.tokens === true) { - state.maxDepth = 0; - if (!isPathSeparator(code)) { - tokens.push(token); - } - state.tokens = tokens; - } + this.last_lit = 0; /* running index in l_buf */ - if (opts.parts === true || opts.tokens === true) { - let prevIndex; + this.d_buf = 0; + /* Buffer index for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. + */ - for (let idx = 0; idx < slashes.length; idx++) { - const n = prevIndex ? prevIndex + 1 : start; - const i = slashes[idx]; - const value = input.slice(n, i); - if (opts.tokens) { - if (idx === 0 && start !== 0) { - tokens[idx].isPrefix = true; - tokens[idx].value = prefix; - } else { - tokens[idx].value = value; - } - depth(tokens[idx]); - state.maxDepth += tokens[idx].depth; - } - if (idx !== 0 || value !== '') { - parts.push(value); - } - prevIndex = i; - } + this.opt_len = 0; /* bit length of current block with optimal trees */ + this.static_len = 0; /* bit length of current block with static trees */ + this.matches = 0; /* number of string matches in current block */ + this.insert = 0; /* bytes at end of window left to insert */ - if (prevIndex && prevIndex + 1 < input.length) { - const value = input.slice(prevIndex + 1); - parts.push(value); - if (opts.tokens) { - tokens[tokens.length - 1].value = value; - depth(tokens[tokens.length - 1]); - state.maxDepth += tokens[tokens.length - 1].depth; - } - } + this.bi_buf = 0; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + this.bi_valid = 0; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. + */ - state.slashes = slashes; - state.parts = parts; - } + // Used for window memory init. We safely ignore it for JS. That makes + // sense only for pointers and memory check tools. 
+ //this.high_water = 0; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ +} - return state; -}; -module.exports = scan; +function deflateResetKeep(strm) { + var s; + if (!strm || !strm.state) { + return err(strm, Z_STREAM_ERROR); + } -/***/ }), + strm.total_in = strm.total_out = 0; + strm.data_type = Z_UNKNOWN; -/***/ 538: -/***/ (function(module, __unusedexports, __webpack_require__) { + s = strm.state; + s.pending = 0; + s.pending_out = 0; -"use strict"; + if (s.wrap < 0) { + s.wrap = -s.wrap; + /* was made negative by deflate(..., Z_FINISH); */ + } + s.status = (s.wrap ? INIT_STATE : BUSY_STATE); + strm.adler = (s.wrap === 2) ? + 0 // crc32(0, Z_NULL, 0) + : + 1; // adler32(0, Z_NULL, 0) + s.last_flush = Z_NO_FLUSH; + trees._tr_init(s); + return Z_OK; +} -/* - * merge2 - * https://github.com/teambition/merge2 - * - * Copyright (c) 2014-2020 Teambition - * Licensed under the MIT license. - */ -const Stream = __webpack_require__(413) -const PassThrough = Stream.PassThrough -const slice = Array.prototype.slice -module.exports = merge2 +function deflateReset(strm) { + var ret = deflateResetKeep(strm); + if (ret === Z_OK) { + lm_init(strm.state); + } + return ret; +} -function merge2 () { - const streamsQueue = [] - const args = slice.call(arguments) - let merging = false - let options = args[args.length - 1] - if (options && !Array.isArray(options) && options.pipe == null) { - args.pop() - } else { - options = {} - } +function deflateSetHeader(strm, head) { + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; } + strm.state.gzhead = head; + return Z_OK; +} - const doEnd = options.end !== false - const doPipeError = options.pipeError === true - if (options.objectMode == null) { - options.objectMode = true - } - if (options.highWaterMark == null) { - options.highWaterMark = 64 * 1024 - } - const mergedStream = PassThrough(options) - function addStream () { - for (let i = 0, len = arguments.length; i < len; i++) { - streamsQueue.push(pauseStreams(arguments[i], options)) - } - mergeStream() - return this +function deflateInit2(strm, level, method, windowBits, memLevel, strategy) { + if (!strm) { // === Z_NULL + return Z_STREAM_ERROR; } + var wrap = 1; - function mergeStream () { - if (merging) { - return - } - merging = true - - let streams = streamsQueue.shift() - if (!streams) { - process.nextTick(endStream) - return - } - if (!Array.isArray(streams)) { - streams = [streams] - } - - let pipesCount = streams.length + 1 - - function next () { - if (--pipesCount > 0) { - return - } - merging = false - mergeStream() - } - - function pipe (stream) { - function onend () { - stream.removeListener('merge2UnpipeEnd', onend) - stream.removeListener('end', onend) - if (doPipeError) { - stream.removeListener('error', onerror) - } - next() - } - function onerror (err) { - mergedStream.emit('error', err) - } - // skip ended stream - if (stream._readableState.endEmitted) { - return next() - } - - stream.on('merge2UnpipeEnd', onend) - stream.on('end', onend) - - if (doPipeError) { - stream.on('error', onerror) - } - - stream.pipe(mergedStream, { end: false }) - // compatible for old stream - stream.resume() - } - - for (let i = 0; i < streams.length; i++) { - pipe(streams[i]) - } + if (level === Z_DEFAULT_COMPRESSION) { + level = 6; + } - next() 
+ if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; } - function endStream () { - merging = false - // emit 'queueDrain' when all streams merged. - mergedStream.emit('queueDrain') - if (doEnd) { - mergedStream.end() - } + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; } - mergedStream.setMaxListeners(0) - mergedStream.add = addStream - mergedStream.on('unpipe', function (stream) { - stream.emit('merge2UnpipeEnd') - }) - if (args.length) { - addStream.apply(null, args) + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED) { + return err(strm, Z_STREAM_ERROR); } - return mergedStream -} -// check and pause streams for pipe. -function pauseStreams (streams, options) { - if (!Array.isArray(streams)) { - // Backwards-compat with old-style streams - if (!streams._readableState && streams.pipe) { - streams = streams.pipe(PassThrough(options)) - } - if (!streams._readableState || !streams.pause || !streams.pipe) { - throw new Error('Only readable stream can be merged.') - } - streams.pause() - } else { - for (let i = 0, len = streams.length; i < len; i++) { - streams[i] = pauseStreams(streams[i], options) - } - } - return streams -} + if (windowBits === 8) { + windowBits = 9; + } + /* until 256-byte window bug fixed */ -/***/ }), + var s = new DeflateState(); -/***/ 543: -/***/ (function(module) { + strm.state = s; + s.strm = strm; -"use strict"; + s.wrap = wrap; + s.gzhead = null; + s.w_bits = windowBits; + s.w_size = 1 << s.w_bits; + s.w_mask = s.w_size - 1; + s.hash_bits = memLevel + 7; + s.hash_size = 1 << s.hash_bits; + s.hash_mask = s.hash_size - 1; + s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH); -var AsyncLock = function (opts) { - opts = opts || {}; + s.window = new utils.Buf8(s.w_size * 2); + s.head = new utils.Buf16(s.hash_size); + s.prev = new utils.Buf16(s.w_size); - this.Promise = opts.Promise || Promise; + // Don't need mem init magic for JS. + //s.high_water = 0; /* nothing written to s->window yet */ - // format: {key : [fn, fn]} - // queues[key] = null indicates no job running for key - this.queues = Object.create(null); + s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ - // lock is reentrant for same domain - this.domainReentrant = opts.domainReentrant || false; - if (this.domainReentrant) { - if (typeof process === 'undefined' || typeof process.domain === 'undefined') { - throw new Error( - 'Domain-reentrant locks require `process.domain` to exist. 
Please flip `opts.domainReentrant = false`, ' + - 'use a NodeJS version that still implements Domain, or install a browser polyfill.'); - } - // domain of current running func {key : fn} - this.domains = Object.create(null); - } + s.pending_buf_size = s.lit_bufsize * 4; - this.timeout = opts.timeout || AsyncLock.DEFAULT_TIMEOUT; - this.maxOccupationTime = opts.maxOccupationTime || AsyncLock.DEFAULT_MAX_OCCUPATION_TIME; - if (opts.maxPending === Infinity || (Number.isInteger(opts.maxPending) && opts.maxPending >= 0)) { - this.maxPending = opts.maxPending; - } else { - this.maxPending = AsyncLock.DEFAULT_MAX_PENDING; - } -}; + //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + //s->pending_buf = (uchf *) overlay; + s.pending_buf = new utils.Buf8(s.pending_buf_size); -AsyncLock.DEFAULT_TIMEOUT = 0; //Never -AsyncLock.DEFAULT_MAX_OCCUPATION_TIME = 0; //Never -AsyncLock.DEFAULT_MAX_PENDING = 1000; + // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) + //s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s.d_buf = 1 * s.lit_bufsize; -/** - * Acquire Locks - * - * @param {String|Array} key resource key or keys to lock - * @param {function} fn async function - * @param {function} cb callback function, otherwise will return a promise - * @param {Object} opts options - */ -AsyncLock.prototype.acquire = function (key, fn, cb, opts) { - if (Array.isArray(key)) { - return this._acquireBatch(key, fn, cb, opts); - } + //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + s.l_buf = (1 + 2) * s.lit_bufsize; - if (typeof (fn) !== 'function') { - throw new Error('You must pass a function to execute'); - } + s.level = level; + s.strategy = strategy; + s.method = method; - // faux-deferred promise using new Promise() (as Promise.defer is deprecated) - var deferredResolve = null; - var deferredReject = null; - var deferred = null; + return deflateReset(strm); +} - if (typeof (cb) !== 'function') { - opts = cb; - cb = null; +function deflateInit(strm, level) { + return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); +} - // will return a promise - deferred = new this.Promise(function(resolve, reject) { - deferredResolve = resolve; - deferredReject = reject; - }); - } - opts = opts || {}; +function deflate(strm, flush) { + var old_flush, s; + var beg, val; // for gzip header write only - var resolved = false; - var timer = null; - var occupationTimer = null; - var self = this; + if (!strm || !strm.state || + flush > Z_BLOCK || flush < 0) { + return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR; + } - var done = function (locked, err, ret) { + s = strm.state; - if (occupationTimer) { - clearTimeout(occupationTimer); - occupationTimer = null; - } + if (!strm.output || + (!strm.input && strm.avail_in !== 0) || + (s.status === FINISH_STATE && flush !== Z_FINISH)) { + return err(strm, (strm.avail_out === 0) ? 
Z_BUF_ERROR : Z_STREAM_ERROR); + } - if (locked) { - if (!!self.queues[key] && self.queues[key].length === 0) { - delete self.queues[key]; - } - if (self.domainReentrant) { - delete self.domains[key]; - } - } - - if (!resolved) { - if (!deferred) { - if (typeof (cb) === 'function') { - cb(err, ret); - } - } - else { - //promise mode - if (err) { - deferredReject(err); - } - else { - deferredResolve(ret); - } - } - resolved = true; - } - - if (locked) { - //run next func - if (!!self.queues[key] && self.queues[key].length > 0) { - self.queues[key].shift()(); - } - } - }; - - var exec = function (locked) { - if (resolved) { // may due to timed out - return done(locked); - } - - if (timer) { - clearTimeout(timer); - timer = null; - } - - if (self.domainReentrant && locked) { - self.domains[key] = process.domain; - } - - // Callback mode - if (fn.length === 1) { - var called = false; - fn(function (err, ret) { - if (!called) { - called = true; - done(locked, err, ret); - } - }); - } - else { - // Promise mode - self._promiseTry(function () { - return fn(); - }) - .then(function(ret){ - done(locked, undefined, ret); - }, function(error){ - done(locked, error); - }); - } - }; - - if (self.domainReentrant && !!process.domain) { - exec = process.domain.bind(exec); - } - - if (!self.queues[key]) { - self.queues[key] = []; - exec(true); - } - else if (self.domainReentrant && !!process.domain && process.domain === self.domains[key]) { - // If code is in the same domain of current running task, run it directly - // Since lock is re-enterable - exec(false); - } - else if (self.queues[key].length >= self.maxPending) { - done(false, new Error('Too many pending tasks in queue ' + key)); - } - else { - var taskFn = function () { - exec(true); - }; - if (opts.skipQueue) { - self.queues[key].unshift(taskFn); - } else { - self.queues[key].push(taskFn); - } - - var timeout = opts.timeout || self.timeout; - if (timeout) { - timer = setTimeout(function () { - timer = null; - done(false, new Error('async-lock timed out in queue ' + key)); - }, timeout); - } - } - - var maxOccupationTime = opts.maxOccupationTime || self.maxOccupationTime; - if (maxOccupationTime) { - occupationTimer = setTimeout(function () { - if (!!self.queues[key]) { - done(false, new Error('Maximum occupation time is exceeded in queue ' + key)); - } - }, maxOccupationTime); - } - - if (deferred) { - return deferred; - } -}; - -/* - * Below is how this function works: - * - * Equivalent code: - * self.acquire(key1, function(cb){ - * self.acquire(key2, function(cb){ - * self.acquire(key3, fn, cb); - * }, cb); - * }, cb); - * - * Equivalent code: - * var fn3 = getFn(key3, fn); - * var fn2 = getFn(key2, fn3); - * var fn1 = getFn(key1, fn2); - * fn1(cb); - */ -AsyncLock.prototype._acquireBatch = function (keys, fn, cb, opts) { - if (typeof (cb) !== 'function') { - opts = cb; - cb = null; - } - - var self = this; - var getFn = function (key, fn) { - return function (cb) { - self.acquire(key, fn, cb, opts); - }; - }; - - var fnx = fn; - keys.reverse().forEach(function (key) { - fnx = getFn(key, fnx); - }); - - if (typeof (cb) === 'function') { - fnx(cb); - } - else { - return new this.Promise(function (resolve, reject) { - // check for promise mode in case keys is empty array - if (fnx.length === 1) { - fnx(function (err, ret) { - if (err) { - reject(err); - } - else { - resolve(ret); - } - }); - } else { - resolve(fnx()); - } - }); - } -}; - -/* - * Whether there is any running or pending asyncFunc - * - * @param {String} key - */ 
-AsyncLock.prototype.isBusy = function (key) { - if (!key) { - return Object.keys(this.queues).length > 0; - } - else { - return !!this.queues[key]; - } -}; - -/** - * Promise.try() implementation to become independent of Q-specific methods - */ -AsyncLock.prototype._promiseTry = function(fn) { - try { - return this.Promise.resolve(fn()); - } catch (e) { - return this.Promise.reject(e); - } -}; - -module.exports = AsyncLock; - - -/***/ }), - -/***/ 551: -/***/ (function(__unusedmodule, exports) { - -/*! crc32.js (C) 2014-present SheetJS -- http://sheetjs.com */ -/* vim: set ts=2: */ -/*exported CRC32 */ -var CRC32; -(function (factory) { - /*jshint ignore:start */ - /*eslint-disable */ - if(typeof DO_NOT_EXPORT_CRC === 'undefined') { - if(true) { - factory(exports); - } else {} - } else { - factory(CRC32 = {}); - } - /*eslint-enable */ - /*jshint ignore:end */ -}(function(CRC32) { -CRC32.version = '1.2.1'; -/*global Int32Array */ -function signed_crc_table() { - var c = 0, table = new Array(256); - - for(var n =0; n != 256; ++n){ - c = n; - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - c = ((c&1) ? (-306674912 ^ (c >>> 1)) : (c >>> 1)); - table[n] = c; - } - - return typeof Int32Array !== 'undefined' ? new Int32Array(table) : table; -} + s.strm = strm; /* just in case */ + old_flush = s.last_flush; + s.last_flush = flush; -var T0 = signed_crc_table(); -function slice_by_16_tables(T) { - var c = 0, v = 0, n = 0, table = typeof Int32Array !== 'undefined' ? new Int32Array(4096) : new Array(4096) ; + /* Write the header */ + if (s.status === INIT_STATE) { - for(n = 0; n != 256; ++n) table[n] = T[n]; - for(n = 0; n != 256; ++n) { - v = T[n]; - for(c = 256 + n; c < 4096; c += 256) v = table[c] = (v >>> 8) ^ T[v & 0xFF]; - } - var out = []; - for(n = 1; n != 16; ++n) out[n - 1] = typeof Int32Array !== 'undefined' ? 
table.subarray(n * 256, n * 256 + 256) : table.slice(n * 256, n * 256 + 256); - return out; -} -var TT = slice_by_16_tables(T0); -var T1 = TT[0], T2 = TT[1], T3 = TT[2], T4 = TT[3], T5 = TT[4]; -var T6 = TT[5], T7 = TT[6], T8 = TT[7], T9 = TT[8], Ta = TT[9]; -var Tb = TT[10], Tc = TT[11], Td = TT[12], Te = TT[13], Tf = TT[14]; -function crc32_bstr(bstr, seed) { - var C = seed ^ -1; - for(var i = 0, L = bstr.length; i < L;) C = (C>>>8) ^ T0[(C^bstr.charCodeAt(i++))&0xFF]; - return ~C; -} - -function crc32_buf(B, seed) { - var C = seed ^ -1, L = B.length - 15, i = 0; - for(; i < L;) C = - Tf[B[i++] ^ (C & 255)] ^ - Te[B[i++] ^ ((C >> 8) & 255)] ^ - Td[B[i++] ^ ((C >> 16) & 255)] ^ - Tc[B[i++] ^ (C >>> 24)] ^ - Tb[B[i++]] ^ Ta[B[i++]] ^ T9[B[i++]] ^ T8[B[i++]] ^ - T7[B[i++]] ^ T6[B[i++]] ^ T5[B[i++]] ^ T4[B[i++]] ^ - T3[B[i++]] ^ T2[B[i++]] ^ T1[B[i++]] ^ T0[B[i++]]; - L += 15; - while(i < L) C = (C>>>8) ^ T0[(C^B[i++])&0xFF]; - return ~C; -} - -function crc32_str(str, seed) { - var C = seed ^ -1; - for(var i = 0, L = str.length, c = 0, d = 0; i < L;) { - c = str.charCodeAt(i++); - if(c < 0x80) { - C = (C>>>8) ^ T0[(C^c)&0xFF]; - } else if(c < 0x800) { - C = (C>>>8) ^ T0[(C ^ (192|((c>>6)&31)))&0xFF]; - C = (C>>>8) ^ T0[(C ^ (128|(c&63)))&0xFF]; - } else if(c >= 0xD800 && c < 0xE000) { - c = (c&1023)+64; d = str.charCodeAt(i++)&1023; - C = (C>>>8) ^ T0[(C ^ (240|((c>>8)&7)))&0xFF]; - C = (C>>>8) ^ T0[(C ^ (128|((c>>2)&63)))&0xFF]; - C = (C>>>8) ^ T0[(C ^ (128|((d>>6)&15)|((c&3)<<4)))&0xFF]; - C = (C>>>8) ^ T0[(C ^ (128|(d&63)))&0xFF]; - } else { - C = (C>>>8) ^ T0[(C ^ (224|((c>>12)&15)))&0xFF]; - C = (C>>>8) ^ T0[(C ^ (128|((c>>6)&63)))&0xFF]; - C = (C>>>8) ^ T0[(C ^ (128|(c&63)))&0xFF]; - } - } - return ~C; -} -CRC32.table = T0; -// $FlowIgnore -CRC32.bstr = crc32_bstr; -// $FlowIgnore -CRC32.buf = crc32_buf; -// $FlowIgnore -CRC32.str = crc32_str; -})); - - -/***/ }), - -/***/ 593: -/***/ (function(__unusedmodule, exports, __webpack_require__) { - -"use strict"; - -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { - Object.defineProperty(o, "default", { enumerable: true, value: v }); -}) : function(o, v) { - o["default"] = v; -}); -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); - __setModuleDefault(result, mod); - return result; -}; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? 
mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.main = exports.exec = void 0; -const child_process = __importStar(__webpack_require__(129)); -const fast_glob_1 = __webpack_require__(406); -const fs_1 = __importStar(__webpack_require__(747)); -const git_url_parse_1 = __importDefault(__webpack_require__(253)); -const os_1 = __webpack_require__(87); -const path = __importStar(__webpack_require__(622)); -const isomorphic_git_1 = __importDefault(__webpack_require__(956)); -const io_1 = __webpack_require__(1); -/** - * Custom wrapper around the child_process module - */ -const exec = async (cmd, opts) => { - const { log } = opts; - const env = (opts === null || opts === void 0 ? void 0 : opts.env) || {}; - const ps = child_process.spawn('bash', ['-c', cmd], { - env: { - HOME: process.env.HOME, - ...env, - }, - cwd: opts.cwd, - stdio: ['pipe', 'pipe', 'pipe'], - }); - const output = { - stderr: '', - stdout: '', - }; - // We won't be providing any input to command - ps.stdin.end(); - ps.stdout.on('data', (data) => { - output.stdout += data; - log.log(`data`, data.toString()); - }); - ps.stderr.on('data', (data) => { - output.stderr += data; - log.error(data.toString()); - }); - return new Promise((resolve, reject) => ps.on('close', (code) => { - if (code !== 0) { - reject(new Error('Process exited with code: ' + code + ':\n' + output.stderr)); - } - else { - resolve(output); - } - })); -}; -exports.exec = exec; -const DEFAULT_MESSAGE = 'Update {target-branch} to output generated at {sha}'; -// Error messages -const KNOWN_HOSTS_WARNING = ` -##[warning] KNOWN_HOSTS_FILE not set -This will probably mean that host verification will fail later on -`; -const KNOWN_HOSTS_ERROR = (host) => ` -##[error] Host key verification failed! 
-This is probably because you forgot to supply a value for KNOWN_HOSTS_FILE -or the file is invalid or doesn't correctly verify the host ${host} -`; -const SSH_KEY_ERROR = ` -##[error] Permission denied (publickey) -Make sure that the ssh private key is set correctly, and -that the public key has been added to the target repo -`; -const INVALID_KEY_ERROR = ` -##[error] Error loading key: invalid format -Please check that you're setting the environment variable -SSH_PRIVATE_KEY correctly -`; -// Paths -const REPO_SELF = 'self'; -const RESOURCES = path.join(path.dirname(__dirname), 'resources'); -const KNOWN_HOSTS_GITHUB = path.join(RESOURCES, 'known_hosts_github.com'); -const SSH_FOLDER = path.join((0, os_1.homedir)(), '.ssh'); -const KNOWN_HOSTS_TARGET = path.join(SSH_FOLDER, 'known_hosts'); -const SSH_AGENT_PID_EXTRACT = /SSH_AGENT_PID=([0-9]+);/; -const genConfig = (env = process.env) => { - if (!env.REPO) - throw new Error('REPO must be specified'); - if (!env.BRANCH) - throw new Error('BRANCH must be specified'); - if (!env.FOLDER) - throw new Error('FOLDER must be specified'); - const repo = env.REPO; - const branch = env.BRANCH; - const folder = env.FOLDER; - const squashHistory = env.SQUASH_HISTORY === 'true'; - const skipEmptyCommits = env.SKIP_EMPTY_COMMITS === 'true'; - const message = env.MESSAGE || DEFAULT_MESSAGE; - const tag = env.TAG; - // Determine the type of URL - if (repo === REPO_SELF) { - if (!env.GITHUB_TOKEN) - throw new Error('GITHUB_TOKEN must be specified when REPO == self'); - if (!env.GITHUB_REPOSITORY) - throw new Error('GITHUB_REPOSITORY must be specified when REPO == self'); - const url = `https://x-access-token:${env.GITHUB_TOKEN}@github.com/${env.GITHUB_REPOSITORY}.git`; - const config = { - repo: url, - branch, - folder, - squashHistory, - skipEmptyCommits, - mode: 'self', - message, - tag, - }; - return config; - } - const parsedUrl = (0, git_url_parse_1.default)(repo); - if (parsedUrl.protocol === 'ssh') { - if (!env.SSH_PRIVATE_KEY) - throw new Error('SSH_PRIVATE_KEY must be specified when REPO uses ssh'); - const config = { - repo, - branch, - folder, - squashHistory, - skipEmptyCommits, - mode: 'ssh', - parsedUrl, - privateKey: env.SSH_PRIVATE_KEY, - knownHostsFile: env.KNOWN_HOSTS_FILE, - message, - tag, - }; - return config; - } - throw new Error('Unsupported REPO URL'); -}; -const writeToProcess = (command, args, opts) => new Promise((resolve, reject) => { - const child = child_process.spawn(command, args, { - env: opts.env, - stdio: 'pipe', - }); - child.stdin.setDefaultEncoding('utf-8'); - child.stdin.write(opts.data); - child.stdin.end(); - child.on('error', reject); - let stderr = ''; - child.stdout.on('data', (data) => { - /* istanbul ignore next */ - opts.log.log(data.toString()); - }); - child.stderr.on('data', (data) => { - stderr += data; - opts.log.error(data.toString()); - }); - child.on('close', (code) => { - /* istanbul ignore else */ - if (code === 0) { - resolve(); - } - else { - reject(new Error(stderr)); - } - }); -}); -const main = async ({ env = process.env, log, }) => { - var _a, _b; - const config = genConfig(env); - // Calculate paths that use temp diractory - const TMP_PATH = await fs_1.promises.mkdtemp(path.join((0, os_1.tmpdir)(), 'git-publish-subdir-action-')); - const REPO_TEMP = path.join(TMP_PATH, 'repo'); - const SSH_AUTH_SOCK = path.join(TMP_PATH, 'ssh_agent.sock'); - if (!env.GITHUB_EVENT_PATH) - throw new Error('Expected GITHUB_EVENT_PATH'); - const event = JSON.parse((await 
fs_1.promises.readFile(env.GITHUB_EVENT_PATH)).toString()); - const name = env.COMMIT_NAME || - ((_a = event.pusher) === null || _a === void 0 ? void 0 : _a.name) || - env.GITHUB_ACTOR || - 'Git Publish Subdirectory'; - const email = env.COMMIT_EMAIL || - ((_b = event.pusher) === null || _b === void 0 ? void 0 : _b.email) || - (env.GITHUB_ACTOR - ? `${env.GITHUB_ACTOR}@users.noreply.github.com` - : 'nobody@nowhere'); - const tag = env.TAG; - // Set Git Config - await (0, exports.exec)(`git config --global user.name "${name}"`, { log }); - await (0, exports.exec)(`git config --global user.email "${email}"`, { log }); - /** - * Get information about the current git repository - */ - const getGitInformation = async () => { - // Get the root git directory - let dir = process.cwd(); - while (true) { - const isGitRepo = await fs_1.promises - .stat(path.join(dir, '.git')) - .then((s) => s.isDirectory()) - .catch(() => false); - if (isGitRepo) { - break; - } - // We need to traverse up one - const next = path.dirname(dir); - if (next === dir) { - log.log(`##[info] Not running in git directory, unable to get information about source commit`); - return { - commitMessage: '', - sha: '', - }; - } - else { - dir = next; - } - } - // Get current sha of repo to use in commit message - const gitLog = await isomorphic_git_1.default.log({ - fs: fs_1.default, - depth: 1, - dir, - }); - const commit = gitLog.length > 0 ? gitLog[0] : undefined; - if (!commit) { - log.log(`##[info] Unable to get information about HEAD commit`); - return { - commitMessage: '', - sha: '', - }; - } - return { - // Use trim to remove the trailing newline - commitMessage: commit.commit.message.trim(), - sha: commit.oid, - }; - }; - const gitInfo = await getGitInformation(); - // Environment to pass to children - const childEnv = Object.assign({}, process.env, { - SSH_AUTH_SOCK, - }); - if (config.mode === 'ssh') { - // Copy over the known_hosts file if set - let known_hosts = config.knownHostsFile; - // Use well-known known_hosts for certain domains - if (!known_hosts && config.parsedUrl.resource === 'github.com') { - known_hosts = KNOWN_HOSTS_GITHUB; - } - if (!known_hosts) { - log.warn(KNOWN_HOSTS_WARNING); - } - else { - await (0, io_1.mkdirP)(SSH_FOLDER); - await fs_1.promises.copyFile(known_hosts, KNOWN_HOSTS_TARGET); - } - // Setup ssh-agent with private key - log.log(`Setting up ssh-agent on ${SSH_AUTH_SOCK}`); - const sshAgentMatch = SSH_AGENT_PID_EXTRACT.exec((await (0, exports.exec)(`ssh-agent -a ${SSH_AUTH_SOCK}`, { log, env: childEnv })) - .stdout); - /* istanbul ignore if */ - if (!sshAgentMatch) - throw new Error('Unexpected output from ssh-agent'); - childEnv.SSH_AGENT_PID = sshAgentMatch[1]; - log.log(`Adding private key to ssh-agent at ${SSH_AUTH_SOCK}`); - await writeToProcess('ssh-add', ['-'], { - data: config.privateKey + '\n', - env: childEnv, - log, - }); - log.log(`Private key added`); - } - // Clone the target repo - await (0, exports.exec)(`git clone "${config.repo}" "${REPO_TEMP}"`, { - log, - env: childEnv, - }).catch((err) => { - const s = err.toString(); - /* istanbul ignore else */ - if (config.mode === 'ssh') { - /* istanbul ignore else */ - if (s.indexOf('Host key verification failed') !== -1) { - log.error(KNOWN_HOSTS_ERROR(config.parsedUrl.resource)); - } - else if (s.indexOf('Permission denied (publickey') !== -1) { - log.error(SSH_KEY_ERROR); - } + if (s.wrap === 2) { // GZIP header + strm.adler = 0; //crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (!s.gzhead) 
{ // s->gzhead == Z_NULL + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s.status = BUSY_STATE; + } + else { + put_byte(s, (s.gzhead.text ? 1 : 0) + + (s.gzhead.hcrc ? 2 : 0) + + (!s.gzhead.extra ? 0 : 4) + + (!s.gzhead.name ? 0 : 8) + + (!s.gzhead.comment ? 0 : 16) + ); + put_byte(s, s.gzhead.time & 0xff); + put_byte(s, (s.gzhead.time >> 8) & 0xff); + put_byte(s, (s.gzhead.time >> 16) & 0xff); + put_byte(s, (s.gzhead.time >> 24) & 0xff); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? + 4 : 0)); + put_byte(s, s.gzhead.os & 0xff); + if (s.gzhead.extra && s.gzhead.extra.length) { + put_byte(s, s.gzhead.extra.length & 0xff); + put_byte(s, (s.gzhead.extra.length >> 8) & 0xff); } - throw err; - }); - if (!config.squashHistory) { - // Fetch branch if it exists - await (0, exports.exec)(`git fetch -u origin ${config.branch}:${config.branch}`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }).catch((err) => { - const s = err.toString(); - /* istanbul ignore if */ - if (s.indexOf("Couldn't find remote ref") === -1) { - log.error("##[warning] Failed to fetch target branch, probably doesn't exist"); - log.error(err); - } - }); - // Check if branch already exists - log.log(`##[info] Checking if branch ${config.branch} exists already`); - const branchCheck = await (0, exports.exec)(`git branch --list "${config.branch}"`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }); - if (branchCheck.stdout.trim() === '') { - // Branch does not exist yet, let's check it out as an orphan - log.log(`##[info] ${config.branch} does not exist, creating as orphan`); - await (0, exports.exec)(`git checkout --orphan "${config.branch}"`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }); + if (s.gzhead.hcrc) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0); } - else { - await (0, exports.exec)(`git checkout "${config.branch}"`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }); + s.gzindex = 0; + s.status = EXTRA_STATE; + } + } + else // DEFLATE header + { + var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8; + var level_flags = -1; + + if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { + level_flags = 0; + } else if (s.level < 6) { + level_flags = 1; + } else if (s.level === 6) { + level_flags = 2; + } else { + level_flags = 3; + } + header |= (level_flags << 6); + if (s.strstart !== 0) { header |= PRESET_DICT; } + header += 31 - (header % 31); + + s.status = BUSY_STATE; + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s.strstart !== 0) { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + strm.adler = 1; // adler32(0L, Z_NULL, 0); + } + } + +//#ifdef GZIP + if (s.status === EXTRA_STATE) { + if (s.gzhead.extra/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + + while (s.gzindex < (s.gzhead.extra.length & 0xffff)) { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + break; + } } + put_byte(s, s.gzhead.extra[s.gzindex] & 0xff); + s.gzindex++; + } + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (s.gzindex === s.gzhead.extra.length) { + s.gzindex = 0; + s.status = 
NAME_STATE; + } } else { - // Checkout a random branch so we can delete the target branch if it exists - log.log('Checking out temp branch'); - await (0, exports.exec)(`git checkout -b "${Math.random().toString(36).substring(2)}"`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }); - // Delete the target branch if it exists - await (0, exports.exec)(`git branch -D "${config.branch}"`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }).catch((err) => { }); - // Checkout target branch as an orphan - await (0, exports.exec)(`git checkout --orphan "${config.branch}"`, { - log, - env: childEnv, - cwd: REPO_TEMP, - }); - log.log('Checked out orphan'); + s.status = NAME_STATE; } - // // Update contents of branch - log.log(`##[info] Updating branch ${config.branch}`); - /** - * The list of globs we'll use for clearing - */ - const globs = await (async () => { - if (env.CLEAR_GLOBS_FILE) { - // We need to use a custom mechanism to clear the files - log.log(`##[info] Using custom glob file to clear target branch ${env.CLEAR_GLOBS_FILE}`); - const globList = (await fs_1.promises.readFile(env.CLEAR_GLOBS_FILE)) - .toString() - .split('\n') - .map((s) => s.trim()) - .filter((s) => s !== ''); - return globList; - } - else if (env.TARGET_DIR) { - log.log(`##[info] Removing all files from target dir ${env.TARGET_DIR} on target branch`); - return [`${env.TARGET_DIR}/**/*`, '!.git']; + } + if (s.status === NAME_STATE) { + if (s.gzhead.name/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + //int val; + + do { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + val = 1; + break; + } } - else { - // Remove all files - log.log(`##[info] Removing all files from target branch`); - return ['**/*', '!.git']; + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.name.length) { + val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; } - })(); - const filesToDelete = (0, fast_glob_1.stream)(globs, { - absolute: true, - dot: true, - followSymbolicLinks: false, - cwd: REPO_TEMP, - }); - // Delete all files from the filestream - for await (const entry of filesToDelete) { - await fs_1.promises.unlink(entry); - } - const folder = path.resolve(process.cwd(), config.folder); - const destinationFolder = env.TARGET_DIR ? 
env.TARGET_DIR : './'; - // Make sure the destination folder exists - await (0, io_1.mkdirP)(path.resolve(REPO_TEMP, destinationFolder)); - log.log(`##[info] Copying all files from ${folder}`); - await (0, io_1.cp)(`${folder}/`, `${REPO_TEMP}/${destinationFolder}/`, { - recursive: true, - copySourceDirectory: false, - }); - await (0, exports.exec)(`git add -A .`, { log, env: childEnv, cwd: REPO_TEMP }); - const message = config.message - .replace(/\{target\-branch\}/g, config.branch) - .replace(/\{sha\}/g, gitInfo.sha.substr(0, 7)) - .replace(/\{long\-sha\}/g, gitInfo.sha) - .replace(/\{msg\}/g, gitInfo.commitMessage); - await isomorphic_git_1.default.commit({ - fs: fs_1.default, - dir: REPO_TEMP, - message, - author: { email, name }, - }); - if (tag) { - log.log(`##[info] Tagging commit with ${tag}`); - await isomorphic_git_1.default.tag({ - fs: fs_1.default, - dir: REPO_TEMP, - ref: tag, - force: true, - }); - } - if (config.skipEmptyCommits) { - log.log(`##[info] Checking whether contents have changed before pushing`); - // Before we push, check whether it changed the tree, - // and avoid pushing if not - const head = await isomorphic_git_1.default.resolveRef({ - fs: fs_1.default, - dir: REPO_TEMP, - ref: 'HEAD', - }); - const currentCommit = await isomorphic_git_1.default.readCommit({ - fs: fs_1.default, - dir: REPO_TEMP, - oid: head, - }); - if (currentCommit.commit.parent.length === 1) { - const previousCommit = await isomorphic_git_1.default.readCommit({ - fs: fs_1.default, - dir: REPO_TEMP, - oid: currentCommit.commit.parent[0], - }); - if (currentCommit.commit.tree === previousCommit.commit.tree) { - log.log(`##[info] Contents of target repo unchanged, exiting.`); - return; - } + put_byte(s, val); + } while (val !== 0); + + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (val === 0) { + s.gzindex = 0; + s.status = COMMENT_STATE; + } + } + else { + s.status = COMMENT_STATE; + } + } + if (s.status === COMMENT_STATE) { + if (s.gzhead.comment/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + //int val; + + do { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + val = 1; + break; + } + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.comment.length) { + val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; } + put_byte(s, val); + } while (val !== 0); + + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (val === 0) { + s.status = HCRC_STATE; + } } - log.log(`##[info] Pushing`); - const forceArg = config.squashHistory ? '-f' : ''; - const tagsArg = tag ? 
'--tags' : ''; - const push = await (0, exports.exec)(`git push ${forceArg} origin "${config.branch}" ${tagsArg}`, { log, env: childEnv, cwd: REPO_TEMP }); - log.log(push.stdout); - log.log(`##[info] Deployment Successful`); - if (config.mode === 'ssh') { - log.log(`##[info] Killing ssh-agent`); - await (0, exports.exec)(`ssh-agent -k`, { log, env: childEnv }); + else { + s.status = HCRC_STATE; } -}; -exports.main = main; + } + if (s.status === HCRC_STATE) { + if (s.gzhead.hcrc) { + if (s.pending + 2 > s.pending_buf_size) { + flush_pending(strm); + } + if (s.pending + 2 <= s.pending_buf_size) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + strm.adler = 0; //crc32(0L, Z_NULL, 0); + s.status = BUSY_STATE; + } + } + else { + s.status = BUSY_STATE; + } + } +//#endif + /* Flush as much pending output as possible */ + if (s.pending !== 0) { + flush_pending(strm); + if (strm.avail_out === 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s.last_flush = -1; + return Z_OK; + } -/***/ }), + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. + */ + } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && + flush !== Z_FINISH) { + return err(strm, Z_BUF_ERROR); + } -/***/ 608: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + /* User must not provide more input after the first FINISH: */ + if (s.status === FINISH_STATE && strm.avail_in !== 0) { + return err(strm, Z_BUF_ERROR); + } -"use strict"; + /* Start a new block or continue the current one. + */ + if (strm.avail_in !== 0 || s.lookahead !== 0 || + (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) { + var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) : + (s.strategy === Z_RLE ? deflate_rle(s, flush) : + configuration_table[s.level].func(s, flush)); -Object.defineProperty(exports, "__esModule", { value: true }); -const stream_1 = __webpack_require__(413); -const fsStat = __webpack_require__(231); -const fsWalk = __webpack_require__(522); -const reader_1 = __webpack_require__(949); -class ReaderStream extends reader_1.default { - constructor() { - super(...arguments); - this._walkStream = fsWalk.walkStream; - this._stat = fsStat.stat; + if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { + s.status = FINISH_STATE; } - dynamic(root, options) { - return this._walkStream(root, options); + if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { + if (strm.avail_out === 0) { + s.last_flush = -1; + /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. 
+ */ } - static(patterns, options) { - const filepaths = patterns.map(this._getFullEntryPath, this); - const stream = new stream_1.PassThrough({ objectMode: true }); - stream._write = (index, _enc, done) => { - return this._getEntry(filepaths[index], patterns[index], options) - .then((entry) => { - if (entry !== null && options.entryFilter(entry)) { - stream.push(entry); - } - if (index === filepaths.length - 1) { - stream.end(); - } - done(); - }) - .catch(done); - }; - for (let i = 0; i < filepaths.length; i++) { - stream.write(i); + if (bstate === BS_BLOCK_DONE) { + if (flush === Z_PARTIAL_FLUSH) { + trees._tr_align(s); + } + else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ + + trees._tr_stored_block(s, 0, 0, false); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). + */ + if (flush === Z_FULL_FLUSH) { + /*** CLEAR_HASH(s); ***/ /* forget history */ + zero(s.head); // Fill with NIL (= 0); + + if (s.lookahead === 0) { + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } } - return stream; - } - _getEntry(filepath, pattern, options) { - return this._getStat(filepath) - .then((stats) => this._makeEntry(stats, pattern)) - .catch((error) => { - if (options.errorFilter(error)) { - return null; - } - throw error; - }); - } - _getStat(filepath) { - return new Promise((resolve, reject) => { - this._stat(filepath, this._fsStatSettings, (error, stats) => { - return error === null ? resolve(stats) : reject(error); - }); - }); + } + flush_pending(strm); + if (strm.avail_out === 0) { + s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } } + } + //Assert(strm->avail_out > 0, "bug2"); + //if (strm.avail_out <= 0) { throw new Error("bug2");} + + if (flush !== Z_FINISH) { return Z_OK; } + if (s.wrap <= 0) { return Z_STREAM_END; } + + /* Write the trailer */ + if (s.wrap === 2) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + put_byte(s, (strm.adler >> 16) & 0xff); + put_byte(s, (strm.adler >> 24) & 0xff); + put_byte(s, strm.total_in & 0xff); + put_byte(s, (strm.total_in >> 8) & 0xff); + put_byte(s, (strm.total_in >> 16) & 0xff); + put_byte(s, (strm.total_in >> 24) & 0xff); + } + else + { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s.wrap > 0) { s.wrap = -s.wrap; } + /* write the trailer only once! */ + return s.pending !== 0 ? Z_OK : Z_STREAM_END; +} + +function deflateEnd(strm) { + var status; + + if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { + return Z_STREAM_ERROR; + } + + status = strm.state.status; + if (status !== INIT_STATE && + status !== EXTRA_STATE && + status !== NAME_STATE && + status !== COMMENT_STATE && + status !== HCRC_STATE && + status !== BUSY_STATE && + status !== FINISH_STATE + ) { + return err(strm, Z_STREAM_ERROR); + } + + strm.state = null; + + return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK; } -exports.default = ReaderStream; -/***/ }), +/* ========================================================================= + * Initializes the compression dictionary from the given byte + * sequence without producing any compressed output. 
+ */ +function deflateSetDictionary(strm, dictionary) { + var dictLength = dictionary.length; + + var s; + var str, n; + var wrap; + var avail; + var next; + var input; + var tmpDict; + + if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { + return Z_STREAM_ERROR; + } + + s = strm.state; + wrap = s.wrap; + + if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { + return Z_STREAM_ERROR; + } -/***/ 611: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap === 1) { + /* adler32(strm->adler, dictionary, dictLength); */ + strm.adler = adler32(strm.adler, dictionary, dictLength, 0); + } -"use strict"; + s.wrap = 0; /* avoid computing Adler-32 in read_buf */ -Object.defineProperty(exports, "__esModule", { value: true }); -const path = __webpack_require__(622); -const fsScandir = __webpack_require__(661); -class Settings { - constructor(_options = {}) { - this._options = _options; - this.basePath = this._getValue(this._options.basePath, undefined); - this.concurrency = this._getValue(this._options.concurrency, Number.POSITIVE_INFINITY); - this.deepFilter = this._getValue(this._options.deepFilter, null); - this.entryFilter = this._getValue(this._options.entryFilter, null); - this.errorFilter = this._getValue(this._options.errorFilter, null); - this.pathSegmentSeparator = this._getValue(this._options.pathSegmentSeparator, path.sep); - this.fsScandirSettings = new fsScandir.Settings({ - followSymbolicLinks: this._options.followSymbolicLinks, - fs: this._options.fs, - pathSegmentSeparator: this._options.pathSegmentSeparator, - stats: this._options.stats, - throwErrorOnBrokenSymbolicLink: this._options.throwErrorOnBrokenSymbolicLink - }); - } - _getValue(option, value) { - return option !== null && option !== void 0 ? 
option : value; + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s.w_size) { + if (wrap === 0) { /* already empty otherwise */ + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + s.strstart = 0; + s.block_start = 0; + s.insert = 0; } + /* use the tail */ + // dictionary = dictionary.slice(dictLength - s.w_size); + tmpDict = new utils.Buf8(s.w_size); + utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0); + dictionary = tmpDict; + dictLength = s.w_size; + } + /* insert dictionary into window and hash */ + avail = strm.avail_in; + next = strm.next_in; + input = strm.input; + strm.avail_in = dictLength; + strm.next_in = 0; + strm.input = dictionary; + fill_window(s); + while (s.lookahead >= MIN_MATCH) { + str = s.strstart; + n = s.lookahead - (MIN_MATCH - 1); + do { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + + s.head[s.ins_h] = str; + str++; + } while (--n); + s.strstart = str; + s.lookahead = MIN_MATCH - 1; + fill_window(s); + } + s.strstart += s.lookahead; + s.block_start = s.strstart; + s.insert = s.lookahead; + s.lookahead = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + strm.next_in = next; + strm.input = input; + strm.avail_in = avail; + s.wrap = wrap; + return Z_OK; } -exports.default = Settings; + + +exports.deflateInit = deflateInit; +exports.deflateInit2 = deflateInit2; +exports.deflateReset = deflateReset; +exports.deflateResetKeep = deflateResetKeep; +exports.deflateSetHeader = deflateSetHeader; +exports.deflate = deflate; +exports.deflateEnd = deflateEnd; +exports.deflateSetDictionary = deflateSetDictionary; +exports.deflateInfo = 'pako deflate (from Nodeca project)'; + +/* Not implemented +exports.deflateBound = deflateBound; +exports.deflateCopy = deflateCopy; +exports.deflateParams = deflateParams; +exports.deflatePending = deflatePending; +exports.deflatePrime = deflatePrime; +exports.deflateTune = deflateTune; +*/ /***/ }), -/***/ 613: -/***/ (function(module) { +/***/ 5105: +/***/ ((module) => { "use strict"; -// Note: we can't get significant speed boost here. -// So write code to minimize size - no pregenerated tables -// and array tools dependencies. - // (C) 1995-2013 Jean-loup Gailly and Mark Adler // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin // @@ -12498,442 +10788,402 @@ exports.default = Settings; // misrepresented as being the original software. // 3. This notice may not be removed or altered from any source distribution. -// Use ordinary array, since untyped makes no boost here -function makeTable() { - var c, table = []; - - for (var n = 0; n < 256; n++) { - c = n; - for (var k = 0; k < 8; k++) { - c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); - } - table[n] = c; - } - - return table; -} - -// Create table on load. Just 255 signed longs. Not a problem. 
-var crcTable = makeTable(); - - -function crc32(crc, buf, len, pos) { - var t = crcTable, - end = pos + len; - - crc ^= -1; - - for (var i = pos; i < end; i++) { - crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; - } - - return (crc ^ (-1)); // >>> 0; -} - - -module.exports = crc32; - - -/***/ }), - -/***/ 614: -/***/ (function(module) { - -module.exports = require("events"); - -/***/ }), - -/***/ 617: -/***/ (function(__unusedmodule, exports) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -exports.joinPathSegments = exports.replacePathSegmentSeparator = exports.isAppliedFilter = exports.isFatalError = void 0; -function isFatalError(settings, error) { - if (settings.errorFilter === null) { - return true; - } - return !settings.errorFilter(error); -} -exports.isFatalError = isFatalError; -function isAppliedFilter(filter, value) { - return filter === null || filter(value); -} -exports.isAppliedFilter = isAppliedFilter; -function replacePathSegmentSeparator(filepath, separator) { - return filepath.split(/[/\\]/).join(separator); -} -exports.replacePathSegmentSeparator = replacePathSegmentSeparator; -function joinPathSegments(a, b, separator) { - if (a === '') { - return b; - } - /** - * The correct handling of cases when the first segment is a root (`/`, `C:/`) or UNC path (`//?/C:/`). - */ - if (a.endsWith(separator)) { - return a + b; - } - return a + separator + b; -} -exports.joinPathSegments = joinPathSegments; - - -/***/ }), - -/***/ 622: -/***/ (function(module) { - -module.exports = require("path"); - -/***/ }), - -/***/ 641: -/***/ (function(__unusedmodule, exports) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -exports.read = void 0; -function read(path, settings) { - const lstat = settings.fs.lstatSync(path); - if (!lstat.isSymbolicLink() || !settings.followSymbolicLink) { - return lstat; - } - try { - const stat = settings.fs.statSync(path); - if (settings.markSymbolicLink) { - stat.isSymbolicLink = () => true; - } - return stat; - } - catch (error) { - if (!settings.throwErrorOnBrokenSymbolicLink) { - return lstat; - } - throw error; - } -} -exports.read = read; - - -/***/ }), - -/***/ 661: -/***/ (function(__unusedmodule, exports, __webpack_require__) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -exports.Settings = exports.scandirSync = exports.scandir = void 0; -const async = __webpack_require__(182); -const sync = __webpack_require__(148); -const settings_1 = __webpack_require__(403); -exports.Settings = settings_1.default; -function scandir(path, optionsOrSettingsOrCallback, callback) { - if (typeof optionsOrSettingsOrCallback === 'function') { - async.read(path, getSettings(), optionsOrSettingsOrCallback); - return; - } - async.read(path, getSettings(optionsOrSettingsOrCallback), callback); -} -exports.scandir = scandir; -function scandirSync(path, optionsOrSettings) { - const settings = getSettings(optionsOrSettings); - return sync.read(path, settings); -} -exports.scandirSync = scandirSync; -function getSettings(settingsOrOptions = {}) { - if (settingsOrOptions instanceof settings_1.default) { - return settingsOrOptions; - } - return new settings_1.default(settingsOrOptions); +function GZheader() { + /* true if compressed data believed to be text */ + this.text = 0; + /* modification time */ + this.time = 0; + /* extra flags (not used when writing a gzip file) */ + this.xflags = 0; + /* operating system */ + this.os = 0; + /* pointer to extra field or Z_NULL 
if none */ + this.extra = null; + /* extra field length (valid if extra != Z_NULL) */ + this.extra_len = 0; // Actually, we don't need it in JS, + // but leave for few code modifications + + // + // Setup limits is not necessary because in js we should not preallocate memory + // for inflate use constant limit in 65536 bytes + // + + /* space at extra (only when reading header) */ + // this.extra_max = 0; + /* pointer to zero-terminated file name or Z_NULL */ + this.name = ''; + /* space at name (only when reading header) */ + // this.name_max = 0; + /* pointer to zero-terminated comment or Z_NULL */ + this.comment = ''; + /* space at comment (only when reading header) */ + // this.comm_max = 0; + /* true if there was or will be a header crc */ + this.hcrc = 0; + /* true when done reading gzip header (not used when writing a gzip file) */ + this.done = false; } +module.exports = GZheader; + /***/ }), -/***/ 666: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 5349: +/***/ ((module) => { "use strict"; -var protocols = __webpack_require__(737); +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. -/** - * parsePath - * Parses the input url. - * - * @name parsePath - * @function - * @param {String} url The input url. - * @return {Object} An object containing the following fields: - * - * - `protocols` (Array): An array with the url protocols (usually it has one element). - * - `protocol` (String): The first protocol or `"file"`. - * - `port` (String): The domain port (default: `""`). - * - `resource` (String): The url domain/hostname. - * - `host` (String): The url domain (including subdomain and port). - * - `user` (String): The authentication user (default: `""`). - * - `password` (String): The authentication password (default: `""`). - * - `pathname` (String): The url pathname. - * - `hash` (String): The url hash. - * - `search` (String): The url querystring value (excluding `?`). - * - `href` (String): The normalized input url. - * - `query` (Object): The url querystring, parsed as object. - * - `parse_failed` (Boolean): Whether the parsing failed or not. 
- */ -function parsePath(url) { +// See state defs from inflate.js +var BAD = 30; /* got a data error -- remain here until reset */ +var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ - var output = { - protocols: [], - protocol: null, - port: null, - resource: "", - host: "", - user: "", - password: "", - pathname: "", - hash: "", - search: "", - href: url, - query: {}, - parse_failed: false - }; +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. - try { - var parsed = new URL(url); - output.protocols = protocols(parsed); - output.protocol = output.protocols[0]; - output.port = parsed.port; - output.resource = parsed.hostname; - output.host = parsed.host; - output.user = parsed.username || ""; - output.password = parsed.password || ""; - output.pathname = parsed.pathname; - output.hash = parsed.hash.slice(1); - output.search = parsed.search.slice(1); - output.href = parsed.href; - output.query = Object.fromEntries(parsed.searchParams); - } catch (e) { - // TODO Maybe check if it is a valid local file path - // In any case, these will be parsed by higher - // level parsers such as parse-url, git-url-parse, git-up - output.protocols = ["file"]; - output.protocol = output.protocols[0]; - output.port = ""; - output.resource = ""; - output.user = ""; - output.pathname = ""; - output.hash = ""; - output.search = ""; - output.href = url; - output.query = {}; - output.parse_failed = true; - } + Entry assumptions: - return output; -} + state.mode === LEN + strm.avail_in >= 6 + strm.avail_out >= 258 + start >= strm.avail_out + state.bits < 8 -module.exports = parsePath; + On return, state.mode is one of: -/***/ }), + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data -/***/ 669: -/***/ (function(module) { + Notes: -module.exports = require("util"); + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm.avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. -/***/ }), + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm.avail_out >= 258 for each loop to avoid checking for + output space. 
+ */ +module.exports = function inflate_fast(strm, start) { + var state; + var _in; /* local strm.input */ + var last; /* have enough input while in < last */ + var _out; /* local strm.output */ + var beg; /* inflate()'s initial strm.output */ + var end; /* while out < end, enough space available */ +//#ifdef INFLATE_STRICT + var dmax; /* maximum distance from zlib header */ +//#endif + var wsize; /* window size or zero if not using window */ + var whave; /* valid bytes in the window */ + var wnext; /* window write index */ + // Use `s_window` instead `window`, avoid conflict with instrumentation tools + var s_window; /* allocated sliding window, if wsize != 0 */ + var hold; /* local strm.hold */ + var bits; /* local strm.bits */ + var lcode; /* local strm.lencode */ + var dcode; /* local strm.distcode */ + var lmask; /* mask for first level of length codes */ + var dmask; /* mask for first level of distance codes */ + var here; /* retrieved table entry */ + var op; /* code bits, operation, extra bits, or */ + /* window position, window bytes to copy */ + var len; /* match length, unused bytes */ + var dist; /* match distance */ + var from; /* where to copy match from */ + var from_source; -/***/ 672: -/***/ (function(__unusedmodule, exports, __webpack_require__) { -"use strict"; + var input, output; // JS specific, because we have no pointers -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { - Object.defineProperty(o, "default", { enumerable: true, value: v }); -}) : function(o, v) { - o["default"] = v; -}); -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); - __setModuleDefault(result, mod); - return result; -}; -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -}; -var _a; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.getCmdPath = exports.tryGetExecutablePath = exports.isRooted = exports.isDirectory = exports.exists = exports.IS_WINDOWS = exports.unlink = exports.symlink = exports.stat = exports.rmdir = exports.rename = exports.readlink = exports.readdir = exports.mkdir = exports.lstat = exports.copyFile = exports.chmod = void 0; -const fs = __importStar(__webpack_require__(747)); -const path = __importStar(__webpack_require__(622)); -_a = fs.promises, exports.chmod = _a.chmod, exports.copyFile = _a.copyFile, exports.lstat = _a.lstat, exports.mkdir = _a.mkdir, exports.readdir = _a.readdir, exports.readlink = _a.readlink, exports.rename = _a.rename, exports.rmdir = _a.rmdir, exports.stat = _a.stat, exports.symlink = _a.symlink, exports.unlink = _a.unlink; -exports.IS_WINDOWS = process.platform === 'win32'; -function exists(fsPath) { - return __awaiter(this, void 0, void 0, function* () { - try { - yield exports.stat(fsPath); - } - catch (err) { - if (err.code === 'ENOENT') { - return false; - } - throw err; - } - return true; - }); -} -exports.exists = exists; -function isDirectory(fsPath, useStat = false) { - return __awaiter(this, void 0, void 0, function* () { - const stats = useStat ? yield exports.stat(fsPath) : yield exports.lstat(fsPath); - return stats.isDirectory(); - }); -} -exports.isDirectory = isDirectory; -/** - * On OSX/Linux, true if path starts with '/'. On Windows, true for paths like: - * \, \hello, \\hello\share, C:, and C:\hello (and corresponding alternate separator cases). - */ -function isRooted(p) { - p = normalizeSeparators(p); - if (!p) { - throw new Error('isRooted() parameter "p" cannot be empty'); - } - if (exports.IS_WINDOWS) { - return (p.startsWith('\\') || /^[A-Z]:/i.test(p) // e.g. \ or \hello or \\hello - ); // e.g. C: or C:\hello + /* copy state to local variables */ + state = strm.state; + //here = state.here; + _in = strm.next_in; + input = strm.input; + last = _in + (strm.avail_in - 5); + _out = strm.next_out; + output = strm.output; + beg = _out - (start - strm.avail_out); + end = _out + (strm.avail_out - 257); +//#ifdef INFLATE_STRICT + dmax = state.dmax; +//#endif + wsize = state.wsize; + whave = state.whave; + wnext = state.wnext; + s_window = state.window; + hold = state.hold; + bits = state.bits; + lcode = state.lencode; + dcode = state.distcode; + lmask = (1 << state.lenbits) - 1; + dmask = (1 << state.distbits) - 1; + + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + + top: + do { + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; } - return p.startsWith('/'); -} -exports.isRooted = isRooted; -/** - * Best effort attempt to determine whether a file exists and is executable. - * @param filePath file path to check - * @param extensions additional file extensions to try - * @return if file exists and is executable, returns the file path. otherwise empty string. 
- */ -function tryGetExecutablePath(filePath, extensions) { - return __awaiter(this, void 0, void 0, function* () { - let stats = undefined; - try { - // test file exists - stats = yield exports.stat(filePath); + + here = lcode[hold & lmask]; + + dolen: + for (;;) { // Goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + if (op === 0) { /* literal */ + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? + // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + output[_out++] = here & 0xffff/*here.val*/; + } + else if (op & 16) { /* length base */ + len = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + len += hold & ((1 << op) - 1); + hold >>>= op; + bits -= op; } - catch (err) { - if (err.code !== 'ENOENT') { - // eslint-disable-next-line no-console - console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`); - } + //Tracevv((stderr, "inflate: length %u\n", len)); + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; } - if (stats && stats.isFile()) { - if (exports.IS_WINDOWS) { - // on Windows, test for valid extension - const upperExt = path.extname(filePath).toUpperCase(); - if (extensions.some(validExt => validExt.toUpperCase() === upperExt)) { - return filePath; - } - } - else { - if (isUnixExecutable(stats)) { - return filePath; - } + here = dcode[hold & dmask]; + + dodist: + for (;;) { // goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + + if (op & 16) { /* distance base */ + dist = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } } - } - // try each extension - const originalFilePath = filePath; - for (const extension of extensions) { - filePath = originalFilePath + extension; - stats = undefined; - try { - stats = yield exports.stat(filePath); + dist += hold & ((1 << op) - 1); +//#ifdef INFLATE_STRICT + if (dist > dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break top; } - catch (err) { - if (err.code !== 'ENOENT') { - // eslint-disable-next-line no-console - console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`); +//#endif + hold >>>= op; + bits -= op; + //Tracevv((stderr, "inflate: distance %u\n", dist)); + op = _out - beg; /* max distance in output */ + if (dist > op) { /* see if copy from window */ + op = dist - op; /* distance back in window */ + if (op > whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break top; } - } - if (stats && stats.isFile()) { - if (exports.IS_WINDOWS) { - // preserve the case of the actual file (since an extension was appended) - try { - const directory = path.dirname(filePath); - const upperName = path.basename(filePath).toUpperCase(); - for (const actualName of yield exports.readdir(directory)) { - if (upperName === actualName.toUpperCase()) { - filePath = path.join(directory, actualName); - break; - } - } - } - catch (err) { - // eslint-disable-next-line no-console - console.log(`Unexpected error attempting to determine the actual case of the file '${filePath}': ${err}`); - } - return filePath; + +// (!) 
This block is disabled in zlib defaults, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// if (len <= op - whave) { +// do { +// output[_out++] = 0; +// } while (--len); +// continue top; +// } +// len -= op - whave; +// do { +// output[_out++] = 0; +// } while (--op > whave); +// if (op === 0) { +// from = _out - dist; +// do { +// output[_out++] = output[from++]; +// } while (--len); +// continue top; +// } +//#endif + } + from = 0; // window index + from_source = s_window; + if (wnext === 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; } - else { - if (isUnixExecutable(stats)) { - return filePath; - } + } + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; + if (op < len) { /* some from end of window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = 0; + if (wnext < len) { /* some from start of window */ + op = wnext; + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + } + else { /* contiguous in window */ + from += wnext - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + while (len > 2) { + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + len -= 3; + } + if (len) { + output[_out++] = from_source[from++]; + if (len > 1) { + output[_out++] = from_source[from++]; + } + } + } + else { + from = _out - dist; /* copy direct from output */ + do { /* minimum length is three */ + output[_out++] = output[from++]; + output[_out++] = output[from++]; + output[_out++] = output[from++]; + len -= 3; + } while (len > 2); + if (len) { + output[_out++] = output[from++]; + if (len > 1) { + output[_out++] = output[from++]; } + } } + } + else if ((op & 64) === 0) { /* 2nd level distance code */ + here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dodist; + } + else { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break top; + } + + break; // need to emulate goto via "continue" } - return ''; - }); -} -exports.tryGetExecutablePath = tryGetExecutablePath; -function normalizeSeparators(p) { - p = p || ''; - if (exports.IS_WINDOWS) { - // convert slashes on Windows - p = p.replace(/\//g, '\\'); - // remove redundant slashes - return p.replace(/\\\\+/g, '\\'); + } + else if ((op & 64) === 0) { /* 2nd level length code */ + here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dolen; + } + else if (op & 32) { /* end-of-block */ + //Tracevv((stderr, "inflate: end of block\n")); + state.mode = TYPE; + break top; + } + else { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break top; + } + + break; // need to emulate goto via "continue" } - // remove redundant slashes - return p.replace(/\/\/+/g, '/'); -} -// on Mac/Linux, test the execute bit -// R W X R W X R W X -// 256 128 64 32 16 8 4 2 1 -function isUnixExecutable(stats) { - return ((stats.mode & 1) > 0 || - ((stats.mode & 8) > 0 && stats.gid === process.getgid()) || - ((stats.mode & 64) > 0 && stats.uid === process.getuid())); -} -// Get the 
path of cmd.exe in windows -function getCmdPath() { - var _a; - return (_a = process.env['COMSPEC']) !== null && _a !== void 0 ? _a : `cmd.exe`; -} -exports.getCmdPath = getCmdPath; -//# sourceMappingURL=io-util.js.map + } while (_in < last && _out < end); + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + len = bits >> 3; + _in -= len; + bits -= len << 3; + hold &= (1 << bits) - 1; + + /* update state and return */ + strm.next_in = _in; + strm.next_out = _out; + strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last)); + strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end)); + state.hold = hold; + state.bits = bits; + return; +}; + /***/ }), -/***/ 685: -/***/ (function(module, __unusedexports, __webpack_require__) { +/***/ 409: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; @@ -12957,12848 +11207,13453 @@ exports.getCmdPath = getCmdPath; // misrepresented as being the original software. // 3. This notice may not be removed or altered from any source distribution. -var utils = __webpack_require__(999); - -var MAXBITS = 15; -var ENOUGH_LENS = 852; -var ENOUGH_DISTS = 592; -//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); +var utils = __nccwpck_require__(5483); +var adler32 = __nccwpck_require__(6924); +var crc32 = __nccwpck_require__(7242); +var inflate_fast = __nccwpck_require__(5349); +var inflate_table = __nccwpck_require__(6895); var CODES = 0; var LENS = 1; var DISTS = 2; -var lbase = [ /* Length codes 257..285 base */ - 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, - 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 -]; - -var lext = [ /* Length codes 257..285 extra */ - 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, - 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78 -]; - -var dbase = [ /* Distance codes 0..29 base */ - 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, - 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, - 8193, 12289, 16385, 24577, 0, 0 -]; - -var dext = [ /* Distance codes 0..29 extra */ - 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, - 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, - 28, 28, 29, 29, 64, 64 -]; - -module.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts) -{ - var bits = opts.bits; - //here = opts.here; /* table entry for duplication */ +/* Public constants ==========================================================*/ +/* ===========================================================================*/ - var len = 0; /* a code's length in bits */ - var sym = 0; /* index of code symbols */ - var min = 0, max = 0; /* minimum and maximum code lengths */ - var root = 0; /* number of index bits for root table */ - var curr = 0; /* number of index bits for current table */ - var drop = 0; /* code bits to drop for sub-table */ - var left = 0; /* number of prefix codes available */ - var used = 0; /* code entries in table used */ - var huff = 0; /* Huffman code */ - var incr; /* for incrementing code, index */ - var fill; /* index for replicating entries */ - var low; /* low bits for current root entry */ - var mask; /* mask for low root bits */ - var next; /* next available space in table */ - var base = null; /* base value table to use */ - var base_index = 0; -// var shoextra; /* extra bits table to use */ - var end; /* use base and extra for symbol > end */ - var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */ 
- var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */ - var extra = null; - var extra_index = 0; - var here_bits, here_op, here_val; +/* Allowed flush values; see deflate() and inflate() below for details */ +//var Z_NO_FLUSH = 0; +//var Z_PARTIAL_FLUSH = 1; +//var Z_SYNC_FLUSH = 2; +//var Z_FULL_FLUSH = 3; +var Z_FINISH = 4; +var Z_BLOCK = 5; +var Z_TREES = 6; - /* - Process a set of code lengths to create a canonical Huffman code. The - code lengths are lens[0..codes-1]. Each length corresponds to the - symbols 0..codes-1. The Huffman code is generated by first sorting the - symbols by length from short to long, and retaining the symbol order - for codes with equal lengths. Then the code starts with all zero bits - for the first code of the shortest length, and the codes are integer - increments for the same length, and zeros are appended as the length - increases. For the deflate format, these bits are stored backwards - from their more natural integer increment ordering, and so when the - decoding tables are built in the large loop below, the integer codes - are incremented backwards. - This routine assumes, but does not check, that all of the entries in - lens[] are in the range 0..MAXBITS. The caller must assure this. - 1..MAXBITS is interpreted as that code length. zero means that that - symbol does not occur in this code. +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ +var Z_OK = 0; +var Z_STREAM_END = 1; +var Z_NEED_DICT = 2; +//var Z_ERRNO = -1; +var Z_STREAM_ERROR = -2; +var Z_DATA_ERROR = -3; +var Z_MEM_ERROR = -4; +var Z_BUF_ERROR = -5; +//var Z_VERSION_ERROR = -6; - The codes are sorted by computing a count of codes for each length, - creating from that a table of starting indices for each length in the - sorted table, and then entering the symbols in order in the sorted - table. The sorted table is work[], with that space being provided by - the caller. +/* The deflate compression method */ +var Z_DEFLATED = 8; - The length counts are used for other purposes as well, i.e. finding - the minimum and maximum length codes, determining if there are any - codes at all, checking for a valid set of lengths, and looking ahead - at length counts to determine sub-table sizes when building the - decoding tables. 
- */ - /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ - for (len = 0; len <= MAXBITS; len++) { - count[len] = 0; - } - for (sym = 0; sym < codes; sym++) { - count[lens[lens_index + sym]]++; - } +/* STATES ====================================================================*/ +/* ===========================================================================*/ - /* bound code lengths, force root to be within code lengths */ - root = bits; - for (max = MAXBITS; max >= 1; max--) { - if (count[max] !== 0) { break; } - } - if (root > max) { - root = max; - } - if (max === 0) { /* no symbols to code at all */ - //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */ - //table.bits[opts.table_index] = 1; //here.bits = (var char)1; - //table.val[opts.table_index++] = 0; //here.val = (var short)0; - table[table_index++] = (1 << 24) | (64 << 16) | 0; +var HEAD = 1; /* i: waiting for magic header */ +var FLAGS = 2; /* i: waiting for method and flags (gzip) */ +var TIME = 3; /* i: waiting for modification time (gzip) */ +var OS = 4; /* i: waiting for extra flags and operating system (gzip) */ +var EXLEN = 5; /* i: waiting for extra length (gzip) */ +var EXTRA = 6; /* i: waiting for extra bytes (gzip) */ +var NAME = 7; /* i: waiting for end of file name (gzip) */ +var COMMENT = 8; /* i: waiting for end of comment (gzip) */ +var HCRC = 9; /* i: waiting for header crc (gzip) */ +var DICTID = 10; /* i: waiting for dictionary check value */ +var DICT = 11; /* waiting for inflateSetDictionary() call */ +var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ +var TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */ +var STORED = 14; /* i: waiting for stored size (length and complement) */ +var COPY_ = 15; /* i/o: same as COPY below, but only first time in */ +var COPY = 16; /* i/o: waiting for input or output to copy stored block */ +var TABLE = 17; /* i: waiting for dynamic block table lengths */ +var LENLENS = 18; /* i: waiting for code length code lengths */ +var CODELENS = 19; /* i: waiting for length/lit and distance code lengths */ +var LEN_ = 20; /* i: same as LEN below, but only first time in */ +var LEN = 21; /* i: waiting for length/lit/eob code */ +var LENEXT = 22; /* i: waiting for length extra bits */ +var DIST = 23; /* i: waiting for distance code */ +var DISTEXT = 24; /* i: waiting for distance extra bits */ +var MATCH = 25; /* o: waiting for output space to copy string */ +var LIT = 26; /* o: waiting for output space to write literal */ +var CHECK = 27; /* i: waiting for 32-bit check value */ +var LENGTH = 28; /* i: waiting for 32-bit length (gzip) */ +var DONE = 29; /* finished check, done -- remain here until reset */ +var BAD = 30; /* got a data error -- remain here until reset */ +var MEM = 31; /* got an inflate() memory error -- remain here until reset */ +var SYNC = 32; /* looking for synchronization bytes to restart inflate() */ - //table.op[opts.table_index] = 64; - //table.bits[opts.table_index] = 1; - //table.val[opts.table_index++] = 0; - table[table_index++] = (1 << 24) | (64 << 16) | 0; +/* ===========================================================================*/ - opts.bits = 1; - return 0; /* no symbols, but wait for decoding to report error */ - } - for (min = 1; min < max; min++) { - if (count[min] !== 0) { break; } - } - if (root < min) { - root = min; - } - /* check for an over-subscribed or incomplete set of lengths */ - left = 1; - for (len = 1; len <= MAXBITS; len++) { - left <<= 1; 
- left -= count[len]; - if (left < 0) { - return -1; - } /* over-subscribed */ - } - if (left > 0 && (type === CODES || max !== 1)) { - return -1; /* incomplete set */ - } - /* generate offsets into symbol table for each length for sorting */ - offs[1] = 0; - for (len = 1; len < MAXBITS; len++) { - offs[len + 1] = offs[len] + count[len]; - } +var ENOUGH_LENS = 852; +var ENOUGH_DISTS = 592; +//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); - /* sort symbols by length, by symbol order within each length */ - for (sym = 0; sym < codes; sym++) { - if (lens[lens_index + sym] !== 0) { - work[offs[lens[lens_index + sym]]++] = sym; - } - } +var MAX_WBITS = 15; +/* 32K LZ77 window */ +var DEF_WBITS = MAX_WBITS; - /* - Create and fill in decoding tables. In this loop, the table being - filled is at next and has curr index bits. The code being used is huff - with length len. That code is converted to an index by dropping drop - bits off of the bottom. For codes where len is less than drop + curr, - those top drop + curr - len bits are incremented through all values to - fill the table with replicated entries. - root is the number of index bits for the root table. When len exceeds - root, sub-tables are created pointed to by the root entry with an index - of the low root bits of huff. This is saved in low to check for when a - new sub-table should be started. drop is zero when the root table is - being filled, and drop is root when sub-tables are being filled. +function zswap32(q) { + return (((q >>> 24) & 0xff) + + ((q >>> 8) & 0xff00) + + ((q & 0xff00) << 8) + + ((q & 0xff) << 24)); +} - When a new sub-table is needed, it is necessary to look ahead in the - code lengths to determine what size sub-table is needed. The length - counts are used for this, and so count[] is decremented as codes are - entered in the tables. - used keeps track of how many table entries have been allocated from the - provided *table space. It is checked for LENS and DIST tables against - the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in - the initial root table size constants. See the comments in inftrees.h - for more information. +function InflateState() { + this.mode = 0; /* current inflate mode */ + this.last = false; /* true if processing last block */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.havedict = false; /* true if dictionary provided */ + this.flags = 0; /* gzip header method and flags (0 if zlib) */ + this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */ + this.check = 0; /* protected copy of check value */ + this.total = 0; /* protected copy of output count */ + // TODO: may be {} + this.head = null; /* where to save gzip header information */ - sym increments through all symbols, and the loop terminates when - all codes of length max, i.e. all codes, have been processed. This - routine permits incomplete codes, so another loop after this one fills - in the rest of the decoding tables with invalid code markers. 
- */ + /* sliding window */ + this.wbits = 0; /* log base 2 of requested window size */ + this.wsize = 0; /* window size or zero if not using window */ + this.whave = 0; /* valid bytes in the window */ + this.wnext = 0; /* window write index */ + this.window = null; /* allocated sliding window, if needed */ - /* set up for code type */ - // poor man optimization - use if-else instead of switch, - // to avoid deopts in old v8 - if (type === CODES) { - base = extra = work; /* dummy value--not used */ - end = 19; + /* bit accumulator */ + this.hold = 0; /* input bit accumulator */ + this.bits = 0; /* number of bits in "in" */ - } else if (type === LENS) { - base = lbase; - base_index -= 257; - extra = lext; - extra_index -= 257; - end = 256; + /* for string and stored block copying */ + this.length = 0; /* literal or length of data to copy */ + this.offset = 0; /* distance back to copy string from */ - } else { /* DISTS */ - base = dbase; - extra = dext; - end = -1; - } + /* for table and code decoding */ + this.extra = 0; /* extra bits needed */ - /* initialize opts for loop */ - huff = 0; /* starting code */ - sym = 0; /* starting code symbol */ - len = min; /* starting code length */ - next = table_index; /* current table to fill in */ - curr = root; /* current table index bits */ - drop = 0; /* current bits to drop from code for index */ - low = -1; /* trigger new sub-table when len > root */ - used = 1 << root; /* use root table entries */ - mask = used - 1; /* mask for comparing low */ + /* fixed and dynamic code tables */ + this.lencode = null; /* starting table for length/literal codes */ + this.distcode = null; /* starting table for distance codes */ + this.lenbits = 0; /* index bits for lencode */ + this.distbits = 0; /* index bits for distcode */ - /* check available table space */ - if ((type === LENS && used > ENOUGH_LENS) || - (type === DISTS && used > ENOUGH_DISTS)) { - return 1; - } + /* dynamic table building */ + this.ncode = 0; /* number of code length code lengths */ + this.nlen = 0; /* number of length code lengths */ + this.ndist = 0; /* number of distance code lengths */ + this.have = 0; /* number of code lengths in lens[] */ + this.next = null; /* next available space in codes[] */ - /* process all codes and make table entries */ - for (;;) { - /* create table entry */ - here_bits = len - drop; - if (work[sym] < end) { - here_op = 0; - here_val = work[sym]; - } - else if (work[sym] > end) { - here_op = extra[extra_index + work[sym]]; - here_val = base[base_index + work[sym]]; - } - else { - here_op = 32 + 64; /* end of block */ - here_val = 0; - } + this.lens = new utils.Buf16(320); /* temporary storage for code lengths */ + this.work = new utils.Buf16(288); /* work area for code table building */ - /* replicate for those indices with low len bits equal to huff */ - incr = 1 << (len - drop); - fill = 1 << curr; - min = fill; /* save offset to next table */ - do { - fill -= incr; - table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0; - } while (fill !== 0); + /* + because we don't have pointers in js, we use lencode and distcode directly + as buffers so we don't need codes + */ + //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */ + this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */ + this.distdyn = null; /* dynamic table for distance codes (JS specific) */ + this.sane = 0; /* if false, allow invalid distance too far */ + this.back = 0; /* bits back of last unprocessed length/lit */ + 
this.was = 0; /* initial length of match */ +} - /* backwards increment the len-bit code huff */ - incr = 1 << (len - 1); - while (huff & incr) { - incr >>= 1; - } - if (incr !== 0) { - huff &= incr - 1; - huff += incr; - } else { - huff = 0; - } +function inflateResetKeep(strm) { + var state; - /* go to next symbol, update count, len */ - sym++; - if (--count[len] === 0) { - if (len === max) { break; } - len = lens[lens_index + work[sym]]; - } + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + strm.total_in = strm.total_out = state.total = 0; + strm.msg = ''; /*Z_NULL*/ + if (state.wrap) { /* to support ill-conceived Java test suite */ + strm.adler = state.wrap & 1; + } + state.mode = HEAD; + state.last = 0; + state.havedict = 0; + state.dmax = 32768; + state.head = null/*Z_NULL*/; + state.hold = 0; + state.bits = 0; + //state.lencode = state.distcode = state.next = state.codes; + state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS); + state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS); - /* create new sub-table if needed */ - if (len > root && (huff & mask) !== low) { - /* if first time, transition to sub-tables */ - if (drop === 0) { - drop = root; - } + state.sane = 1; + state.back = -1; + //Tracev((stderr, "inflate: reset\n")); + return Z_OK; +} - /* increment past last table */ - next += min; /* here min is 1 << curr */ +function inflateReset(strm) { + var state; - /* determine length of next table */ - curr = len - drop; - left = 1 << curr; - while (curr + drop < max) { - left -= count[curr + drop]; - if (left <= 0) { break; } - curr++; - left <<= 1; - } + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + state.wsize = 0; + state.whave = 0; + state.wnext = 0; + return inflateResetKeep(strm); - /* check for enough space */ - used += 1 << curr; - if ((type === LENS && used > ENOUGH_LENS) || - (type === DISTS && used > ENOUGH_DISTS)) { - return 1; - } +} - /* point entry in root table to sub-table */ - low = huff & mask; - /*table.op[low] = curr; - table.bits[low] = root; - table.val[low] = next - opts.table_index;*/ - table[low] = (root << 24) | (curr << 16) | (next - table_index) |0; +function inflateReset2(strm, windowBits) { + var wrap; + var state; + + /* get the state */ + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 1; + if (windowBits < 48) { + windowBits &= 15; } } - /* fill in remaining table entry if code is incomplete (guaranteed to have - at most one remaining entry, since if the code is incomplete, the - maximum code length that was allowed to get this far is one bit) */ - if (huff !== 0) { - //table.op[next + huff] = 64; /* invalid code marker */ - //table.bits[next + huff] = len - drop; - //table.val[next + huff] = 0; - table[next + huff] = ((len - drop) << 24) | (64 << 16) |0; - } + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) { + return Z_STREAM_ERROR; + } + if (state.window !== null && state.wbits !== windowBits) { + state.window = null; + } + + /* update state and reset the rest of it */ + state.wrap = wrap; + state.wbits = windowBits; + return inflateReset(strm); +} - /* set return parameters */ - //opts.table_index += used; - opts.bits = root; - return 0; -}; +function inflateInit2(strm, windowBits) { + var ret; + var state; + if 
(!strm) { return Z_STREAM_ERROR; } + //strm.msg = Z_NULL; /* in case we return an error */ -/***/ }), + state = new InflateState(); -/***/ 689: -/***/ (function(module, __unusedexports, __webpack_require__) { + //if (state === Z_NULL) return Z_MEM_ERROR; + //Tracev((stderr, "inflate: allocated\n")); + strm.state = state; + state.window = null/*Z_NULL*/; + ret = inflateReset2(strm, windowBits); + if (ret !== Z_OK) { + strm.state = null/*Z_NULL*/; + } + return ret; +} -"use strict"; +function inflateInit(strm) { + return inflateInit2(strm, DEF_WBITS); +} -/* eslint-disable no-var */ +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. + */ +var virgin = true; -var reusify = __webpack_require__(440) +var lenfix, distfix; // We have no pointers in JS, so keep tables separate -function fastqueue (context, worker, concurrency) { - if (typeof context === 'function') { - concurrency = worker - worker = context - context = null - } +function fixedtables(state) { + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + var sym; - if (concurrency < 1) { - throw new Error('fastqueue concurrency must be greater than 1') - } + lenfix = new utils.Buf32(512); + distfix = new utils.Buf32(32); - var cache = reusify(Task) - var queueHead = null - var queueTail = null - var _running = 0 - var errorHandler = null + /* literal/length table */ + sym = 0; + while (sym < 144) { state.lens[sym++] = 8; } + while (sym < 256) { state.lens[sym++] = 9; } + while (sym < 280) { state.lens[sym++] = 7; } + while (sym < 288) { state.lens[sym++] = 8; } - var self = { - push: push, - drain: noop, - saturated: noop, - pause: pause, - paused: false, - concurrency: concurrency, - running: running, - resume: resume, - idle: idle, - length: length, - getQueue: getQueue, - unshift: unshift, - empty: noop, - kill: kill, - killAndDrain: killAndDrain, - error: error - } + inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 }); - return self + /* distance table */ + sym = 0; + while (sym < 32) { state.lens[sym++] = 5; } - function running () { - return _running - } + inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 }); - function pause () { - self.paused = true + /* do this just once */ + virgin = false; } - function length () { - var current = queueHead - var counter = 0 + state.lencode = lenfix; + state.lenbits = 9; + state.distcode = distfix; + state.distbits = 5; +} - while (current) { - current = current.next - counter++ - } - return counter - } +/* + Update the window with the last wsize (normally 32K) bytes written before + returning. If window does not exist yet, create it. This is only called + when a window is already in use, or when output has been written during this + inflate call, but the end of the deflate stream has not been reached yet. + It is also called to create a window for dictionary data when a dictionary + is loaded. 
- function getQueue () { - var current = queueHead - var tasks = [] + Providing output buffers larger than 32K to inflate() should provide a speed + advantage, since only the last 32K of output is copied to the sliding window + upon return from inflate(), and since all distances after the first 32K of + output will fall in the output data, making match copies simpler and faster. + The advantage may be dependent on the size of the processor's data caches. + */ +function updatewindow(strm, src, end, copy) { + var dist; + var state = strm.state; - while (current) { - tasks.push(current.value) - current = current.next - } + /* if it hasn't been done already, allocate space for the window */ + if (state.window === null) { + state.wsize = 1 << state.wbits; + state.wnext = 0; + state.whave = 0; - return tasks + state.window = new utils.Buf8(state.wsize); } - function resume () { - if (!self.paused) return - self.paused = false - for (var i = 0; i < self.concurrency; i++) { - _running++ - release() + /* copy state->wsize or less output bytes into the circular window */ + if (copy >= state.wsize) { + utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0); + state.wnext = 0; + state.whave = state.wsize; + } + else { + dist = state.wsize - state.wnext; + if (dist > copy) { + dist = copy; + } + //zmemcpy(state->window + state->wnext, end - copy, dist); + utils.arraySet(state.window, src, end - copy, dist, state.wnext); + copy -= dist; + if (copy) { + //zmemcpy(state->window, end - copy, copy); + utils.arraySet(state.window, src, end - copy, copy, 0); + state.wnext = copy; + state.whave = state.wsize; + } + else { + state.wnext += dist; + if (state.wnext === state.wsize) { state.wnext = 0; } + if (state.whave < state.wsize) { state.whave += dist; } } } + return 0; +} - function idle () { - return _running === 0 && self.length() === 0 - } +function inflate(strm, flush) { + var state; + var input, output; // input/output buffers + var next; /* next input INDEX */ + var put; /* next output INDEX */ + var have, left; /* available input and output */ + var hold; /* bit buffer */ + var bits; /* bits in bit buffer */ + var _in, _out; /* save starting available input and output */ + var copy; /* number of stored or match bytes to copy */ + var from; /* where to copy match bytes from */ + var from_source; + var here = 0; /* current decoding table entry */ + var here_bits, here_op, here_val; // paked "here" denormalized (JS specific) + //var last; /* parent table entry */ + var last_bits, last_op, last_val; // paked "last" denormalized (JS specific) + var len; /* length to copy for repeats, bits to drop */ + var ret; /* return code */ + var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */ + var opts; - function push (value, done) { - var current = cache.get() + var n; // temporary var for NEED_BITS - current.context = context - current.release = release - current.value = value - current.callback = done || noop - current.errorHandler = errorHandler + var order = /* permutation of code lengths */ + [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; - if (_running === self.concurrency || self.paused) { - if (queueTail) { - queueTail.next = current - queueTail = current - } else { - queueHead = current - queueTail = current - self.saturated() - } - } else { - _running++ - worker.call(context, current.value, current.worked) - } + + if (!strm || !strm.state || !strm.output || + (!strm.input && strm.avail_in !== 0)) { + return Z_STREAM_ERROR; } - function unshift 
(value, done) { - var current = cache.get() + state = strm.state; + if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */ + + + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + _in = have; + _out = left; + ret = Z_OK; - current.context = context - current.release = release - current.value = value - current.callback = done || noop + inf_leave: // goto emulation + for (;;) { + switch (state.mode) { + case HEAD: + if (state.wrap === 0) { + state.mode = TYPEDO; + break; + } + //=== NEEDBITS(16); + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */ + state.check = 0/*crc32(0L, Z_NULL, 0)*/; + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// - if (_running === self.concurrency || self.paused) { - if (queueHead) { - current.next = queueHead - queueHead = current - } else { - queueHead = current - queueTail = current - self.saturated() - } - } else { - _running++ - worker.call(context, current.value, current.worked) - } - } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = FLAGS; + break; + } + state.flags = 0; /* expect zlib header */ + if (state.head) { + state.head.done = false; + } + if (!(state.wrap & 1) || /* check if zlib header allowed */ + (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) { + strm.msg = 'incorrect header check'; + state.mode = BAD; + break; + } + if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// + len = (hold & 0x0f)/*BITS(4)*/ + 8; + if (state.wbits === 0) { + state.wbits = len; + } + else if (len > state.wbits) { + strm.msg = 'invalid window size'; + state.mode = BAD; + break; + } + state.dmax = 1 << len; + //Tracev((stderr, "inflate: zlib header ok\n")); + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = hold & 0x200 ? 
DICTID : TYPE; + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + break; + case FLAGS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.flags = hold; + if ((state.flags & 0xff) !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + if (state.flags & 0xe000) { + strm.msg = 'unknown header flags set'; + state.mode = BAD; + break; + } + if (state.head) { + state.head.text = ((hold >> 8) & 1); + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = TIME; + /* falls through */ + case TIME: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.time = hold; + } + if (state.flags & 0x0200) { + //=== CRC4(state.check, hold) + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + hbuf[2] = (hold >>> 16) & 0xff; + hbuf[3] = (hold >>> 24) & 0xff; + state.check = crc32(state.check, hbuf, 4, 0); + //=== + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = OS; + /* falls through */ + case OS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.xflags = (hold & 0xff); + state.head.os = (hold >> 8); + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = EXLEN; + /* falls through */ + case EXLEN: + if (state.flags & 0x0400) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length = hold; + if (state.head) { + state.head.extra_len = hold; + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + else if (state.head) { + state.head.extra = null/*Z_NULL*/; + } + state.mode = EXTRA; + /* falls through */ + case EXTRA: + if (state.flags & 0x0400) { + copy = state.length; + if (copy > have) { copy = have; } + if (copy) { + if (state.head) { + len = state.head.extra_len - state.length; + if (!state.head.extra) { + // Use untyped array for more convenient processing later + state.head.extra = new Array(state.head.extra_len); + } + utils.arraySet( + state.head.extra, + input, + next, + // extra field is limited to 65536 bytes + // - no need for additional size check + copy, + /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/ + len + ); + //zmemcpy(state.head.extra + len, next, + // len + copy > state.head.extra_max ? 
+ // state.head.extra_max - len : copy); + } + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + state.length -= copy; + } + if (state.length) { break inf_leave; } + } + state.length = 0; + state.mode = NAME; + /* falls through */ + case NAME: + if (state.flags & 0x0800) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + // TODO: 2 or 1 bytes? + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.name_max*/)) { + state.head.name += String.fromCharCode(len); + } + } while (len && copy < have); - function release (holder) { - if (holder) { - cache.release(holder) - } - var next = queueHead - if (next) { - if (!self.paused) { - if (queueTail === queueHead) { - queueTail = null + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } } - queueHead = next.next - next.next = null - worker.call(context, next.value, next.worked) - if (queueTail === null) { - self.empty() + else if (state.head) { + state.head.name = null; + } + state.length = 0; + state.mode = COMMENT; + /* falls through */ + case COMMENT: + if (state.flags & 0x1000) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.comm_max*/)) { + state.head.comment += String.fromCharCode(len); + } + } while (len && copy < have); + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.comment = null; + } + state.mode = HCRC; + /* falls through */ + case HCRC: + if (state.flags & 0x0200) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (hold !== (state.check & 0xffff)) { + strm.msg = 'header crc mismatch'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + if (state.head) { + state.head.hcrc = ((state.flags >> 9) & 1); + state.head.done = true; + } + strm.adler = state.check = 0; + state.mode = TYPE; + break; + case DICTID: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + strm.adler = state.check = zswap32(hold); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = DICT; + /* falls through */ + case DICT: + if (state.havedict === 0) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + return Z_NEED_DICT; + } + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = TYPE; + /* falls through */ + case TYPE: + if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; } + /* falls through */ + case TYPEDO: + if (state.last) { + //--- BYTEBITS() ---// + hold >>>= bits & 7; + bits -= bits & 7; + //---// + state.mode = CHECK; + break; + } + //=== NEEDBITS(3); */ + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.last = (hold & 0x01)/*BITS(1)*/; + //--- 
DROPBITS(1) ---// + hold >>>= 1; + bits -= 1; + //---// + + switch ((hold & 0x03)/*BITS(2)*/) { + case 0: /* stored block */ + //Tracev((stderr, "inflate: stored block%s\n", + // state.last ? " (last)" : "")); + state.mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + //Tracev((stderr, "inflate: fixed codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = LEN_; /* decode codes */ + if (flush === Z_TREES) { + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break inf_leave; + } + break; + case 2: /* dynamic block */ + //Tracev((stderr, "inflate: dynamic codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = TABLE; + break; + case 3: + strm.msg = 'invalid block type'; + state.mode = BAD; + } + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break; + case STORED: + //--- BYTEBITS() ---// /* go to byte boundary */ + hold >>>= bits & 7; + bits -= bits & 7; + //---// + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) { + strm.msg = 'invalid stored block lengths'; + state.mode = BAD; + break; + } + state.length = hold & 0xffff; + //Tracev((stderr, "inflate: stored length %u\n", + // state.length)); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = COPY_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case COPY_: + state.mode = COPY; + /* falls through */ + case COPY: + copy = state.length; + if (copy) { + if (copy > have) { copy = have; } + if (copy > left) { copy = left; } + if (copy === 0) { break inf_leave; } + //--- zmemcpy(put, next, copy); --- + utils.arraySet(output, input, next, copy, put); + //---// + have -= copy; + next += copy; + left -= copy; + put += copy; + state.length -= copy; + break; + } + //Tracev((stderr, "inflate: stored end\n")); + state.mode = TYPE; + break; + case TABLE: + //=== NEEDBITS(14); */ + while (bits < 14) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4; + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// +//#ifndef PKZIP_BUG_WORKAROUND + if (state.nlen > 286 || state.ndist > 30) { + strm.msg = 'too many length or distance symbols'; + state.mode = BAD; + break; + } +//#endif + //Tracev((stderr, "inflate: table sizes ok\n")); + state.have = 0; + state.mode = LENLENS; + /* falls through */ + case LENLENS: + while (state.have < state.ncode) { + //=== NEEDBITS(3); + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.lens[order[state.have++]] = (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// } - } else { - _running-- - } - } else if (--_running === 0) { - self.drain() - } - } - - function kill () { - queueHead = null - queueTail = null - self.drain = noop - } - - function killAndDrain () { - queueHead = null - queueTail = null - self.drain() - self.drain = noop - } - - function error (handler) { - errorHandler = handler - } -} - -function noop () {} - -function Task () { - this.value = null - this.callback = noop - this.next = null 
- this.release = noop - this.context = null - this.errorHandler = null - - var self = this - - this.worked = function worked (err, result) { - var callback = self.callback - var errorHandler = self.errorHandler - var val = self.value - self.value = null - self.callback = noop - if (self.errorHandler) { - errorHandler(err, val) - } - callback.call(self.context, err, result) - self.release(self) - } -} - -function queueAsPromised (context, worker, concurrency) { - if (typeof context === 'function') { - concurrency = worker - worker = context - context = null - } - - function asyncWrapper (arg, cb) { - worker.call(this, arg) - .then(function (res) { - cb(null, res) - }, cb) - } - - var queue = fastqueue(context, asyncWrapper, concurrency) - - var pushCb = queue.push - var unshiftCb = queue.unshift - - queue.push = push - queue.unshift = unshift - queue.drained = drained - - return queue - - function push (value) { - var p = new Promise(function (resolve, reject) { - pushCb(value, function (err, result) { - if (err) { - reject(err) - return + while (state.have < 19) { + state.lens[order[state.have++]] = 0; } - resolve(result) - }) - }) - - // Let's fork the promise chain to - // make the error bubble up to the user but - // not lead to a unhandledRejection - p.catch(noop) + // We have separate tables & no pointers. 2 commented lines below not needed. + //state.next = state.codes; + //state.lencode = state.next; + // Switch to use dynamic table + state.lencode = state.lendyn; + state.lenbits = 7; - return p - } + opts = { bits: state.lenbits }; + ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts); + state.lenbits = opts.bits; - function unshift (value) { - var p = new Promise(function (resolve, reject) { - unshiftCb(value, function (err, result) { - if (err) { - reject(err) - return + if (ret) { + strm.msg = 'invalid code lengths set'; + state.mode = BAD; + break; } - resolve(result) - }) - }) - - // Let's fork the promise chain to - // make the error bubble up to the user but - // not lead to a unhandledRejection - p.catch(noop) - - return p - } - - function drained () { - var previousDrain = queue.drain - - var p = new Promise(function (resolve) { - queue.drain = function () { - previousDrain() - resolve() - } - }) - - return p - } -} - -module.exports = fastqueue -module.exports.promise = queueAsPromised - - -/***/ }), - -/***/ 691: -/***/ (function(module) { - -"use strict"; - - -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
- -module.exports = { + //Tracev((stderr, "inflate: code lengths ok\n")); + state.have = 0; + state.mode = CODELENS; + /* falls through */ + case CODELENS: + while (state.have < state.nlen + state.ndist) { + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; - /* Allowed flush values; see deflate() and inflate() below for details */ - Z_NO_FLUSH: 0, - Z_PARTIAL_FLUSH: 1, - Z_SYNC_FLUSH: 2, - Z_FULL_FLUSH: 3, - Z_FINISH: 4, - Z_BLOCK: 5, - Z_TREES: 6, + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_val < 16) { + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.lens[state.have++] = here_val; + } + else { + if (here_val === 16) { + //=== NEEDBITS(here.bits + 2); + n = here_bits + 2; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + if (state.have === 0) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + len = state.lens[state.have - 1]; + copy = 3 + (hold & 0x03);//BITS(2); + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + } + else if (here_val === 17) { + //=== NEEDBITS(here.bits + 3); + n = here_bits + 3; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 3 + (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + else { + //=== NEEDBITS(here.bits + 7); + n = here_bits + 7; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 11 + (hold & 0x7f);//BITS(7); + //--- DROPBITS(7) ---// + hold >>>= 7; + bits -= 7; + //---// + } + if (state.have + copy > state.nlen + state.ndist) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + while (copy--) { + state.lens[state.have++] = len; + } + } + } - /* Return codes for the compression/decompression functions. Negative values - * are errors, positive values are used for special but normal events. 
- */ - Z_OK: 0, - Z_STREAM_END: 1, - Z_NEED_DICT: 2, - Z_ERRNO: -1, - Z_STREAM_ERROR: -2, - Z_DATA_ERROR: -3, - //Z_MEM_ERROR: -4, - Z_BUF_ERROR: -5, - //Z_VERSION_ERROR: -6, + /* handle error breaks in while */ + if (state.mode === BAD) { break; } - /* compression levels */ - Z_NO_COMPRESSION: 0, - Z_BEST_SPEED: 1, - Z_BEST_COMPRESSION: 9, - Z_DEFAULT_COMPRESSION: -1, + /* check for end-of-block code (better have one) */ + if (state.lens[256] === 0) { + strm.msg = 'invalid code -- missing end-of-block'; + state.mode = BAD; + break; + } + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state.lenbits = 9; - Z_FILTERED: 1, - Z_HUFFMAN_ONLY: 2, - Z_RLE: 3, - Z_FIXED: 4, - Z_DEFAULT_STRATEGY: 0, + opts = { bits: state.lenbits }; + ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. + // state.next_index = opts.table_index; + state.lenbits = opts.bits; + // state.lencode = state.next; - /* Possible values of the data_type field (though see inflate()) */ - Z_BINARY: 0, - Z_TEXT: 1, - //Z_ASCII: 1, // = Z_TEXT (deprecated) - Z_UNKNOWN: 2, + if (ret) { + strm.msg = 'invalid literal/lengths set'; + state.mode = BAD; + break; + } - /* The deflate compression method */ - Z_DEFLATED: 8 - //Z_NULL: null // Use -1 or null inline, depending on var type -}; + state.distbits = 6; + //state.distcode.copy(state.codes); + // Switch to use dynamic table + state.distcode = state.distdyn; + opts = { bits: state.distbits }; + ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ // state.next_index = opts.table_index; + state.distbits = opts.bits; + // state.distcode = state.next; + if (ret) { + strm.msg = 'invalid distances set'; + state.mode = BAD; + break; + } + //Tracev((stderr, 'inflate: codes ok\n')); + state.mode = LEN_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case LEN_: + state.mode = LEN; + /* falls through */ + case LEN: + if (have >= 6 && left >= 258) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + inflate_fast(strm, _out); + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- -/***/ }), + if (state.mode === TYPE) { + state.back = -1; + } + break; + } + state.back = 0; + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; -/***/ 703: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + if (here_bits <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_op && (here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.lencode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; -"use strict"; + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + state.length = here_val; + if (here_op === 0) { + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + state.mode = LIT; + break; + } + if (here_op & 32) { + //Tracevv((stderr, "inflate: end of block\n")); + state.back = -1; + state.mode = TYPE; + break; + } + if (here_op & 64) { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break; + } + state.extra = here_op & 15; + state.mode = LENEXT; + /* falls through */ + case LENEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } + //Tracevv((stderr, "inflate: length %u\n", state.length)); + state.was = state.length; + state.mode = DIST; + /* falls through */ + case DIST: + for (;;) { + here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; -Object.defineProperty(exports, "__esModule", { value: true }); -const utils = __webpack_require__(444); -class EntryFilter { - constructor(_settings, _micromatchOptions) { - this._settings = _settings; - this._micromatchOptions = _micromatchOptions; - this.index = new Map(); - } - getFilter(positive, negative) { - const positiveRe = utils.pattern.convertPatternsToRe(positive, this._micromatchOptions); - const negativeRe = utils.pattern.convertPatternsToRe(negative, this._micromatchOptions); - return (entry) => this._filter(entry, positiveRe, negativeRe); - } - _filter(entry, positiveRe, negativeRe) { - if (this._settings.unique && this._isDuplicateEntry(entry)) { - return false; + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// } - if (this._onlyFileFilter(entry) || this._onlyDirectoryFilter(entry)) { - return false; + if ((here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.distcode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; } - if (this._isSkippedByAbsoluteNegativePatterns(entry.path, negativeRe)) { - return false; + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + if (here_op & 64) { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break; } - const filepath = this._settings.baseNameMatch ? 
entry.name : entry.path; - const isMatched = this._isMatchToPatterns(filepath, positiveRe) && !this._isMatchToPatterns(entry.path, negativeRe); - if (this._settings.unique && isMatched) { - this._createIndexRecord(entry); + state.offset = here_val; + state.extra = (here_op) & 15; + state.mode = DISTEXT; + /* falls through */ + case DISTEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; } - return isMatched; - } - _isDuplicateEntry(entry) { - return this.index.has(entry.path); - } - _createIndexRecord(entry) { - this.index.set(entry.path, undefined); - } - _onlyFileFilter(entry) { - return this._settings.onlyFiles && !entry.dirent.isFile(); - } - _onlyDirectoryFilter(entry) { - return this._settings.onlyDirectories && !entry.dirent.isDirectory(); - } - _isSkippedByAbsoluteNegativePatterns(entryPath, patternsRe) { - if (!this._settings.absolute) { - return false; +//#ifdef INFLATE_STRICT + if (state.offset > state.dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; } - const fullpath = utils.path.makeAbsolute(this._settings.cwd, entryPath); - return utils.pattern.matchAny(fullpath, patternsRe); - } - /** - * First, just trying to apply patterns to the path. - * Second, trying to apply patterns to the path with final slash. - */ - _isMatchToPatterns(entryPath, patternsRe) { - const filepath = utils.path.removeLeadingDotSegment(entryPath); - return utils.pattern.matchAny(filepath, patternsRe) || utils.pattern.matchAny(filepath + '/', patternsRe); - } -} -exports.default = EntryFilter; - - -/***/ }), - -/***/ 720: -/***/ (function(module, __unusedexports, __webpack_require__) { - -"use strict"; - - -// Dependencies -var protocols = __webpack_require__(737); - -/** - * isSsh - * Checks if an input value is a ssh url or not. - * - * @name isSsh - * @function - * @param {String|Array} input The input url or an array of protocols. - * @return {Boolean} `true` if the input is a ssh url, `false` otherwise. - */ -function isSsh(input) { - - if (Array.isArray(input)) { - return input.indexOf("ssh") !== -1 || input.indexOf("rsync") !== -1; - } - - if (typeof input !== "string") { - return false; - } +//#endif + //Tracevv((stderr, "inflate: distance %u\n", state.offset)); + state.mode = MATCH; + /* falls through */ + case MATCH: + if (left === 0) { break inf_leave; } + copy = _out - left; + if (state.offset > copy) { /* copy from window */ + copy = state.offset - copy; + if (copy > state.whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +// (!) 
This block is disabled in zlib defaults, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// Trace((stderr, "inflate.c too far\n")); +// copy -= state.whave; +// if (copy > state.length) { copy = state.length; } +// if (copy > left) { copy = left; } +// left -= copy; +// state.length -= copy; +// do { +// output[put++] = 0; +// } while (--copy); +// if (state.length === 0) { state.mode = LEN; } +// break; +//#endif + } + if (copy > state.wnext) { + copy -= state.wnext; + from = state.wsize - copy; + } + else { + from = state.wnext - copy; + } + if (copy > state.length) { copy = state.length; } + from_source = state.window; + } + else { /* copy from output */ + from_source = output; + from = put - state.offset; + copy = state.length; + } + if (copy > left) { copy = left; } + left -= copy; + state.length -= copy; + do { + output[put++] = from_source[from++]; + } while (--copy); + if (state.length === 0) { state.mode = LEN; } + break; + case LIT: + if (left === 0) { break inf_leave; } + output[put++] = state.length; + left--; + state.mode = LEN; + break; + case CHECK: + if (state.wrap) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + // Use '|' instead of '+' to make sure that result is signed + hold |= input[next++] << bits; + bits += 8; + } + //===// + _out -= left; + strm.total_out += _out; + state.total += _out; + if (_out) { + strm.adler = state.check = + /*UPDATE(state.check, put - _out, _out);*/ + (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out)); - var prots = protocols(input); - input = input.substring(input.indexOf("://") + 3); - if (isSsh(prots)) { - return true; + } + _out = left; + // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too + if ((state.flags ? hold : zswap32(hold)) !== state.check) { + strm.msg = 'incorrect data check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: check matches trailer\n")); + } + state.mode = LENGTH; + /* falls through */ + case LENGTH: + if (state.wrap && state.flags) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (hold !== (state.total & 0xffffffff)) { + strm.msg = 'incorrect length check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: length matches trailer\n")); + } + state.mode = DONE; + /* falls through */ + case DONE: + ret = Z_STREAM_END; + break inf_leave; + case BAD: + ret = Z_DATA_ERROR; + break inf_leave; + case MEM: + return Z_MEM_ERROR; + case SYNC: + /* falls through */ + default: + return Z_STREAM_ERROR; } + } - // TODO This probably could be improved :) - var urlPortPattern = new RegExp('\.([a-zA-Z\\d]+):(\\d+)\/'); - return !input.match(urlPortPattern) && input.indexOf("@") < input.indexOf(":"); -} - -module.exports = isSsh; - -/***/ }), + // inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave" -/***/ 724: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + /* + Return from inflate(), updating the total counts and the check value. + If there was no progress during the inflate() call, return a buffer + error. Call updatewindow() to create and/or update the window state. + Note: a memory error from inflate() is non-recoverable. 
+ */ -"use strict"; + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- -Object.defineProperty(exports, "__esModule", { value: true }); -exports.matchAny = exports.convertPatternsToRe = exports.makeRe = exports.getPatternParts = exports.expandBraceExpansion = exports.expandPatternsWithBraceExpansion = exports.isAffectDepthOfReadingPattern = exports.endsWithSlashGlobStar = exports.hasGlobStar = exports.getBaseDirectory = exports.isPatternRelatedToParentDirectory = exports.getPatternsOutsideCurrentDirectory = exports.getPatternsInsideCurrentDirectory = exports.getPositivePatterns = exports.getNegativePatterns = exports.isPositivePattern = exports.isNegativePattern = exports.convertToNegativePattern = exports.convertToPositivePattern = exports.isDynamicPattern = exports.isStaticPattern = void 0; -const path = __webpack_require__(622); -const globParent = __webpack_require__(763); -const micromatch = __webpack_require__(74); -const GLOBSTAR = '**'; -const ESCAPE_SYMBOL = '\\'; -const COMMON_GLOB_SYMBOLS_RE = /[*?]|^!/; -const REGEX_CHARACTER_CLASS_SYMBOLS_RE = /\[[^[]*]/; -const REGEX_GROUP_SYMBOLS_RE = /(?:^|[^!*+?@])\([^(]*\|[^|]*\)/; -const GLOB_EXTENSION_SYMBOLS_RE = /[!*+?@]\([^(]*\)/; -const BRACE_EXPANSION_SEPARATORS_RE = /,|\.\./; -function isStaticPattern(pattern, options = {}) { - return !isDynamicPattern(pattern, options); -} -exports.isStaticPattern = isStaticPattern; -function isDynamicPattern(pattern, options = {}) { - /** - * A special case with an empty string is necessary for matching patterns that start with a forward slash. - * An empty string cannot be a dynamic pattern. - * For example, the pattern `/lib/*` will be spread into parts: '', 'lib', '*'. - */ - if (pattern === '') { - return false; - } - /** - * When the `caseSensitiveMatch` option is disabled, all patterns must be marked as dynamic, because we cannot check - * filepath directly (without read directory). - */ - if (options.caseSensitiveMatch === false || pattern.includes(ESCAPE_SYMBOL)) { - return true; - } - if (COMMON_GLOB_SYMBOLS_RE.test(pattern) || REGEX_CHARACTER_CLASS_SYMBOLS_RE.test(pattern) || REGEX_GROUP_SYMBOLS_RE.test(pattern)) { - return true; - } - if (options.extglob !== false && GLOB_EXTENSION_SYMBOLS_RE.test(pattern)) { - return true; - } - if (options.braceExpansion !== false && hasBraceExpansion(pattern)) { - return true; - } - return false; -} -exports.isDynamicPattern = isDynamicPattern; -function hasBraceExpansion(pattern) { - const openingBraceIndex = pattern.indexOf('{'); - if (openingBraceIndex === -1) { - return false; - } - const closingBraceIndex = pattern.indexOf('}', openingBraceIndex + 1); - if (closingBraceIndex === -1) { - return false; - } - const braceContent = pattern.slice(openingBraceIndex, closingBraceIndex); - return BRACE_EXPANSION_SEPARATORS_RE.test(braceContent); -} -function convertToPositivePattern(pattern) { - return isNegativePattern(pattern) ? pattern.slice(1) : pattern; -} -exports.convertToPositivePattern = convertToPositivePattern; -function convertToNegativePattern(pattern) { - return '!' 
+ pattern; -} -exports.convertToNegativePattern = convertToNegativePattern; -function isNegativePattern(pattern) { - return pattern.startsWith('!') && pattern[1] !== '('; -} -exports.isNegativePattern = isNegativePattern; -function isPositivePattern(pattern) { - return !isNegativePattern(pattern); -} -exports.isPositivePattern = isPositivePattern; -function getNegativePatterns(patterns) { - return patterns.filter(isNegativePattern); -} -exports.getNegativePatterns = getNegativePatterns; -function getPositivePatterns(patterns) { - return patterns.filter(isPositivePattern); -} -exports.getPositivePatterns = getPositivePatterns; -/** - * Returns patterns that can be applied inside the current directory. - * - * @example - * // ['./*', '*', 'a/*'] - * getPatternsInsideCurrentDirectory(['./*', '*', 'a/*', '../*', './../*']) - */ -function getPatternsInsideCurrentDirectory(patterns) { - return patterns.filter((pattern) => !isPatternRelatedToParentDirectory(pattern)); -} -exports.getPatternsInsideCurrentDirectory = getPatternsInsideCurrentDirectory; -/** - * Returns patterns to be expanded relative to (outside) the current directory. - * - * @example - * // ['../*', './../*'] - * getPatternsInsideCurrentDirectory(['./*', '*', 'a/*', '../*', './../*']) - */ -function getPatternsOutsideCurrentDirectory(patterns) { - return patterns.filter(isPatternRelatedToParentDirectory); -} -exports.getPatternsOutsideCurrentDirectory = getPatternsOutsideCurrentDirectory; -function isPatternRelatedToParentDirectory(pattern) { - return pattern.startsWith('..') || pattern.startsWith('./..'); -} -exports.isPatternRelatedToParentDirectory = isPatternRelatedToParentDirectory; -function getBaseDirectory(pattern) { - return globParent(pattern, { flipBackslashes: false }); -} -exports.getBaseDirectory = getBaseDirectory; -function hasGlobStar(pattern) { - return pattern.includes(GLOBSTAR); -} -exports.hasGlobStar = hasGlobStar; -function endsWithSlashGlobStar(pattern) { - return pattern.endsWith('/' + GLOBSTAR); -} -exports.endsWithSlashGlobStar = endsWithSlashGlobStar; -function isAffectDepthOfReadingPattern(pattern) { - const basename = path.basename(pattern); - return endsWithSlashGlobStar(pattern) || isStaticPattern(basename); -} -exports.isAffectDepthOfReadingPattern = isAffectDepthOfReadingPattern; -function expandPatternsWithBraceExpansion(patterns) { - return patterns.reduce((collection, pattern) => { - return collection.concat(expandBraceExpansion(pattern)); - }, []); -} -exports.expandPatternsWithBraceExpansion = expandPatternsWithBraceExpansion; -function expandBraceExpansion(pattern) { - return micromatch.braces(pattern, { - expand: true, - nodupes: true - }); -} -exports.expandBraceExpansion = expandBraceExpansion; -function getPatternParts(pattern, options) { - let { parts } = micromatch.scan(pattern, Object.assign(Object.assign({}, options), { parts: true })); - /** - * The scan method returns an empty array in some cases. - * See micromatch/picomatch#58 for more details. - */ - if (parts.length === 0) { - parts = [pattern]; - } - /** - * The scan method does not return an empty part for the pattern with a forward slash. - * This is another part of micromatch/picomatch#58. 
- */ - if (parts[0].startsWith('/')) { - parts[0] = parts[0].slice(1); - parts.unshift(''); + if (state.wsize || (_out !== strm.avail_out && state.mode < BAD && + (state.mode < CHECK || flush !== Z_FINISH))) { + if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) { + state.mode = MEM; + return Z_MEM_ERROR; } - return parts; -} -exports.getPatternParts = getPatternParts; -function makeRe(pattern, options) { - return micromatch.makeRe(pattern, options); -} -exports.makeRe = makeRe; -function convertPatternsToRe(patterns, options) { - return patterns.map((pattern) => makeRe(pattern, options)); -} -exports.convertPatternsToRe = convertPatternsToRe; -function matchAny(entry, patternsRe) { - return patternsRe.some((patternRe) => patternRe.test(entry)); + } + _in -= strm.avail_in; + _out -= strm.avail_out; + strm.total_in += _in; + strm.total_out += _out; + state.total += _out; + if (state.wrap && _out) { + strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/ + (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out)); + } + strm.data_type = state.bits + (state.last ? 64 : 0) + + (state.mode === TYPE ? 128 : 0) + + (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0); + if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) { + ret = Z_BUF_ERROR; + } + return ret; } -exports.matchAny = matchAny; - -/***/ }), - -/***/ 728: -/***/ (function(__unusedmodule, exports) { +function inflateEnd(strm) { -"use strict"; + if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) { + return Z_STREAM_ERROR; + } -Object.defineProperty(exports, "__esModule", { value: true }); -exports.read = void 0; -function read(path, settings, callback) { - settings.fs.lstat(path, (lstatError, lstat) => { - if (lstatError !== null) { - callFailureCallback(callback, lstatError); - return; - } - if (!lstat.isSymbolicLink() || !settings.followSymbolicLink) { - callSuccessCallback(callback, lstat); - return; - } - settings.fs.stat(path, (statError, stat) => { - if (statError !== null) { - if (settings.throwErrorOnBrokenSymbolicLink) { - callFailureCallback(callback, statError); - return; - } - callSuccessCallback(callback, lstat); - return; - } - if (settings.markSymbolicLink) { - stat.isSymbolicLink = () => true; - } - callSuccessCallback(callback, stat); - }); - }); -} -exports.read = read; -function callFailureCallback(callback, error) { - callback(error); -} -function callSuccessCallback(callback, result) { - callback(null, result); + var state = strm.state; + if (state.window) { + state.window = null; + } + strm.state = null; + return Z_OK; } +function inflateGetHeader(strm, head) { + var state; -/***/ }), + /* check state */ + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; } -/***/ 730: -/***/ (function(module, __unusedexports, __webpack_require__) { + /* save header structure */ + state.head = head; + head.done = false; + return Z_OK; +} -"use strict"; -/*! - * fill-range - * - * Copyright (c) 2014-present, Jon Schlinkert. - * Licensed under the MIT License. 
- */ +function inflateSetDictionary(strm, dictionary) { + var dictLength = dictionary.length; + var state; + var dictid; + var ret; + /* check state */ + if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; } + state = strm.state; -const util = __webpack_require__(669); -const toRegexRange = __webpack_require__(789); + if (state.wrap !== 0 && state.mode !== DICT) { + return Z_STREAM_ERROR; + } -const isObject = val => val !== null && typeof val === 'object' && !Array.isArray(val); + /* check for correct dictionary identifier */ + if (state.mode === DICT) { + dictid = 1; /* adler32(0, null, 0)*/ + /* dictid = adler32(dictid, dictionary, dictLength); */ + dictid = adler32(dictid, dictionary, dictLength, 0); + if (dictid !== state.check) { + return Z_DATA_ERROR; + } + } + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary, dictLength, dictLength); + if (ret) { + state.mode = MEM; + return Z_MEM_ERROR; + } + state.havedict = 1; + // Tracev((stderr, "inflate: dictionary set\n")); + return Z_OK; +} -const transform = toNumber => { - return value => toNumber === true ? Number(value) : String(value); -}; +exports.inflateReset = inflateReset; +exports.inflateReset2 = inflateReset2; +exports.inflateResetKeep = inflateResetKeep; +exports.inflateInit = inflateInit; +exports.inflateInit2 = inflateInit2; +exports.inflate = inflate; +exports.inflateEnd = inflateEnd; +exports.inflateGetHeader = inflateGetHeader; +exports.inflateSetDictionary = inflateSetDictionary; +exports.inflateInfo = 'pako inflate (from Nodeca project)'; -const isValidValue = value => { - return typeof value === 'number' || (typeof value === 'string' && value !== ''); -}; +/* Not implemented +exports.inflateCopy = inflateCopy; +exports.inflateGetDictionary = inflateGetDictionary; +exports.inflateMark = inflateMark; +exports.inflatePrime = inflatePrime; +exports.inflateSync = inflateSync; +exports.inflateSyncPoint = inflateSyncPoint; +exports.inflateUndermine = inflateUndermine; +*/ -const isNumber = num => Number.isInteger(+num); -const zeros = input => { - let value = `${input}`; - let index = -1; - if (value[0] === '-') value = value.slice(1); - if (value === '0') return false; - while (value[++index] === '0'); - return index > 0; -}; +/***/ }), -const stringify = (start, end, options) => { - if (typeof start === 'string' || typeof end === 'string') { - return true; - } - return options.stringify === true; -}; +/***/ 6895: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -const pad = (input, maxLength, toNumber) => { - if (maxLength > 0) { - let dash = input[0] === '-' ? '-' : ''; - if (dash) input = input.slice(1); - input = (dash + input.padStart(dash ? maxLength - 1 : maxLength, '0')); - } - if (toNumber === false) { - return String(input); - } - return input; -}; +"use strict"; -const toMaxLen = (input, maxLength) => { - let negative = input[0] === '-' ? '-' : ''; - if (negative) { - input = input.slice(1); - maxLength--; - } - while (input.length < maxLength) input = '0' + input; - return negative ? ('-' + input) : input; -}; -const toSequence = (parts, options) => { - parts.negatives.sort((a, b) => a < b ? -1 : a > b ? 1 : 0); - parts.positives.sort((a, b) => a < b ? -1 : a > b ? 1 : 0); +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. 
In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. - let prefix = options.capture ? '' : '?:'; - let positives = ''; - let negatives = ''; - let result; +var utils = __nccwpck_require__(5483); - if (parts.positives.length) { - positives = parts.positives.join('|'); - } +var MAXBITS = 15; +var ENOUGH_LENS = 852; +var ENOUGH_DISTS = 592; +//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); - if (parts.negatives.length) { - negatives = `-(${prefix}${parts.negatives.join('|')})`; - } +var CODES = 0; +var LENS = 1; +var DISTS = 2; - if (positives && negatives) { - result = `${positives}|${negatives}`; - } else { - result = positives || negatives; - } +var lbase = [ /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 +]; - if (options.wrap) { - return `(${prefix}${result})`; - } +var lext = [ /* Length codes 257..285 extra */ + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78 +]; - return result; -}; +var dbase = [ /* Distance codes 0..29 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577, 0, 0 +]; -const toRange = (a, b, isNumbers, options) => { - if (isNumbers) { - return toRegexRange(a, b, { wrap: false, ...options }); - } +var dext = [ /* Distance codes 0..29 extra */ + 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 64, 64 +]; - let start = String.fromCharCode(a); - if (a === b) return start; +module.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts) +{ + var bits = opts.bits; + //here = opts.here; /* table entry for duplication */ - let stop = String.fromCharCode(b); - return `[${start}-${stop}]`; -}; + var len = 0; /* a code's length in bits */ + var sym = 0; /* index of code symbols */ + var min = 0, max = 0; /* minimum and maximum code lengths */ + var root = 0; /* number of index bits for root table */ + var curr = 0; /* number of index bits for current table */ + var drop = 0; /* code bits to drop for sub-table */ + var left = 0; /* number of prefix codes available */ + var used = 0; /* code entries in table used */ + var huff = 0; /* Huffman code */ + var incr; /* for incrementing code, index */ + var fill; /* index for replicating entries */ + var low; /* low bits for current root entry */ + var mask; /* mask for low root bits */ + var next; /* next available space in table */ + var base = null; /* base value table to use */ + var base_index = 0; +// var shoextra; /* extra bits table to use */ + var end; /* use base and extra for symbol > end */ + var count = new 
utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */ + var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */ + var extra = null; + var extra_index = 0; -const toRegex = (start, end, options) => { - if (Array.isArray(start)) { - let wrap = options.wrap === true; - let prefix = options.capture ? '' : '?:'; - return wrap ? `(${prefix}${start.join('|')})` : start.join('|'); - } - return toRegexRange(start, end, options); -}; + var here_bits, here_op, here_val; -const rangeError = (...args) => { - return new RangeError('Invalid range arguments: ' + util.inspect(...args)); -}; + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. -const invalidRange = (start, end, options) => { - if (options.strictRanges === true) throw rangeError([start, end]); - return []; -}; + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. + + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. 
+ */ -const invalidStep = (step, options) => { - if (options.strictRanges === true) { - throw new TypeError(`Expected step "${step}" to be a number`); + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) { + count[len] = 0; + } + for (sym = 0; sym < codes; sym++) { + count[lens[lens_index + sym]]++; } - return []; -}; - -const fillNumbers = (start, end, step = 1, options = {}) => { - let a = Number(start); - let b = Number(end); - if (!Number.isInteger(a) || !Number.isInteger(b)) { - if (options.strictRanges === true) throw rangeError([start, end]); - return []; + /* bound code lengths, force root to be within code lengths */ + root = bits; + for (max = MAXBITS; max >= 1; max--) { + if (count[max] !== 0) { break; } + } + if (root > max) { + root = max; } + if (max === 0) { /* no symbols to code at all */ + //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */ + //table.bits[opts.table_index] = 1; //here.bits = (var char)1; + //table.val[opts.table_index++] = 0; //here.val = (var short)0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; - // fix negative zero - if (a === 0) a = 0; - if (b === 0) b = 0; - let descending = a > b; - let startString = String(start); - let endString = String(end); - let stepString = String(step); - step = Math.max(Math.abs(step), 1); + //table.op[opts.table_index] = 64; + //table.bits[opts.table_index] = 1; + //table.val[opts.table_index++] = 0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; - let padded = zeros(startString) || zeros(endString) || zeros(stepString); - let maxLen = padded ? Math.max(startString.length, endString.length, stepString.length) : 0; - let toNumber = padded === false && stringify(start, end, options) === false; - let format = options.transform || transform(toNumber); + opts.bits = 1; + return 0; /* no symbols, but wait for decoding to report error */ + } + for (min = 1; min < max; min++) { + if (count[min] !== 0) { break; } + } + if (root < min) { + root = min; + } - if (options.toRegex && step === 1) { - return toRange(toMaxLen(start, maxLen), toMaxLen(end, maxLen), true, options); + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) { + return -1; + } /* over-subscribed */ + } + if (left > 0 && (type === CODES || max !== 1)) { + return -1; /* incomplete set */ } - let parts = { negatives: [], positives: [] }; - let push = num => parts[num < 0 ? 'negatives' : 'positives'].push(Math.abs(num)); - let range = []; - let index = 0; + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) { + offs[len + 1] = offs[len] + count[len]; + } - while (descending ? a >= b : a <= b) { - if (options.toRegex === true && step > 1) { - push(a); - } else { - range.push(pad(format(a, index), maxLen, toNumber)); + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) { + if (lens[lens_index + sym] !== 0) { + work[offs[lens[lens_index + sym]]++] = sym; } - a = descending ? a - step : a + step; - index++; } - if (options.toRegex === true) { - return step > 1 - ? toSequence(parts, options) - : toRegex(range, null, { wrap: false, ...options }); - } + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. 
That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. - return range; -}; + root is the number of index bits for the root table. When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. -const fillLetters = (start, end, step = 1, options = {}) => { - if ((!isNumber(start) && start.length > 1) || (!isNumber(end) && end.length > 1)) { - return invalidRange(start, end, options); - } + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. - let format = options.transform || (val => String.fromCharCode(val)); - let a = `${start}`.charCodeAt(0); - let b = `${end}`.charCodeAt(0); + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. + */ - let descending = a > b; - let min = Math.min(a, b); - let max = Math.max(a, b); + /* set up for code type */ + // poor man optimization - use if-else instead of switch, + // to avoid deopts in old v8 + if (type === CODES) { + base = extra = work; /* dummy value--not used */ + end = 19; - if (options.toRegex && step === 1) { - return toRange(min, max, false, options); + } else if (type === LENS) { + base = lbase; + base_index -= 257; + extra = lext; + extra_index -= 257; + end = 256; + + } else { /* DISTS */ + base = dbase; + extra = dext; + end = -1; } - let range = []; - let index = 0; + /* initialize opts for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = table_index; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = -1; /* trigger new sub-table when len > root */ + used = 1 << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ - while (descending ? a >= b : a <= b) { - range.push(format(a, index)); - a = descending ? 
a - step : a + step; - index++; + /* check available table space */ + if ((type === LENS && used > ENOUGH_LENS) || + (type === DISTS && used > ENOUGH_DISTS)) { + return 1; } - if (options.toRegex === true) { - return toRegex(range, null, { wrap: false, options }); - } + /* process all codes and make table entries */ + for (;;) { + /* create table entry */ + here_bits = len - drop; + if (work[sym] < end) { + here_op = 0; + here_val = work[sym]; + } + else if (work[sym] > end) { + here_op = extra[extra_index + work[sym]]; + here_val = base[base_index + work[sym]]; + } + else { + here_op = 32 + 64; /* end of block */ + here_val = 0; + } - return range; -}; + /* replicate for those indices with low len bits equal to huff */ + incr = 1 << (len - drop); + fill = 1 << curr; + min = fill; /* save offset to next table */ + do { + fill -= incr; + table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0; + } while (fill !== 0); -const fill = (start, end, step, options = {}) => { - if (end == null && isValidValue(start)) { - return [start]; - } + /* backwards increment the len-bit code huff */ + incr = 1 << (len - 1); + while (huff & incr) { + incr >>= 1; + } + if (incr !== 0) { + huff &= incr - 1; + huff += incr; + } else { + huff = 0; + } - if (!isValidValue(start) || !isValidValue(end)) { - return invalidRange(start, end, options); - } + /* go to next symbol, update count, len */ + sym++; + if (--count[len] === 0) { + if (len === max) { break; } + len = lens[lens_index + work[sym]]; + } - if (typeof step === 'function') { - return fill(start, end, 1, { transform: step }); - } + /* create new sub-table if needed */ + if (len > root && (huff & mask) !== low) { + /* if first time, transition to sub-tables */ + if (drop === 0) { + drop = root; + } - if (isObject(step)) { - return fill(start, end, 0, step); - } + /* increment past last table */ + next += min; /* here min is 1 << curr */ - let opts = { ...options }; - if (opts.capture === true) opts.wrap = true; - step = step || opts.step || 1; + /* determine length of next table */ + curr = len - drop; + left = 1 << curr; + while (curr + drop < max) { + left -= count[curr + drop]; + if (left <= 0) { break; } + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1 << curr; + if ((type === LENS && used > ENOUGH_LENS) || + (type === DISTS && used > ENOUGH_DISTS)) { + return 1; + } - if (!isNumber(step)) { - if (step != null && !isObject(step)) return invalidStep(step, opts); - return fill(start, end, 1, step); + /* point entry in root table to sub-table */ + low = huff & mask; + /*table.op[low] = curr; + table.bits[low] = root; + table.val[low] = next - opts.table_index;*/ + table[low] = (root << 24) | (curr << 16) | (next - table_index) |0; + } } - if (isNumber(start) && isNumber(end)) { - return fillNumbers(start, end, step, opts); + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff !== 0) { + //table.op[next + huff] = 64; /* invalid code marker */ + //table.bits[next + huff] = len - drop; + //table.val[next + huff] = 0; + table[next + huff] = ((len - drop) << 24) | (64 << 16) |0; } - return fillLetters(start, end, Math.max(Math.abs(step), 1), opts); + /* set return parameters */ + //opts.table_index += used; + opts.bits = root; + return 0; }; -module.exports = fill; - /***/ }), -/***/ 737: -/***/ (function(module) { +/***/ 1890: +/***/ 
((module) => { "use strict"; -/** - * protocols - * Returns the protocols of an input url. - * - * @name protocols - * @function - * @param {String|URL} input The input url (string or `URL` instance) - * @param {Boolean|Number} first If `true`, the first protocol will be returned. If number, it will represent the zero-based index of the protocols array. - * @return {Array|String} The array of protocols or the specified protocol. - */ -module.exports = function protocols(input, first) { - - if (first === true) { - first = 0; - } - - var prots = ""; - if (typeof input === "string") { - try { - prots = new URL(input).protocol; - } catch (e) {} - } else if (input && input.constructor === URL) { - prots = input.protocol; - } - - var splits = prots.split(/\:|\+/).filter(Boolean); - - if (typeof first === "number") { - return splits[first]; - } +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. - return splits; +module.exports = { + 2: 'need dictionary', /* Z_NEED_DICT 2 */ + 1: 'stream end', /* Z_STREAM_END 1 */ + 0: '', /* Z_OK 0 */ + '-1': 'file error', /* Z_ERRNO (-1) */ + '-2': 'stream error', /* Z_STREAM_ERROR (-2) */ + '-3': 'data error', /* Z_DATA_ERROR (-3) */ + '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */ + '-5': 'buffer error', /* Z_BUF_ERROR (-5) */ + '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ }; -/***/ }), -/***/ 747: -/***/ (function(module) { +/***/ }), -module.exports = require("fs"); +/***/ 8754: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -/***/ }), +"use strict"; -/***/ 750: -/***/ (function(module, __unusedexports, __webpack_require__) { -// Copyright (c) 2006, 2008 Tony Garnock-Jones -// Copyright (c) 2006, 2008 LShift Ltd. +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin // -// Permission is hereby granted, free of charge, to any person -// obtaining a copy of this software and associated documentation files -// (the "Software"), to deal in the Software without restriction, -// including without limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of the Software, -// and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. // -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. 
+// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -var onp = __webpack_require__(290); - -function longestCommonSubsequence(file1, file2) { - var diff = new onp(file1, file2); - diff.compose(); - var ses = diff.getses(); - - var root; - var prev; - var file1RevIdx = file1.length - 1, - file2RevIdx = file2.length - 1; - for (var i = ses.length - 1; i >= 0; --i) { - if (ses[i].t === diff.SES_COMMON) { - if (prev) { - prev.chain = { - file1index: file1RevIdx, - file2index: file2RevIdx, - chain: null - }; - prev = prev.chain; - } else { - root = { - file1index: file1RevIdx, - file2index: file2RevIdx, - chain: null - }; - prev = root; - } - file1RevIdx--; - file2RevIdx--; - } else if (ses[i].t === diff.SES_DELETE) { - file1RevIdx--; - } else if (ses[i].t === diff.SES_ADD) { - file2RevIdx--; - } - } - - var tail = { - file1index: -1, - file2index: -1, - chain: null - }; +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. - if (!prev) { - return tail; - } +/* eslint-disable space-unary-ops */ - prev.chain = tail; +var utils = __nccwpck_require__(5483); - return root; -} +/* Public constants ==========================================================*/ +/* ===========================================================================*/ -function diffIndices(file1, file2) { - // We apply the LCS to give a simple representation of the - // offsets and lengths of mismatched chunks in the input - // files. This is used by diff3_merge_indices below. 
- var result = []; - var tail1 = file1.length; - var tail2 = file2.length; +//var Z_FILTERED = 1; +//var Z_HUFFMAN_ONLY = 2; +//var Z_RLE = 3; +var Z_FIXED = 4; +//var Z_DEFAULT_STRATEGY = 0; - for (var candidate = longestCommonSubsequence(file1, file2); candidate !== null; candidate = candidate.chain) { - var mismatchLength1 = tail1 - candidate.file1index - 1; - var mismatchLength2 = tail2 - candidate.file2index - 1; - tail1 = candidate.file1index; - tail2 = candidate.file2index; +/* Possible values of the data_type field (though see inflate()) */ +var Z_BINARY = 0; +var Z_TEXT = 1; +//var Z_ASCII = 1; // = Z_TEXT +var Z_UNKNOWN = 2; - if (mismatchLength1 || mismatchLength2) { - result.push({ - file1: [tail1 + 1, mismatchLength1], - file2: [tail2 + 1, mismatchLength2] - }); - } - } +/*============================================================================*/ - result.reverse(); - return result; -} -function diff3MergeIndices(a, o, b) { - // Given three files, A, O, and B, where both A and B are - // independently derived from O, returns a fairly complicated - // internal representation of merge decisions it's taken. The - // interested reader may wish to consult - // - // Sanjeev Khanna, Keshav Kunal, and Benjamin C. Pierce. "A - // Formal Investigation of Diff3." In Arvind and Prasad, - // editors, Foundations of Software Technology and Theoretical - // Computer Science (FSTTCS), December 2007. - // - // (http://www.cis.upenn.edu/~bcpierce/papers/diff3-short.pdf) - var i; +function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } - var m1 = diffIndices(o, a); - var m2 = diffIndices(o, b); +// From zutil.h - var hunks = []; +var STORED_BLOCK = 0; +var STATIC_TREES = 1; +var DYN_TREES = 2; +/* The three kinds of block type */ - function addHunk(h, side) { - hunks.push([h.file1[0], side, h.file1[1], h.file2[0], h.file2[1]]); - } - for (i = 0; i < m1.length; i++) { - addHunk(m1[i], 0); - } - for (i = 0; i < m2.length; i++) { - addHunk(m2[i], 2); - } - hunks.sort(function(x, y) { - return x[0] - y[0] - }); +var MIN_MATCH = 3; +var MAX_MATCH = 258; +/* The minimum and maximum match lengths */ - var result = []; - var commonOffset = 0; +// From deflate.h +/* =========================================================================== + * Internal compression state. + */ - function copyCommon(targetOffset) { - if (targetOffset > commonOffset) { - result.push([1, commonOffset, targetOffset - commonOffset]); - commonOffset = targetOffset; - } - } +var LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ - for (var hunkIndex = 0; hunkIndex < hunks.length; hunkIndex++) { - var firstHunkIndex = hunkIndex; - var hunk = hunks[hunkIndex]; - var regionLhs = hunk[0]; - var regionRhs = regionLhs + hunk[2]; - while (hunkIndex < hunks.length - 1) { - var maybeOverlapping = hunks[hunkIndex + 1]; - var maybeLhs = maybeOverlapping[0]; - if (maybeLhs > regionRhs) break; - regionRhs = Math.max(regionRhs, maybeLhs + maybeOverlapping[2]); - hunkIndex++; - } +var LITERALS = 256; +/* number of literal bytes 0..255 */ - copyCommon(regionLhs); - if (firstHunkIndex == hunkIndex) { - // The "overlap" was only one hunk long, meaning that - // there's no conflict here. Either a and o were the - // same, or b and o were the same. - if (hunk[4] > 0) { - result.push([hunk[1], hunk[3], hunk[4]]); - } - } else { - // A proper conflict. Determine the extents of the - // regions involved from a, o and b. 
Effectively merge - // all the hunks on the left into one giant hunk, and - // do the same for the right; then, correct for skew - // in the regions of o that each side changed, and - // report appropriate spans for the three sides. - var regions = { - 0: [a.length, -1, o.length, -1], - 2: [b.length, -1, o.length, -1] - }; - for (i = firstHunkIndex; i <= hunkIndex; i++) { - hunk = hunks[i]; - var side = hunk[1]; - var r = regions[side]; - var oLhs = hunk[0]; - var oRhs = oLhs + hunk[2]; - var abLhs = hunk[3]; - var abRhs = abLhs + hunk[4]; - r[0] = Math.min(abLhs, r[0]); - r[1] = Math.max(abRhs, r[1]); - r[2] = Math.min(oLhs, r[2]); - r[3] = Math.max(oRhs, r[3]); - } - var aLhs = regions[0][0] + (regionLhs - regions[0][2]); - var aRhs = regions[0][1] + (regionRhs - regions[0][3]); - var bLhs = regions[2][0] + (regionLhs - regions[2][2]); - var bRhs = regions[2][1] + (regionRhs - regions[2][3]); - result.push([-1, - aLhs, aRhs - aLhs, - regionLhs, regionRhs - regionLhs, - bLhs, bRhs - bLhs - ]); - } - commonOffset = regionRhs; - } +var L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ - copyCommon(o.length); - return result; -} +var D_CODES = 30; +/* number of distance codes */ -function diff3Merge(a, o, b) { - // Applies the output of Diff.diff3_merge_indices to actually - // construct the merged file; the returned result alternates - // between "ok" and "conflict" blocks. +var BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ - var result = []; - var files = [a, o, b]; - var indices = diff3MergeIndices(a, o, b); +var HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ - var okLines = []; +var MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ - function flushOk() { - if (okLines.length) { - result.push({ - ok: okLines - }); - } - okLines = []; - } +var Buf_size = 16; +/* size of bit buffer in bi_buf */ - function pushOk(xs) { - for (var j = 0; j < xs.length; j++) { - okLines.push(xs[j]); - } - } - function isTrueConflict(rec) { - if (rec[2] != rec[6]) return true; - var aoff = rec[1]; - var boff = rec[5]; - for (var j = 0; j < rec[2]; j++) { - if (a[j + aoff] != b[j + boff]) return true; - } - return false; - } +/* =========================================================================== + * Constants + */ - for (var i = 0; i < indices.length; i++) { - var x = indices[i]; - var side = x[0]; - if (side == -1) { - if (!isTrueConflict(x)) { - pushOk(files[0].slice(x[1], x[1] + x[2])); - } else { - flushOk(); - result.push({ - conflict: { - a: a.slice(x[1], x[1] + x[2]), - aIndex: x[1], - o: o.slice(x[3], x[3] + x[4]), - oIndex: x[3], - b: b.slice(x[5], x[5] + x[6]), - bIndex: x[5] - } - }); - } - } else { - pushOk(files[side].slice(x[1], x[1] + x[2])); - } - } +var MAX_BL_BITS = 7; +/* Bit length codes must not exceed MAX_BL_BITS bits */ - flushOk(); - return result; -} +var END_BLOCK = 256; +/* end of block literal code */ -module.exports = diff3Merge; +var REP_3_6 = 16; +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ +var REPZ_3_10 = 17; +/* repeat a zero length 3-10 times (3 bits of repeat count) */ -/***/ }), +var REPZ_11_138 = 18; +/* repeat a zero length 11-138 times (7 bits of repeat count) */ -/***/ 763: -/***/ (function(module, __unusedexports, __webpack_require__) { +/* eslint-disable comma-spacing,array-bracket-spacing */ +var extra_lbits = /* extra bits for each length code */ + [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]; -"use strict"; +var 
extra_dbits = /* extra bits for each distance code */ + [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]; +var extra_blbits = /* extra bits for each bit length code */ + [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]; -var isGlob = __webpack_require__(357); -var pathPosixDirname = __webpack_require__(622).posix.dirname; -var isWin32 = __webpack_require__(87).platform() === 'win32'; +var bl_order = + [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]; +/* eslint-enable comma-spacing,array-bracket-spacing */ -var slash = '/'; -var backslash = /\\/g; -var enclosure = /[\{\[].*[\}\]]$/; -var globby = /(^|[^\\])([\{\[]|\([^\)]+$)/; -var escaped = /\\([\!\*\?\|\[\]\(\)\{\}])/g; +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ -/** - * @param {string} str - * @param {Object} opts - * @param {boolean} [opts.flipBackslashes=true] - * @returns {string} +/* =========================================================================== + * Local data. These are initialized only once. */ -module.exports = function globParent(str, opts) { - var options = Object.assign({ flipBackslashes: true }, opts); - // flip windows path separators - if (options.flipBackslashes && isWin32 && str.indexOf(slash) < 0) { - str = str.replace(backslash, slash); - } +// We pre-fill arrays with 0 to avoid uninitialized gaps - // special case for strings ending in enclosure containing path separator - if (enclosure.test(str)) { - str += slash; - } +var DIST_CODE_LEN = 512; /* see definition of array dist_code below */ - // preserves full path in case of trailing path separator - str += 'a'; +// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1 +var static_ltree = new Array((L_CODES + 2) * 2); +zero(static_ltree); +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). + */ - // remove path parts that are globby - do { - str = pathPosixDirname(str); - } while (isGlob(str) || globby.test(str)); +var static_dtree = new Array(D_CODES * 2); +zero(static_dtree); +/* The static distance tree. (Actually a trivial tree since all codes use + * 5 bits.) + */ - // remove escape chars and return result - return str.replace(escaped, '$1'); -}; +var _dist_code = new Array(DIST_CODE_LEN); +zero(_dist_code); +/* Distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. 
+ */ +var _length_code = new Array(MAX_MATCH - MIN_MATCH + 1); +zero(_length_code); +/* length code for each normalized match length (0 == MIN_MATCH) */ -/***/ }), +var base_length = new Array(LENGTH_CODES); +zero(base_length); +/* First normalized length for each code (0 = MIN_MATCH) */ -/***/ 768: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +var base_dist = new Array(D_CODES); +zero(base_dist); +/* First normalized distance for each code (0 = distance of 1) */ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const async_1 = __webpack_require__(291); -class AsyncProvider { - constructor(_root, _settings) { - this._root = _root; - this._settings = _settings; - this._reader = new async_1.default(this._root, this._settings); - this._storage = []; - } - read(callback) { - this._reader.onError((error) => { - callFailureCallback(callback, error); - }); - this._reader.onEntry((entry) => { - this._storage.push(entry); - }); - this._reader.onEnd(() => { - callSuccessCallback(callback, this._storage); - }); - this._reader.read(); - } +function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) { + + this.static_tree = static_tree; /* static tree or NULL */ + this.extra_bits = extra_bits; /* extra bits for each code or NULL */ + this.extra_base = extra_base; /* base index for extra_bits */ + this.elems = elems; /* max number of elements in the tree */ + this.max_length = max_length; /* max bit length for the codes */ + + // show if `static_tree` has data or dummy - needed for monomorphic objects + this.has_stree = static_tree && static_tree.length; } -exports.default = AsyncProvider; -function callFailureCallback(callback, error) { - callback(error); + + +var static_l_desc; +var static_d_desc; +var static_bl_desc; + + +function TreeDesc(dyn_tree, stat_desc) { + this.dyn_tree = dyn_tree; /* the dynamic tree */ + this.max_code = 0; /* largest code with non zero frequency */ + this.stat_desc = stat_desc; /* the corresponding static tree */ } -function callSuccessCallback(callback, entries) { - callback(null, entries); + + + +function d_code(dist) { + return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)]; } -/***/ }), +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. + */ +function put_short(s, w) { +// put_byte(s, (uch)((w) & 0xff)); +// put_byte(s, (uch)((ush)(w) >> 8)); + s.pending_buf[s.pending++] = (w) & 0xff; + s.pending_buf[s.pending++] = (w >>> 8) & 0xff; +} -/***/ 775: -/***/ (function(__unusedmodule, exports, __webpack_require__) { -"use strict"; +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. 
+ */ +function send_bits(s, value, length) { + if (s.bi_valid > (Buf_size - length)) { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + put_short(s, s.bi_buf); + s.bi_buf = value >> (Buf_size - s.bi_valid); + s.bi_valid += length - Buf_size; + } else { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + s.bi_valid += length; + } +} -Object.defineProperty(exports, "__esModule", { value: true }); -const stream_1 = __webpack_require__(413); -const stream_2 = __webpack_require__(608); -const provider_1 = __webpack_require__(2); -class ProviderStream extends provider_1.default { - constructor() { - super(...arguments); - this._reader = new stream_2.default(this._settings); - } - read(task) { - const root = this._getRootDirectory(task); - const options = this._getReaderOptions(task); - const source = this.api(root, task, options); - const destination = new stream_1.Readable({ objectMode: true, read: () => { } }); - source - .once('error', (error) => destination.emit('error', error)) - .on('data', (entry) => destination.emit('data', options.transform(entry))) - .once('end', () => destination.emit('end')); - destination - .once('close', () => source.destroy()); - return destination; - } - api(root, task, options) { - if (task.dynamic) { - return this._reader.dynamic(root, options); - } - return this._reader.static(task.patterns, options); - } + +function send_code(s, c, tree) { + send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/); } -exports.default = ProviderStream; -/***/ }), +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +function bi_reverse(code, len) { + var res = 0; + do { + res |= code & 1; + code >>>= 1; + res <<= 1; + } while (--len > 0); + return res >>> 1; +} -/***/ 783: -/***/ (function(module, __unusedexports, __webpack_require__) { -"use strict"; +/* =========================================================================== + * Flush the bit buffer, keeping at most 7 bits in it. + */ +function bi_flush(s) { + if (s.bi_valid === 16) { + put_short(s, s.bi_buf); + s.bi_buf = 0; + s.bi_valid = 0; + } else if (s.bi_valid >= 8) { + s.pending_buf[s.pending++] = s.bi_buf & 0xff; + s.bi_buf >>= 8; + s.bi_valid -= 8; + } +} -const stringify = __webpack_require__(382); -const compile = __webpack_require__(435); -const expand = __webpack_require__(441); -const parse = __webpack_require__(227); -/** - * Expand the given pattern or create a regex-compatible string. - * - * ```js - * const braces = require('braces'); - * console.log(braces('{a,b,c}', { compile: true })); //=> ['(a|b|c)'] - * console.log(braces('{a,b,c}')); //=> ['a', 'b', 'c'] - * ``` - * @param {String} `str` - * @param {Object} `options` - * @return {String} - * @api public +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. 
*/ +function gen_bitlen(s, desc) +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ +{ + var tree = desc.dyn_tree; + var max_code = desc.max_code; + var stree = desc.stat_desc.static_tree; + var has_stree = desc.stat_desc.has_stree; + var extra = desc.stat_desc.extra_bits; + var base = desc.stat_desc.extra_base; + var max_length = desc.stat_desc.max_length; + var h; /* heap index */ + var n, m; /* iterate over the tree elements */ + var bits; /* bit length */ + var xbits; /* extra bits */ + var f; /* frequency */ + var overflow = 0; /* number of elements with bit length too large */ -const braces = (input, options = {}) => { - let output = []; + for (bits = 0; bits <= MAX_BITS; bits++) { + s.bl_count[bits] = 0; + } - if (Array.isArray(input)) { - for (let pattern of input) { - let result = braces.create(pattern, options); - if (Array.isArray(result)) { - output.push(...result); - } else { - output.push(result); - } + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). + */ + tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */ + + for (h = s.heap_max + 1; h < HEAP_SIZE; h++) { + n = s.heap[h]; + bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1; + if (bits > max_length) { + bits = max_length; + overflow++; } - } else { - output = [].concat(braces.create(input, options)); - } + tree[n * 2 + 1]/*.Len*/ = bits; + /* We overwrite tree[n].Dad which is no longer needed */ - if (options && options.expand === true && options.nodupes === true) { - output = [...new Set(output)]; + if (n > max_code) { continue; } /* not a leaf node */ + + s.bl_count[bits]++; + xbits = 0; + if (n >= base) { + xbits = extra[n - base]; + } + f = tree[n * 2]/*.Freq*/; + s.opt_len += f * (bits + xbits); + if (has_stree) { + s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits); + } } - return output; -}; + if (overflow === 0) { return; } -/** - * Parse the given `str` with the given `options`. - * - * ```js - * // braces.parse(pattern, [, options]); - * const ast = braces.parse('a/{b,c}/d'); - * console.log(ast); - * ``` - * @param {String} pattern Brace pattern to parse - * @param {Object} options - * @return {Object} Returns an AST - * @api public - */ + // Trace((stderr,"\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ -braces.parse = (input, options = {}) => parse(input, options); + /* Find the first bit length which could increase: */ + do { + bits = max_length - 1; + while (s.bl_count[bits] === 0) { bits--; } + s.bl_count[bits]--; /* move one leaf down the tree */ + s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ + s.bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); -/** - * Creates a braces string from an AST, or an AST node. - * - * ```js - * const braces = require('braces'); - * let ast = braces.parse('foo/{a,b}/bar'); - * console.log(stringify(ast.nodes[2])); //=> '{a,b}' - * ``` - * @param {String} `input` Brace pattern or AST. - * @param {Object} `options` - * @return {Array} Returns an array of expanded values. - * @api public + /* Now recompute all bit lengths, scanning in increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) 
+ */ + for (bits = max_length; bits !== 0; bits--) { + n = s.bl_count[bits]; + while (n !== 0) { + m = s.heap[--h]; + if (m > max_code) { continue; } + if (tree[m * 2 + 1]/*.Len*/ !== bits) { + // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); + s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/; + tree[m * 2 + 1]/*.Len*/ = bits; + } + n--; + } + } +} + + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. */ +function gen_codes(tree, max_code, bl_count) +// ct_data *tree; /* the tree to decorate */ +// int max_code; /* largest code with non zero frequency */ +// ushf *bl_count; /* number of codes at each bit length */ +{ + var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */ + var code = 0; /* running code value */ + var bits; /* bit index */ + var n; /* code index */ -braces.stringify = (input, options = {}) => { - if (typeof input === 'string') { - return stringify(braces.parse(input, options), options); + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS; bits++) { + next_code[bits] = code = (code + bl_count[bits - 1]) << 1; } - return stringify(input, options); -}; + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. + */ + //Assert (code + bl_count[MAX_BITS]-1 == (1< ['a/(b|c)/d'] - * ``` - * @param {String} `input` Brace pattern or AST. - * @param {Object} `options` - * @return {Array} Returns an array of expanded values. - * @api public - */ + for (n = 0; n <= max_code; n++) { + var len = tree[n * 2 + 1]/*.Len*/; + if (len === 0) { continue; } + /* Now reverse the bits */ + tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len); -braces.compile = (input, options = {}) => { - if (typeof input === 'string') { - input = braces.parse(input, options); + //Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", + // n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); } - return compile(input, options); -}; +} -/** - * Expands a brace pattern into an array. This method is called by the - * main [braces](#braces) function when `options.expand` is true. Before - * using this method it's recommended that you read the [performance notes](#performance)) - * and advantages of using [.compile](#compile) instead. - * - * ```js - * const braces = require('braces'); - * console.log(braces.expand('a/{b,c}/d')); - * //=> ['a/b/d', 'a/c/d']; - * ``` - * @param {String} `pattern` Brace pattern - * @param {Object} `options` - * @return {Array} Returns an array of expanded values. - * @api public + +/* =========================================================================== + * Initialize the various 'constant' tables. 
*/ +function tr_static_init() { + var n; /* iterates over tree elements */ + var bits; /* bit counter */ + var length; /* length value */ + var code; /* code value */ + var dist; /* distance index */ + var bl_count = new Array(MAX_BITS + 1); + /* number of codes at each bit length for an optimal tree */ -braces.expand = (input, options = {}) => { - if (typeof input === 'string') { - input = braces.parse(input, options); - } + // do check in _tr_init() + //if (static_init_done) return; - let result = expand(input, options); + /* For some embedded targets, global variables are not initialized: */ +/*#ifdef NO_INIT_GLOBAL_POINTERS + static_l_desc.static_tree = static_ltree; + static_l_desc.extra_bits = extra_lbits; + static_d_desc.static_tree = static_dtree; + static_d_desc.extra_bits = extra_dbits; + static_bl_desc.extra_bits = extra_blbits; +#endif*/ - // filter out empty strings if specified - if (options.noempty === true) { - result = result.filter(Boolean); + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES - 1; code++) { + base_length[code] = length; + for (n = 0; n < (1 << extra_lbits[code]); n++) { + _length_code[length++] = code; + } } + //Assert (length == 256, "tr_static_init: length != 256"); + /* Note that the length 255 (match length 258) can be represented + * in two different ways: code 284 + 5 bits or code 285, so we + * overwrite length_code[255] to use the best encoding: + */ + _length_code[length - 1] = code; - // filter out duplicates if specified - if (options.nodupes === true) { - result = [...new Set(result)]; + /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ + dist = 0; + for (code = 0; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1 << extra_dbits[code]); n++) { + _dist_code[dist++] = code; + } } - - return result; -}; - -/** - * Processes a brace pattern and returns either an expanded array - * (if `options.expand` is true), a highly optimized regex-compatible string. - * This method is called by the main [braces](#braces) function. - * - * ```js - * const braces = require('braces'); - * console.log(braces.create('user-{200..300}/project-{a,b,c}-{1..10}')) - * //=> 'user-(20[0-9]|2[1-9][0-9]|300)/project-(a|b|c)-([1-9]|10)' - * ``` - * @param {String} `pattern` Brace pattern - * @param {Object} `options` - * @return {Array} Returns an array of expanded values. - * @api public - */ - -braces.create = (input, options = {}) => { - if (input === '' || input.length < 3) { - return [input]; + //Assert (dist == 256, "tr_static_init: dist != 256"); + dist >>= 7; /* from now on, all distances are divided by 128 */ + for (; code < D_CODES; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { + _dist_code[256 + dist++] = code; + } } + //Assert (dist == 256, "tr_static_init: 256+dist != 512"); - return options.expand !== true - ? 
braces.compile(input, options) - : braces.expand(input, options); -}; + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS; bits++) { + bl_count[bits] = 0; + } -/** - * Expose "braces" - */ + n = 0; + while (n <= 143) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + while (n <= 255) { + static_ltree[n * 2 + 1]/*.Len*/ = 9; + n++; + bl_count[9]++; + } + while (n <= 279) { + static_ltree[n * 2 + 1]/*.Len*/ = 7; + n++; + bl_count[7]++; + } + while (n <= 287) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes(static_ltree, L_CODES + 1, bl_count); -module.exports = braces; + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) { + static_dtree[n * 2 + 1]/*.Len*/ = 5; + static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5); + } + // Now data ready and we can init static trees + static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS); + static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS); + static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS); -/***/ }), + //static_init_done = true; +} -/***/ 789: -/***/ (function(module, __unusedexports, __webpack_require__) { -"use strict"; -/*! - * to-regex-range - * - * Copyright (c) 2015-present, Jon Schlinkert. - * Released under the MIT License. +/* =========================================================================== + * Initialize a new block. */ +function init_block(s) { + var n; /* iterates over tree elements */ + /* Initialize the trees. */ + for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; } + s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1; + s.opt_len = s.static_len = 0; + s.last_lit = s.matches = 0; +} -const isNumber = __webpack_require__(914); -const toRegexRange = (min, max, options) => { - if (isNumber(min) === false) { - throw new TypeError('toRegexRange: expected the first argument to be a number'); +/* =========================================================================== + * Flush the bit buffer and align the output on a byte boundary + */ +function bi_windup(s) +{ + if (s.bi_valid > 8) { + put_short(s, s.bi_buf); + } else if (s.bi_valid > 0) { + //put_byte(s, (Byte)s->bi_buf); + s.pending_buf[s.pending++] = s.bi_buf; } + s.bi_buf = 0; + s.bi_valid = 0; +} - if (max === void 0 || min === max) { - return String(min); - } +/* =========================================================================== + * Copy a stored block, storing first the length and its + * one's complement if requested. 
+ */ +function copy_block(s, buf, len, header) +//DeflateState *s; +//charf *buf; /* the input data */ +//unsigned len; /* its length */ +//int header; /* true if block header must be written */ +{ + bi_windup(s); /* align on byte boundary */ - if (isNumber(max) === false) { - throw new TypeError('toRegexRange: expected the second argument to be a number.'); + if (header) { + put_short(s, len); + put_short(s, ~len); } +// while (len--) { +// put_byte(s, *buf++); +// } + utils.arraySet(s.pending_buf, s.window, buf, len, s.pending); + s.pending += len; +} - let opts = { relaxZeros: true, ...options }; - if (typeof opts.strictZeros === 'boolean') { - opts.relaxZeros = opts.strictZeros === false; - } +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. + */ +function smaller(tree, n, m, depth) { + var _n2 = n * 2; + var _m2 = m * 2; + return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ || + (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m])); +} - let relax = String(opts.relaxZeros); - let shorthand = String(opts.shorthand); - let capture = String(opts.capture); - let wrap = String(opts.wrap); - let cacheKey = min + ':' + max + '=' + relax + shorthand + capture + wrap; +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ +function pqdownheap(s, tree, k) +// deflate_state *s; +// ct_data *tree; /* the tree to restore */ +// int k; /* node to move down */ +{ + var v = s.heap[k]; + var j = k << 1; /* left son of k */ + while (j <= s.heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s.heap_len && + smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s.heap[j], s.depth)) { break; } - if (toRegexRange.cache.hasOwnProperty(cacheKey)) { - return toRegexRange.cache[cacheKey].result; + /* Exchange v with the smallest son */ + s.heap[k] = s.heap[j]; + k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; } + s.heap[k] = v; +} - let a = Math.min(min, max); - let b = Math.max(min, max); - if (Math.abs(a - b) === 1) { - let result = min + '|' + max; - if (opts.capture) { - return `(${result})`; - } - if (opts.wrap === false) { - return result; - } - return `(?:${result})`; - } +// inlined manually +// var SMALLEST = 1; - let isPadded = hasPadding(min) || hasPadding(max); - let state = { min, max, a, b }; - let positives = []; - let negatives = []; +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +function compress_block(s, ltree, dtree) +// deflate_state *s; +// const ct_data *ltree; /* literal tree */ +// const ct_data *dtree; /* distance tree */ +{ + var dist; /* distance of matched string */ + var lc; /* match length or unmatched char (if dist == 0) */ + var lx = 0; /* running index in l_buf */ + var code; /* the code to send */ + var extra; /* number of extra bits to send */ - if (isPadded) { - state.isPadded = isPadded; - state.maxLen = String(state.max).length; - } + if (s.last_lit !== 0) { + do { + dist = 
(s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]); + lc = s.pending_buf[s.l_buf + lx]; + lx++; - if (a < 0) { - let newMin = b < 0 ? Math.abs(b) : 1; - negatives = splitToPatterns(newMin, Math.abs(a), state, opts); - a = state.a = 0; - } + if (dist === 0) { + send_code(s, lc, ltree); /* send a literal byte */ + //Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = _length_code[lc]; + send_code(s, code + LITERALS + 1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra !== 0) { + lc -= base_length[code]; + send_bits(s, lc, extra); /* send the extra length bits */ + } + dist--; /* dist is now the match distance - 1 */ + code = d_code(dist); + //Assert (code < D_CODES, "bad d_code"); - if (b >= 0) { - positives = splitToPatterns(a, b, state, opts); - } + send_code(s, code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra !== 0) { + dist -= base_dist[code]; + send_bits(s, dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? */ - state.negatives = negatives; - state.positives = positives; - state.result = collatePatterns(negatives, positives, opts); + /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ + //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, + // "pendingBuf overflow"); - if (opts.capture === true) { - state.result = `(${state.result})`; - } else if (opts.wrap !== false && (positives.length + negatives.length) > 1) { - state.result = `(?:${state.result})`; + } while (lx < s.last_lit); } - toRegexRange.cache[cacheKey] = state; - return state.result; -}; - -function collatePatterns(neg, pos, options) { - let onlyNegative = filterPatterns(neg, pos, '-', false, options) || []; - let onlyPositive = filterPatterns(pos, neg, '', false, options) || []; - let intersected = filterPatterns(neg, pos, '-?', true, options) || []; - let subpatterns = onlyNegative.concat(intersected).concat(onlyPositive); - return subpatterns.join('|'); + send_code(s, END_BLOCK, ltree); } -function splitToRanges(min, max) { - let nines = 1; - let zeros = 1; - let stop = countNines(min, nines); - let stops = new Set([max]); +/* =========================================================================== + * Construct one Huffman tree and assigns the code bit strings and lengths. + * Update the total bit length for the current block. + * IN assertion: the field freq is set for all tree elements. + * OUT assertions: the fields len and code are set to the optimal bit length + * and corresponding code. The length opt_len is updated; static_len is + * also updated if stree is not null. The field max_code is set. + */ +function build_tree(s, desc) +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ +{ + var tree = desc.dyn_tree; + var stree = desc.stat_desc.static_tree; + var has_stree = desc.stat_desc.has_stree; + var elems = desc.stat_desc.elems; + var n, m; /* iterate over heap elements */ + var max_code = -1; /* largest code with non zero frequency */ + var node; /* new node being created */ - while (min <= stop && stop <= max) { - stops.add(stop); - nines += 1; - stop = countNines(min, nines); - } + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. 
+ */ + s.heap_len = 0; + s.heap_max = HEAP_SIZE; - stop = countZeros(max + 1, zeros) - 1; + for (n = 0; n < elems; n++) { + if (tree[n * 2]/*.Freq*/ !== 0) { + s.heap[++s.heap_len] = max_code = n; + s.depth[n] = 0; - while (min < stop && stop <= max) { - stops.add(stop); - zeros += 1; - stop = countZeros(max + 1, zeros) - 1; + } else { + tree[n * 2 + 1]/*.Len*/ = 0; + } } - stops = [...stops]; - stops.sort(compare); - return stops; -} - -/** - * Convert a range to a regex pattern - * @param {Number} `start` - * @param {Number} `stop` - * @return {String} - */ + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s.heap_len < 2) { + node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0); + tree[node * 2]/*.Freq*/ = 1; + s.depth[node] = 0; + s.opt_len--; -function rangeToPattern(start, stop, options) { - if (start === stop) { - return { pattern: start, count: [], digits: 0 }; + if (has_stree) { + s.static_len -= stree[node * 2 + 1]/*.Len*/; + } + /* node is 0 or 1 so it does not have extra bits */ } + desc.max_code = max_code; - let zipped = zip(start, stop); - let digits = zipped.length; - let pattern = ''; - let count = 0; + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); } - for (let i = 0; i < digits; i++) { - let [startDigit, stopDigit] = zipped[i]; + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + node = elems; /* next internal node of the tree */ + do { + //pqremove(s, tree, n); /* n = node of least frequency */ + /*** pqremove ***/ + n = s.heap[1/*SMALLEST*/]; + s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--]; + pqdownheap(s, tree, 1/*SMALLEST*/); + /***/ - if (startDigit === stopDigit) { - pattern += startDigit; + m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */ + + s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */ + s.heap[--s.heap_max] = m; + + /* Create a new node father of n and m */ + tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/; + s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1; + tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node; - } else if (startDigit !== '0' || stopDigit !== '9') { - pattern += toCharacterClass(startDigit, stopDigit, options); + /* and insert the new node in the heap */ + s.heap[1/*SMALLEST*/] = node++; + pqdownheap(s, tree, 1/*SMALLEST*/); - } else { - count++; - } - } + } while (s.heap_len >= 2); - if (count) { - pattern += options.shorthand === true ? '\\d' : '[0-9]'; - } + s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/]; - return { pattern, count: [count], digits }; -} + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. 
+ */ + gen_bitlen(s, desc); -function splitToPatterns(min, max, tok, options) { - let ranges = splitToRanges(min, max); - let tokens = []; - let start = min; - let prev; + /* The field len is now set, we can generate the bit codes */ + gen_codes(tree, max_code, s.bl_count); +} - for (let i = 0; i < ranges.length; i++) { - let max = ranges[i]; - let obj = rangeToPattern(String(start), String(max), options); - let zeros = ''; - if (!tok.isPadded && prev && prev.pattern === obj.pattern) { - if (prev.count.length > 1) { - prev.count.pop(); - } +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. + */ +function scan_tree(s, tree, max_code) +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ +{ + var n; /* iterates over all tree elements */ + var prevlen = -1; /* last emitted length */ + var curlen; /* length of current code */ - prev.count.push(obj.count[0]); - prev.string = prev.pattern + toQuantifier(prev.count); - start = max + 1; - continue; - } + var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ - if (tok.isPadded) { - zeros = padZeros(max, tok, options); - } + var count = 0; /* repeat count of the current code */ + var max_count = 7; /* max repeat count */ + var min_count = 4; /* min repeat count */ - obj.string = zeros + obj.pattern + toQuantifier(obj.count); - tokens.push(obj); - start = max + 1; - prev = obj; + if (nextlen === 0) { + max_count = 138; + min_count = 3; } + tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */ - return tokens; -} - -function filterPatterns(arr, comparison, prefix, intersection, options) { - let result = []; + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; - for (let ele of arr) { - let { string } = ele; + if (++count < max_count && curlen === nextlen) { + continue; - // only push if _both_ are negative... - if (!intersection && !contains(comparison, 'string', string)) { - result.push(prefix + string); - } + } else if (count < min_count) { + s.bl_tree[curlen * 2]/*.Freq*/ += count; - // or _both_ are positive - if (intersection && contains(comparison, 'string', string)) { - result.push(prefix + string); - } - } - return result; -} + } else if (curlen !== 0) { -/** - * Zip strings - */ + if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; } + s.bl_tree[REP_3_6 * 2]/*.Freq*/++; -function zip(a, b) { - let arr = []; - for (let i = 0; i < a.length; i++) arr.push([a[i], b[i]]); - return arr; -} + } else if (count <= 10) { + s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++; -function compare(a, b) { - return a > b ? 1 : b > a ? -1 : 0; -} + } else { + s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++; + } -function contains(arr, key, val) { - return arr.some(ele => ele[key] === val); -} + count = 0; + prevlen = curlen; -function countNines(min, len) { - return Number(String(min).slice(0, -len) + '9'.repeat(len)); -} + if (nextlen === 0) { + max_count = 138; + min_count = 3; -function countZeros(integer, zeros) { - return integer - (integer % Math.pow(10, zeros)); -} + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; -function toQuantifier(digits) { - let [start = 0, stop = ''] = digits; - if (stop || start > 1) { - return `{${start + (stop ? 
',' + stop : '')}}`; + } else { + max_count = 7; + min_count = 4; + } } - return ''; } -function toCharacterClass(a, b, options) { - return `[${a}${(b - a === 1) ? '' : '-'}${b}]`; -} -function hasPadding(str) { - return /^-?(0+)\d/.test(str); -} +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. + */ +function send_tree(s, tree, max_code) +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ +{ + var n; /* iterates over all tree elements */ + var prevlen = -1; /* last emitted length */ + var curlen; /* length of current code */ -function padZeros(value, tok, options) { - if (!tok.isPadded) { - return value; - } + var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ - let diff = Math.abs(tok.maxLen - String(value).length); - let relax = options.relaxZeros !== false; + var count = 0; /* repeat count of the current code */ + var max_count = 7; /* max repeat count */ + var min_count = 4; /* min repeat count */ - switch (diff) { - case 0: - return ''; - case 1: - return relax ? '0?' : '0'; - case 2: - return relax ? '0{0,2}' : '00'; - default: { - return relax ? `0{0,${diff}}` : `0{${diff}}`; - } + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen === 0) { + max_count = 138; + min_count = 3; } -} -/** - * Cache - */ + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; -toRegexRange.cache = {}; -toRegexRange.clearCache = () => (toRegexRange.cache = {}); + if (++count < max_count && curlen === nextlen) { + continue; -/** - * Expose `toRegexRange` - */ + } else if (count < min_count) { + do { send_code(s, curlen, s.bl_tree); } while (--count !== 0); -module.exports = toRegexRange; + } else if (curlen !== 0) { + if (curlen !== prevlen) { + send_code(s, curlen, s.bl_tree); + count--; + } + //Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s.bl_tree); + send_bits(s, count - 3, 2); + } else if (count <= 10) { + send_code(s, REPZ_3_10, s.bl_tree); + send_bits(s, count - 3, 3); -/***/ }), + } else { + send_code(s, REPZ_11_138, s.bl_tree); + send_bits(s, count - 11, 7); + } -/***/ 798: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + count = 0; + prevlen = curlen; + if (nextlen === 0) { + max_count = 138; + min_count = 3; -"use strict"; + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; -Object.defineProperty(exports, "__esModule", { value: true }); -const stream_1 = __webpack_require__(413); -const async_1 = __webpack_require__(291); -class StreamProvider { - constructor(_root, _settings) { - this._root = _root; - this._settings = _settings; - this._reader = new async_1.default(this._root, this._settings); - this._stream = new stream_1.Readable({ - objectMode: true, - read: () => { }, - destroy: () => { - if (!this._reader.isDestroyed) { - this._reader.destroy(); - } - } - }); + } else { + max_count = 7; + min_count = 4; } - read() { - this._reader.onError((error) => { - this._stream.emit('error', error); - }); - this._reader.onEntry((entry) => { - this._stream.push(entry); - }); - this._reader.onEnd(() => { - this._stream.push(null); - }); - this._reader.read(); - return this._stream; + } +} + + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last 
bit length code to send. + */ +function build_bl_tree(s) { + var max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, s.dyn_ltree, s.l_desc.max_code); + scan_tree(s, s.dyn_dtree, s.d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, s.bl_desc); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { + if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) { + break; } + } + /* Update opt_len to include the bit length tree and counts */ + s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + // s->opt_len, s->static_len)); + + return max_blindex; } -exports.default = StreamProvider; -/***/ }), +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. + */ +function send_all_trees(s, lcodes, dcodes, blcodes) +// deflate_state *s; +// int lcodes, dcodes, blcodes; /* number of codes for each tree */ +{ + var rank; /* index in bl_order */ -/***/ 800: -/***/ (function(module, __unusedexports, __webpack_require__) { + //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + // "too many codes"); + //Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes - 1, 5); + send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3); + } + //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); -"use strict"; + send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */ + //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */ + //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +} -var parsePath = __webpack_require__(666); -function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; } +/* =========================================================================== + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). + * IN assertion: the fields Freq of dyn_ltree are set. 
+ */ +function detect_data_type(s) { + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + var black_mask = 0xf3ffc07f; + var n; -var parsePath__default = /*#__PURE__*/_interopDefaultLegacy(parsePath); + /* Check for non-textual ("black-listed") bytes. */ + for (n = 0; n <= 31; n++, black_mask >>>= 1) { + if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { + return Z_BINARY; + } + } -// https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs -const DATA_URL_DEFAULT_MIME_TYPE = 'text/plain'; -const DATA_URL_DEFAULT_CHARSET = 'us-ascii'; + /* Check for textual ("white-listed") bytes. */ + if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || + s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + for (n = 32; n < LITERALS; n++) { + if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + } -const testParameter = (name, filters) => filters.some(filter => filter instanceof RegExp ? filter.test(name) : filter === name); + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. + */ + return Z_BINARY; +} -const normalizeDataURL = (urlString, {stripHash}) => { - const match = /^data:(?[^,]*?),(?[^#]*?)(?:#(?.*))?$/.exec(urlString); - if (!match) { - throw new Error(`Invalid URL: ${urlString}`); - } +var static_init_done = false; - let {type, data, hash} = match.groups; - const mediaType = type.split(';'); - hash = stripHash ? '' : hash; +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. + */ +function _tr_init(s) +{ - let isBase64 = false; - if (mediaType[mediaType.length - 1] === 'base64') { - mediaType.pop(); - isBase64 = true; - } + if (!static_init_done) { + tr_static_init(); + static_init_done = true; + } - // Lowercase MIME type - const mimeType = (mediaType.shift() || '').toLowerCase(); - const attributes = mediaType - .map(attribute => { - let [key, value = ''] = attribute.split('=').map(string => string.trim()); + s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc); + s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc); + s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc); - // Lowercase `charset` - if (key === 'charset') { - value = value.toLowerCase(); + s.bi_buf = 0; + s.bi_valid = 0; - if (value === DATA_URL_DEFAULT_CHARSET) { - return ''; - } - } + /* Initialize the first block of the first file: */ + init_block(s); +} - return `${key}${value ? `=${value}` : ''}`; - }) - .filter(Boolean); - const normalizedMediaType = [ - ...attributes, - ]; +/* =========================================================================== + * Send a stored block + */ +function _tr_stored_block(s, buf, stored_len, last) +//DeflateState *s; +//charf *buf; /* input block */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ +{ + send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */ + copy_block(s, buf, stored_len, true); /* with header */ +} - if (isBase64) { - normalizedMediaType.push('base64'); - } - if (normalizedMediaType.length > 0 || (mimeType && mimeType !== DATA_URL_DEFAULT_MIME_TYPE)) { - normalizedMediaType.unshift(mimeType); - } +/* =========================================================================== + * Send one empty static block to give enough lookahead for inflate. 
+ * This takes 10 bits, of which 7 may remain in the bit buffer. + */ +function _tr_align(s) { + send_bits(s, STATIC_TREES << 1, 3); + send_code(s, END_BLOCK, static_ltree); + bi_flush(s); +} - return `data:${normalizedMediaType.join(';')},${isBase64 ? data.trim() : data}${hash ? `#${hash}` : ''}`; -}; -function normalizeUrl(urlString, options) { - options = { - defaultProtocol: 'http:', - normalizeProtocol: true, - forceHttp: false, - forceHttps: false, - stripAuthentication: true, - stripHash: false, - stripTextFragment: true, - stripWWW: true, - removeQueryParameters: [/^utm_\w+/i], - removeTrailingSlash: true, - removeSingleSlash: true, - removeDirectoryIndex: false, - sortQueryParameters: true, - ...options, - }; +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and output the encoded block to the zip file. + */ +function _tr_flush_block(s, buf, stored_len, last) +//DeflateState *s; +//charf *buf; /* input block, or NULL if too old */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ +{ + var opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + var max_blindex = 0; /* index of last bit length code of non zero freq */ - urlString = urlString.trim(); + /* Build the Huffman trees unless a stored block is forced */ + if (s.level > 0) { - // Data URL - if (/^data:/i.test(urlString)) { - return normalizeDataURL(urlString, options); - } + /* Check if the file is binary or text */ + if (s.strm.data_type === Z_UNKNOWN) { + s.strm.data_type = detect_data_type(s); + } - if (/^view-source:/i.test(urlString)) { - throw new Error('`view-source:` is not supported as it is a non-standard protocol'); - } + /* Construct the literal and distance trees */ + build_tree(s, s.l_desc); + // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); - const hasRelativeProtocol = urlString.startsWith('//'); - const isRelativeUrl = !hasRelativeProtocol && /^\.*\//.test(urlString); + build_tree(s, s.d_desc); + // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ - // Prepend protocol - if (!isRelativeUrl) { - urlString = urlString.replace(/^(?!(?:\w+:)?\/\/)|^\/\//, options.defaultProtocol); - } + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(s); - const urlObject = new URL(urlString); + /* Determine the best encoding. Compute the block lengths in bytes. 
*/ + opt_lenb = (s.opt_len + 3 + 7) >>> 3; + static_lenb = (s.static_len + 3 + 7) >>> 3; - if (options.forceHttp && options.forceHttps) { - throw new Error('The `forceHttp` and `forceHttps` options cannot be used together'); - } + // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, + // s->last_lit)); - if (options.forceHttp && urlObject.protocol === 'https:') { - urlObject.protocol = 'http:'; - } + if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; } - if (options.forceHttps && urlObject.protocol === 'http:') { - urlObject.protocol = 'https:'; - } + } else { + // Assert(buf != (char*)0, "lost buf"); + opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ + } - // Remove auth - if (options.stripAuthentication) { - urlObject.username = ''; - urlObject.password = ''; - } + if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) { + /* 4: two words for the lengths */ - // Remove hash - if (options.stripHash) { - urlObject.hash = ''; - } else if (options.stripTextFragment) { - urlObject.hash = urlObject.hash.replace(/#?:~:text.*?$/i, ''); - } + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. + */ + _tr_stored_block(s, buf, stored_len, last); - // Remove duplicate slashes if not preceded by a protocol - // NOTE: This could be implemented using a single negative lookbehind - // regex, but we avoid that to maintain compatibility with older js engines - // which do not have support for that feature. - if (urlObject.pathname) { - // TODO: Replace everything below with `urlObject.pathname = urlObject.pathname.replace(/(?compressed_len == s->bits_sent, "bad compressed size"); + /* The above check is made mod 2^32, for files larger than 512 MB + * and uLong implemented on 32 bits. + */ + init_block(s); - const protocol = match[0]; - const protocolAtIndex = match.index; - const intermediate = urlObject.pathname.slice(lastIndex, protocolAtIndex); + if (last) { + bi_windup(s); + } + // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, + // s->compressed_len-7*last)); +} - result += intermediate.replace(/\/{2,}/g, '/'); - result += protocol; - lastIndex = protocolAtIndex + protocol.length; - } +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. 
+ */ +function _tr_tally(s, dist, lc) +// deflate_state *s; +// unsigned dist; /* distance of matched string */ +// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ +{ + //var out_length, in_length, dcode; - const remnant = urlObject.pathname.slice(lastIndex, urlObject.pathname.length); - result += remnant.replace(/\/{2,}/g, '/'); + s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff; + s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff; - urlObject.pathname = result; - } + s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff; + s.last_lit++; - // Decode URI octets - if (urlObject.pathname) { - try { - urlObject.pathname = decodeURI(urlObject.pathname); - } catch {} - } + if (dist === 0) { + /* lc is the unmatched char */ + s.dyn_ltree[lc * 2]/*.Freq*/++; + } else { + s.matches++; + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + //Assert((ush)dist < (ush)MAX_DIST(s) && + // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); - // Remove directory index - if (options.removeDirectoryIndex === true) { - options.removeDirectoryIndex = [/^index\.[a-z]+$/]; - } + s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++; + s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++; + } - if (Array.isArray(options.removeDirectoryIndex) && options.removeDirectoryIndex.length > 0) { - let pathComponents = urlObject.pathname.split('/'); - const lastComponent = pathComponents[pathComponents.length - 1]; +// (!) This block is disabled in zlib defaults, +// don't enable it for binary compatibility - if (testParameter(lastComponent, options.removeDirectoryIndex)) { - pathComponents = pathComponents.slice(0, -1); - urlObject.pathname = pathComponents.slice(1).join('/') + '/'; - } - } +//#ifdef TRUNCATE_BLOCK +// /* Try to guess if it is profitable to stop the current block here */ +// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) { +// /* Compute an upper bound for the compressed length */ +// out_length = s.last_lit*8; +// in_length = s.strstart - s.block_start; +// +// for (dcode = 0; dcode < D_CODES; dcode++) { +// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]); +// } +// out_length >>>= 3; +// //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +// // s->last_lit, in_length, out_length, +// // 100L - out_length*100L/in_length)); +// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) { +// return true; +// } +// } +//#endif - if (urlObject.hostname) { - // Remove trailing dot - urlObject.hostname = urlObject.hostname.replace(/\.$/, ''); + return (s.last_lit === s.lit_bufsize - 1); + /* We avoid equality with lit_bufsize because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. + */ +} - // Remove `www.` - if (options.stripWWW && /^www\.(?!www\.)[a-z\-\d]{1,63}\.[a-z.\-\d]{2,63}$/.test(urlObject.hostname)) { - // Each label should be max 63 at length (min: 1). - // Source: https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names - // Each TLD should be up to 63 characters long (min: 2). - // It is technically possible to have a single character TLD, but none currently exist. 
- urlObject.hostname = urlObject.hostname.replace(/^www\./, ''); - } - } +exports._tr_init = _tr_init; +exports._tr_stored_block = _tr_stored_block; +exports._tr_flush_block = _tr_flush_block; +exports._tr_tally = _tr_tally; +exports._tr_align = _tr_align; - // Remove query unwanted parameters - if (Array.isArray(options.removeQueryParameters)) { - // eslint-disable-next-line unicorn/no-useless-spread -- We are intentionally spreading to get a copy. - for (const key of [...urlObject.searchParams.keys()]) { - if (testParameter(key, options.removeQueryParameters)) { - urlObject.searchParams.delete(key); - } - } - } - if (options.removeQueryParameters === true) { - urlObject.search = ''; - } +/***/ }), - // Sort query parameters - if (options.sortQueryParameters) { - urlObject.searchParams.sort(); +/***/ 6442: +/***/ ((module) => { - // Calling `.sort()` encodes the search parameters, so we need to decode them again. - try { - urlObject.search = decodeURIComponent(urlObject.search); - } catch {} - } +"use strict"; - if (options.removeTrailingSlash) { - urlObject.pathname = urlObject.pathname.replace(/\/$/, ''); - } - const oldUrlString = urlString; +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
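// A minimal usage sketch, assuming a pako-style deflate/inflate consumer,
// of how the ZStream fields defined below are typically populated before a
// call into the compression routines (kept as comments; the concrete
// deflate/inflate entry points are assumptions, not defined at this point):
//
//   var strm = new ZStream();
//   strm.input = new Uint8Array([0x68, 0x69]); // bytes to be consumed
//   strm.next_in = 0;                          // read position in `input`
//   strm.avail_in = strm.input.length;         // bytes still available
//   strm.output = new Uint8Array(16384);       // destination buffer
//   strm.next_out = 0;                         // write position in `output`
//   strm.avail_out = strm.output.length;       // free space remaining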
- // Take advantage of many of the Node `url` normalizations - urlString = urlObject.toString(); +function ZStream() { + /* next input byte */ + this.input = null; // JS specific, because we have no pointers + this.next_in = 0; + /* number of bytes available at input */ + this.avail_in = 0; + /* total number of input bytes read so far */ + this.total_in = 0; + /* next output byte should be put there */ + this.output = null; // JS specific, because we have no pointers + this.next_out = 0; + /* remaining free space at output */ + this.avail_out = 0; + /* total number of bytes output so far */ + this.total_out = 0; + /* last error message, NULL if no error */ + this.msg = ''/*Z_NULL*/; + /* not visible by applications */ + this.state = null; + /* best guess about the data type: binary or text */ + this.data_type = 2/*Z_UNKNOWN*/; + /* adler32 value of the uncompressed data */ + this.adler = 0; +} - if (!options.removeSingleSlash && urlObject.pathname === '/' && !oldUrlString.endsWith('/') && urlObject.hash === '') { - urlString = urlString.replace(/\/$/, ''); - } +module.exports = ZStream; - // Remove ending `/` unless removeSingleSlash is false - if ((options.removeTrailingSlash || urlObject.pathname === '/') && urlObject.hash === '' && options.removeSingleSlash) { - urlString = urlString.replace(/\/$/, ''); - } - // Restore relative protocol, if applicable - if (hasRelativeProtocol && !options.normalizeProtocol) { - urlString = urlString.replace(/^http:\/\//, '//'); - } +/***/ }), - // Remove http/https - if (options.stripProtocol) { - urlString = urlString.replace(/^(?:https?:)?\/\//, ''); - } +/***/ 4795: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; - return urlString; -} -// Dependencies +var protocols = __nccwpck_require__(9217); /** - * parseUrl + * parsePath * Parses the input url. * - * **Note**: This *throws* if invalid urls are provided. - * - * @name parseUrl + * @name parsePath * @function * @param {String} url The input url. - * @param {Boolean|Object} normalize Whether to normalize the url or not. - * Default is `false`. If `true`, the url will - * be normalized. If an object, it will be the - * options object sent to [`normalize-url`](https://github.com/sindresorhus/normalize-url). - * - * For SSH urls, normalize won't work. - * * @return {Object} An object containing the following fields: * * - `protocols` (Array): An array with the url protocols (usually it has one element). - * - `protocol` (String): The first protocol, `"ssh"` (if the url is a ssh url) or `"file"`. - * - `port` (null|Number): The domain port. - * - `resource` (String): The url domain (including subdomains). - * - `user` (String): The authentication user (usually for ssh urls). + * - `protocol` (String): The first protocol or `"file"`. + * - `port` (String): The domain port (default: `""`). + * - `resource` (String): The url domain/hostname. + * - `host` (String): The url domain (including subdomain and port). + * - `user` (String): The authentication user (default: `""`). + * - `password` (String): The authentication password (default: `""`). * - `pathname` (String): The url pathname. * - `hash` (String): The url hash. - * - `search` (String): The url querystring value. - * - `href` (String): The input url. + * - `search` (String): The url querystring value (excluding `?`). + * - `href` (String): The normalized input url. * - `query` (Object): The url querystring, parsed as object. * - `parse_failed` (Boolean): Whether the parsing failed or not. 
*/ -const parseUrl = (url, normalize = false) => { - - // Constants - const GIT_RE = /^(?:([a-z_][a-z0-9_-]{0,31})@|https?:\/\/)([\w\.\-@]+)[\/:]([\~,\.\w,\-,\_,\/]+?(?:\.git|\/)?)$/; +function parsePath(url) { - const throwErr = msg => { - const err = new Error(msg); - err.subject_url = url; - throw err + var output = { + protocols: [], + protocol: null, + port: null, + resource: "", + host: "", + user: "", + password: "", + pathname: "", + hash: "", + search: "", + href: url, + query: {}, + parse_failed: false }; - if (typeof url !== "string" || !url.trim()) { - throwErr("Invalid url."); - } - - if (url.length > parseUrl.MAX_INPUT_LENGTH) { - throwErr("Input exceeds maximum length. If needed, change the value of parseUrl.MAX_INPUT_LENGTH."); - } - - if (normalize) { - if (typeof normalize !== "object") { - normalize = { - stripHash: false - }; - } - url = normalizeUrl(url, normalize); - } - - const parsed = parsePath__default["default"](url); - - // Potential git-ssh urls - if (parsed.parse_failed) { - const matched = parsed.href.match(GIT_RE); - - if (matched) { - parsed.protocols = ["ssh"]; - parsed.protocol = "ssh"; - parsed.resource = matched[2]; - parsed.host = matched[2]; - parsed.user = matched[1]; - parsed.pathname = `/${matched[3]}`; - parsed.parse_failed = false; - } else { - throwErr("URL parsing failed."); - } + try { + var parsed = new URL(url); + output.protocols = protocols(parsed); + output.protocol = output.protocols[0]; + output.port = parsed.port; + output.resource = parsed.hostname; + output.host = parsed.host; + output.user = parsed.username || ""; + output.password = parsed.password || ""; + output.pathname = parsed.pathname; + output.hash = parsed.hash.slice(1); + output.search = parsed.search.slice(1); + output.href = parsed.href; + output.query = Object.fromEntries(parsed.searchParams); + } catch (e) { + // TODO Maybe check if it is a valid local file path + // In any case, these will be parsed by higher + // level parsers such as parse-url, git-url-parse, git-up + output.protocols = ["file"]; + output.protocol = output.protocols[0]; + output.port = ""; + output.resource = ""; + output.user = ""; + output.pathname = ""; + output.hash = ""; + output.search = ""; + output.href = url; + output.query = {}; + output.parse_failed = true; } - return parsed; -}; - -parseUrl.MAX_INPUT_LENGTH = 2048; - -module.exports = parseUrl; + return output; +} +module.exports = parsePath; /***/ }), -/***/ 802: -/***/ (function(module) { +/***/ 473: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -const processFn = (fn, options) => function (...args) { - const P = options.promiseModule; - - return new P((resolve, reject) => { - if (options.multiArgs) { - args.push((...result) => { - if (options.errorFirst) { - if (result[0]) { - reject(result); - } else { - result.shift(); - resolve(result); - } - } else { - resolve(result); - } - }); - } else if (options.errorFirst) { - args.push((error, result) => { - if (error) { - reject(error); - } else { - resolve(result); - } - }); - } else { - args.push(resolve); - } +var parsePath = __nccwpck_require__(4795); - fn.apply(this, args); - }); -}; +function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? 
e : { 'default': e }; }

-module.exports = (input, options) => {
-	options = Object.assign({
-		exclude: [/.+(Sync|Stream)$/],
-		errorFirst: true,
-		promiseModule: Promise
-	}, options);
+var parsePath__default = /*#__PURE__*/_interopDefaultLegacy(parsePath);

-	const objType = typeof input;
-	if (!(input !== null && (objType === 'object' || objType === 'function'))) {
-		throw new TypeError(`Expected \`input\` to be a \`Function\` or \`Object\`, got \`${input === null ? 'null' : objType}\``);
-	}
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
+const DATA_URL_DEFAULT_MIME_TYPE = 'text/plain';
+const DATA_URL_DEFAULT_CHARSET = 'us-ascii';

-	const filter = key => {
-		const match = pattern => typeof pattern === 'string' ? key === pattern : pattern.test(key);
-		return options.include ? options.include.some(match) : !options.exclude.some(match);
-	};
+const testParameter = (name, filters) => filters.some(filter => filter instanceof RegExp ? filter.test(name) : filter === name);

-	let ret;
-	if (objType === 'function') {
-		ret = function (...args) {
-			return options.excludeMain ? input(...args) : processFn(input, options).apply(this, args);
-		};
-	} else {
-		ret = Object.create(Object.getPrototypeOf(input));
-	}
+const normalizeDataURL = (urlString, {stripHash}) => {
+	const match = /^data:(?<type>[^,]*?),(?<data>[^#]*?)(?:#(?<hash>.*))?$/.exec(urlString);

-	for (const key in input) { // eslint-disable-line guard-for-in
-		const property = input[key];
-		ret[key] = typeof property === 'function' && filter(key) ? processFn(property, options) : property;
+	if (!match) {
+		throw new Error(`Invalid URL: ${urlString}`);
 	}

-	return ret;
-};


-/***/ }),

-/***/ 806:
-/***/ (function(module, __unusedexports, __webpack_require__) {

-"use strict";


-const constants = __webpack_require__(199);
-const utils = __webpack_require__(265);

-/**
- * Constants
- */

-const {
-  MAX_LENGTH,
-  POSIX_REGEX_SOURCE,
-  REGEX_NON_SPECIAL_CHARS,
-  REGEX_SPECIAL_CHARS_BACKREF,
-  REPLACEMENTS
-} = constants;

-/**
- * Helpers
- */

-const expandRange = (args, options) => {
-  if (typeof options.expandRange === 'function') {
-    return options.expandRange(...args, options);
-  }
+	let {type, data, hash} = match.groups;
+	const mediaType = type.split(';');
+	hash = stripHash ? '' : hash;

-  args.sort();
-  const value = `[${args.join('-')}]`;
+	let isBase64 = false;
+	if (mediaType[mediaType.length - 1] === 'base64') {
+		mediaType.pop();
+		isBase64 = true;
+	}

-  try {
-    /* eslint-disable-next-line no-new */
-    new RegExp(value);
-  } catch (ex) {
-    return args.map(v => utils.escapeRegex(v)).join('..');
-  }
+	// Lowercase MIME type
+	const mimeType = (mediaType.shift() || '').toLowerCase();
+	const attributes = mediaType
+		.map(attribute => {
+			let [key, value = ''] = attribute.split('=').map(string => string.trim());

-  return value;
-};
+			// Lowercase `charset`
+			if (key === 'charset') {
+				value = value.toLowerCase();

-/**
- * Create the message for a syntax error
- */
+				if (value === DATA_URL_DEFAULT_CHARSET) {
+					return '';
+				}
+			}

-const syntaxError = (type, char) => {
-  return `Missing ${type}: "${char}" - use "\\\\${char}" to match literal characters`;
-};
+			return `${key}${value ? `=${value}` : ''}`;
+		})
+		.filter(Boolean);

-/**
- * Parse the given input string. 
- * @param {String} input - * @param {Object} options - * @return {Object} - */ + const normalizedMediaType = [ + ...attributes, + ]; -const parse = (input, options) => { - if (typeof input !== 'string') { - throw new TypeError('Expected a string'); - } + if (isBase64) { + normalizedMediaType.push('base64'); + } - input = REPLACEMENTS[input] || input; + if (normalizedMediaType.length > 0 || (mimeType && mimeType !== DATA_URL_DEFAULT_MIME_TYPE)) { + normalizedMediaType.unshift(mimeType); + } - const opts = { ...options }; - const max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH; + return `data:${normalizedMediaType.join(';')},${isBase64 ? data.trim() : data}${hash ? `#${hash}` : ''}`; +}; - let len = input.length; - if (len > max) { - throw new SyntaxError(`Input length: ${len}, exceeds maximum allowed length: ${max}`); - } +function normalizeUrl(urlString, options) { + options = { + defaultProtocol: 'http:', + normalizeProtocol: true, + forceHttp: false, + forceHttps: false, + stripAuthentication: true, + stripHash: false, + stripTextFragment: true, + stripWWW: true, + removeQueryParameters: [/^utm_\w+/i], + removeTrailingSlash: true, + removeSingleSlash: true, + removeDirectoryIndex: false, + sortQueryParameters: true, + ...options, + }; - const bos = { type: 'bos', value: '', output: opts.prepend || '' }; - const tokens = [bos]; + urlString = urlString.trim(); - const capture = opts.capture ? '' : '?:'; - const win32 = utils.isWindows(options); + // Data URL + if (/^data:/i.test(urlString)) { + return normalizeDataURL(urlString, options); + } - // create constants based on platform, for windows or posix - const PLATFORM_CHARS = constants.globChars(win32); - const EXTGLOB_CHARS = constants.extglobChars(PLATFORM_CHARS); + if (/^view-source:/i.test(urlString)) { + throw new Error('`view-source:` is not supported as it is a non-standard protocol'); + } - const { - DOT_LITERAL, - PLUS_LITERAL, - SLASH_LITERAL, - ONE_CHAR, - DOTS_SLASH, - NO_DOT, - NO_DOT_SLASH, - NO_DOTS_SLASH, - QMARK, - QMARK_NO_DOT, - STAR, - START_ANCHOR - } = PLATFORM_CHARS; + const hasRelativeProtocol = urlString.startsWith('//'); + const isRelativeUrl = !hasRelativeProtocol && /^\.*\//.test(urlString); - const globstar = opts => { - return `(${capture}(?:(?!${START_ANCHOR}${opts.dot ? DOTS_SLASH : DOT_LITERAL}).)*?)`; - }; + // Prepend protocol + if (!isRelativeUrl) { + urlString = urlString.replace(/^(?!(?:\w+:)?\/\/)|^\/\//, options.defaultProtocol); + } - const nodot = opts.dot ? '' : NO_DOT; - const qmarkNoDot = opts.dot ? QMARK : QMARK_NO_DOT; - let star = opts.bash === true ? 
globstar(opts) : STAR; + const urlObject = new URL(urlString); - if (opts.capture) { - star = `(${star})`; - } + if (options.forceHttp && options.forceHttps) { + throw new Error('The `forceHttp` and `forceHttps` options cannot be used together'); + } - // minimatch options support - if (typeof opts.noext === 'boolean') { - opts.noextglob = opts.noext; - } + if (options.forceHttp && urlObject.protocol === 'https:') { + urlObject.protocol = 'http:'; + } - const state = { - input, - index: -1, - start: 0, - dot: opts.dot === true, - consumed: '', - output: '', - prefix: '', - backtrack: false, - negated: false, - brackets: 0, - braces: 0, - parens: 0, - quotes: 0, - globstar: false, - tokens - }; + if (options.forceHttps && urlObject.protocol === 'http:') { + urlObject.protocol = 'https:'; + } - input = utils.removePrefix(input, state); - len = input.length; + // Remove auth + if (options.stripAuthentication) { + urlObject.username = ''; + urlObject.password = ''; + } - const extglobs = []; - const braces = []; - const stack = []; - let prev = bos; - let value; + // Remove hash + if (options.stripHash) { + urlObject.hash = ''; + } else if (options.stripTextFragment) { + urlObject.hash = urlObject.hash.replace(/#?:~:text.*?$/i, ''); + } - /** - * Tokenizing helpers - */ + // Remove duplicate slashes if not preceded by a protocol + // NOTE: This could be implemented using a single negative lookbehind + // regex, but we avoid that to maintain compatibility with older js engines + // which do not have support for that feature. + if (urlObject.pathname) { + // TODO: Replace everything below with `urlObject.pathname = urlObject.pathname.replace(/(? state.index === len - 1; - const peek = state.peek = (n = 1) => input[state.index + n]; - const advance = state.advance = () => input[++state.index] || ''; - const remaining = () => input.slice(state.index + 1); - const consume = (value = '', num = 0) => { - state.consumed += value; - state.index += num; - }; + // Split the string by occurrences of this protocol regex, and perform + // duplicate-slash replacement on the strings between those occurrences + // (if any). + const protocolRegex = /\b[a-z][a-z\d+\-.]{1,50}:\/\//g; - const append = token => { - state.output += token.output != null ? token.output : token.value; - consume(token.value); - }; + let lastIndex = 0; + let result = ''; + for (;;) { + const match = protocolRegex.exec(urlObject.pathname); + if (!match) { + break; + } - const negate = () => { - let count = 1; + const protocol = match[0]; + const protocolAtIndex = match.index; + const intermediate = urlObject.pathname.slice(lastIndex, protocolAtIndex); - while (peek() === '!' 
&& (peek(2) !== '(' || peek(3) === '?')) { - advance(); - state.start++; - count++; - } + result += intermediate.replace(/\/{2,}/g, '/'); + result += protocol; + lastIndex = protocolAtIndex + protocol.length; + } - if (count % 2 === 0) { - return false; - } + const remnant = urlObject.pathname.slice(lastIndex, urlObject.pathname.length); + result += remnant.replace(/\/{2,}/g, '/'); - state.negated = true; - state.start++; - return true; - }; + urlObject.pathname = result; + } - const increment = type => { - state[type]++; - stack.push(type); - }; + // Decode URI octets + if (urlObject.pathname) { + try { + urlObject.pathname = decodeURI(urlObject.pathname); + } catch {} + } - const decrement = type => { - state[type]--; - stack.pop(); - }; + // Remove directory index + if (options.removeDirectoryIndex === true) { + options.removeDirectoryIndex = [/^index\.[a-z]+$/]; + } - /** - * Push tokens onto the tokens array. This helper speeds up - * tokenizing by 1) helping us avoid backtracking as much as possible, - * and 2) helping us avoid creating extra tokens when consecutive - * characters are plain text. This improves performance and simplifies - * lookbehinds. - */ + if (Array.isArray(options.removeDirectoryIndex) && options.removeDirectoryIndex.length > 0) { + let pathComponents = urlObject.pathname.split('/'); + const lastComponent = pathComponents[pathComponents.length - 1]; - const push = tok => { - if (prev.type === 'globstar') { - const isBrace = state.braces > 0 && (tok.type === 'comma' || tok.type === 'brace'); - const isExtglob = tok.extglob === true || (extglobs.length && (tok.type === 'pipe' || tok.type === 'paren')); + if (testParameter(lastComponent, options.removeDirectoryIndex)) { + pathComponents = pathComponents.slice(0, -1); + urlObject.pathname = pathComponents.slice(1).join('/') + '/'; + } + } - if (tok.type !== 'slash' && tok.type !== 'paren' && !isBrace && !isExtglob) { - state.output = state.output.slice(0, -prev.output.length); - prev.type = 'star'; - prev.value = '*'; - prev.output = star; - state.output += prev.output; - } - } + if (urlObject.hostname) { + // Remove trailing dot + urlObject.hostname = urlObject.hostname.replace(/\.$/, ''); + + // Remove `www.` + if (options.stripWWW && /^www\.(?!www\.)[a-z\-\d]{1,63}\.[a-z.\-\d]{2,63}$/.test(urlObject.hostname)) { + // Each label should be max 63 at length (min: 1). + // Source: https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + // Each TLD should be up to 63 characters long (min: 2). + // It is technically possible to have a single character TLD, but none currently exist. + urlObject.hostname = urlObject.hostname.replace(/^www\./, ''); + } + } - if (extglobs.length && tok.type !== 'paren') { - extglobs[extglobs.length - 1].inner += tok.value; - } + // Remove query unwanted parameters + if (Array.isArray(options.removeQueryParameters)) { + // eslint-disable-next-line unicorn/no-useless-spread -- We are intentionally spreading to get a copy. 
+ for (const key of [...urlObject.searchParams.keys()]) { + if (testParameter(key, options.removeQueryParameters)) { + urlObject.searchParams.delete(key); + } + } + } - if (tok.value || tok.output) append(tok); - if (prev && prev.type === 'text' && tok.type === 'text') { - prev.value += tok.value; - prev.output = (prev.output || '') + tok.value; - return; - } + if (options.removeQueryParameters === true) { + urlObject.search = ''; + } - tok.prev = prev; - tokens.push(tok); - prev = tok; - }; + // Sort query parameters + if (options.sortQueryParameters) { + urlObject.searchParams.sort(); - const extglobOpen = (type, value) => { - const token = { ...EXTGLOB_CHARS[value], conditions: 1, inner: '' }; + // Calling `.sort()` encodes the search parameters, so we need to decode them again. + try { + urlObject.search = decodeURIComponent(urlObject.search); + } catch {} + } - token.prev = prev; - token.parens = state.parens; - token.output = state.output; - const output = (opts.capture ? '(' : '') + token.open; + if (options.removeTrailingSlash) { + urlObject.pathname = urlObject.pathname.replace(/\/$/, ''); + } - increment('parens'); - push({ type, value, output: state.output ? '' : ONE_CHAR }); - push({ type: 'paren', extglob: true, value: advance(), output }); - extglobs.push(token); - }; + const oldUrlString = urlString; - const extglobClose = token => { - let output = token.close + (opts.capture ? ')' : ''); - let rest; + // Take advantage of many of the Node `url` normalizations + urlString = urlObject.toString(); - if (token.type === 'negate') { - let extglobStar = star; + if (!options.removeSingleSlash && urlObject.pathname === '/' && !oldUrlString.endsWith('/') && urlObject.hash === '') { + urlString = urlString.replace(/\/$/, ''); + } - if (token.inner && token.inner.length > 1 && token.inner.includes('/')) { - extglobStar = globstar(opts); - } + // Remove ending `/` unless removeSingleSlash is false + if ((options.removeTrailingSlash || urlObject.pathname === '/') && urlObject.hash === '' && options.removeSingleSlash) { + urlString = urlString.replace(/\/$/, ''); + } - if (extglobStar !== star || eos() || /^\)+$/.test(remaining())) { - output = token.close = `)$))${extglobStar}`; - } + // Restore relative protocol, if applicable + if (hasRelativeProtocol && !options.normalizeProtocol) { + urlString = urlString.replace(/^http:\/\//, '//'); + } - if (token.inner.includes('*') && (rest = remaining()) && /^\.[^\\/.]+$/.test(rest)) { - // Any non-magical string (`.ts`) or even nested expression (`.{ts,tsx}`) can follow after the closing parenthesis. - // In this case, we need to parse the string and use it in the output of the original pattern. - // Suitable patterns: `/!(*.d).ts`, `/!(*.d).{ts,tsx}`, `**/!(*-dbg).@(js)`. - // - // Disabling the `fastpaths` option due to a problem with parsing strings as `.ts` in the pattern like `**/!(*.d).ts`. - const expression = parse(rest, { ...options, fastpaths: false }).output; + // Remove http/https + if (options.stripProtocol) { + urlString = urlString.replace(/^(?:https?:)?\/\//, ''); + } - output = token.close = `)${expression})${extglobStar})`; - } + return urlString; +} - if (token.prev.type === 'bos') { - state.negatedExtglob = true; - } - } +// Dependencies - push({ type: 'paren', extglob: true, value, output }); - decrement('parens'); - }; +/** + * parseUrl + * Parses the input url. + * + * **Note**: This *throws* if invalid urls are provided. + * + * @name parseUrl + * @function + * @param {String} url The input url. 
+ * @param {Boolean|Object} normalize Whether to normalize the url or not. + * Default is `false`. If `true`, the url will + * be normalized. If an object, it will be the + * options object sent to [`normalize-url`](https://github.com/sindresorhus/normalize-url). + * + * For SSH urls, normalize won't work. + * + * @return {Object} An object containing the following fields: + * + * - `protocols` (Array): An array with the url protocols (usually it has one element). + * - `protocol` (String): The first protocol, `"ssh"` (if the url is a ssh url) or `"file"`. + * - `port` (null|Number): The domain port. + * - `resource` (String): The url domain (including subdomains). + * - `user` (String): The authentication user (usually for ssh urls). + * - `pathname` (String): The url pathname. + * - `hash` (String): The url hash. + * - `search` (String): The url querystring value. + * - `href` (String): The input url. + * - `query` (Object): The url querystring, parsed as object. + * - `parse_failed` (Boolean): Whether the parsing failed or not. + */ +const parseUrl = (url, normalize = false) => { - /** - * Fast paths - */ + // Constants + const GIT_RE = /^(?:([a-z_][a-z0-9_-]{0,31})@|https?:\/\/)([\w\.\-@]+)[\/:]([\~,\.\w,\-,\_,\/]+?(?:\.git|\/)?)$/; - if (opts.fastpaths !== false && !/(^[*!]|[/()[\]{}"])/.test(input)) { - let backslashes = false; + const throwErr = msg => { + const err = new Error(msg); + err.subject_url = url; + throw err + }; - let output = input.replace(REGEX_SPECIAL_CHARS_BACKREF, (m, esc, chars, first, rest, index) => { - if (first === '\\') { - backslashes = true; - return m; - } + if (typeof url !== "string" || !url.trim()) { + throwErr("Invalid url."); + } - if (first === '?') { - if (esc) { - return esc + first + (rest ? QMARK.repeat(rest.length) : ''); - } - if (index === 0) { - return qmarkNoDot + (rest ? QMARK.repeat(rest.length) : ''); + if (url.length > parseUrl.MAX_INPUT_LENGTH) { + throwErr("Input exceeds maximum length. If needed, change the value of parseUrl.MAX_INPUT_LENGTH."); + } + + if (normalize) { + if (typeof normalize !== "object") { + normalize = { + stripHash: false + }; } - return QMARK.repeat(chars.length); - } + url = normalizeUrl(url, normalize); + } - if (first === '.') { - return DOT_LITERAL.repeat(chars.length); - } + const parsed = parsePath__default["default"](url); - if (first === '*') { - if (esc) { - return esc + first + (rest ? star : ''); - } - return star; - } - return esc ? m : `\\${m}`; - }); + // Potential git-ssh urls + if (parsed.parse_failed) { + const matched = parsed.href.match(GIT_RE); - if (backslashes === true) { - if (opts.unescape === true) { - output = output.replace(/\\/g, ''); - } else { - output = output.replace(/\\+/g, m => { - return m.length % 2 === 0 ? '\\\\' : (m ? 
'\\' : ''); - }); - } + if (matched) { + parsed.protocols = ["ssh"]; + parsed.protocol = "ssh"; + parsed.resource = matched[2]; + parsed.host = matched[2]; + parsed.user = matched[1]; + parsed.pathname = `/${matched[3]}`; + parsed.parse_failed = false; + } else { + throwErr("URL parsing failed."); + } } - if (output === input && opts.contains === true) { - state.output = input; - return state; - } + return parsed; +}; - state.output = utils.wrapOutput(output, state, options); - return state; - } +parseUrl.MAX_INPUT_LENGTH = 2048; - /** - * Tokenize input until we reach end-of-string - */ +module.exports = parseUrl; - while (!eos()) { - value = advance(); - if (value === '\u0000') { - continue; - } +/***/ }), - /** - * Escaped characters - */ +/***/ 8569: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - if (value === '\\') { - const next = peek(); +"use strict"; - if (next === '/' && opts.bash !== true) { - continue; - } - if (next === '.' || next === ';') { - continue; - } +module.exports = __nccwpck_require__(3322); - if (!next) { - value += '\\'; - push({ type: 'text', value }); - continue; - } - // collapse slashes to reduce potential for exploits - const match = /^\\+/.exec(remaining()); - let slashes = 0; +/***/ }), - if (match && match[0].length > 2) { - slashes = match[0].length; - state.index += slashes; - if (slashes % 2 !== 0) { - value += '\\'; - } - } +/***/ 6099: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - if (opts.unescape === true) { - value = advance(); - } else { - value += advance(); - } +"use strict"; - if (state.brackets === 0) { - push({ type: 'text', value }); - continue; - } - } - /** - * If we're inside a regex character class, continue - * until we reach the closing bracket. - */ +const path = __nccwpck_require__(1017); +const WIN_SLASH = '\\\\/'; +const WIN_NO_SLASH = `[^${WIN_SLASH}]`; - if (state.brackets > 0 && (value !== ']' || prev.value === '[' || prev.value === '[^')) { - if (opts.posix !== false && value === ':') { - const inner = prev.value.slice(1); - if (inner.includes('[')) { - prev.posix = true; +/** + * Posix glob regex + */ - if (inner.includes(':')) { - const idx = prev.value.lastIndexOf('['); - const pre = prev.value.slice(0, idx); - const rest = prev.value.slice(idx + 2); - const posix = POSIX_REGEX_SOURCE[rest]; - if (posix) { - prev.value = pre + posix; - state.backtrack = true; - advance(); +const DOT_LITERAL = '\\.'; +const PLUS_LITERAL = '\\+'; +const QMARK_LITERAL = '\\?'; +const SLASH_LITERAL = '\\/'; +const ONE_CHAR = '(?=.)'; +const QMARK = '[^/]'; +const END_ANCHOR = `(?:${SLASH_LITERAL}|$)`; +const START_ANCHOR = `(?:^|${SLASH_LITERAL})`; +const DOTS_SLASH = `${DOT_LITERAL}{1,2}${END_ANCHOR}`; +const NO_DOT = `(?!${DOT_LITERAL})`; +const NO_DOTS = `(?!${START_ANCHOR}${DOTS_SLASH})`; +const NO_DOT_SLASH = `(?!${DOT_LITERAL}{0,1}${END_ANCHOR})`; +const NO_DOTS_SLASH = `(?!${DOTS_SLASH})`; +const QMARK_NO_DOT = `[^.${SLASH_LITERAL}]`; +const STAR = `${QMARK}*?`; - if (!bos.output && tokens.indexOf(prev) === 1) { - bos.output = ONE_CHAR; - } - continue; - } - } - } - } +const POSIX_CHARS = { + DOT_LITERAL, + PLUS_LITERAL, + QMARK_LITERAL, + SLASH_LITERAL, + ONE_CHAR, + QMARK, + END_ANCHOR, + DOTS_SLASH, + NO_DOT, + NO_DOTS, + NO_DOT_SLASH, + NO_DOTS_SLASH, + QMARK_NO_DOT, + STAR, + START_ANCHOR +}; - if ((value === '[' && peek() !== ':') || (value === '-' && peek() === ']')) { - value = `\\${value}`; - } +/** + * Windows glob regex + */ - if (value === ']' && (prev.value === '[' || 
prev.value === '[^')) { - value = `\\${value}`; - } +const WINDOWS_CHARS = { + ...POSIX_CHARS, - if (opts.posix === true && value === '!' && prev.value === '[') { - value = '^'; - } + SLASH_LITERAL: `[${WIN_SLASH}]`, + QMARK: WIN_NO_SLASH, + STAR: `${WIN_NO_SLASH}*?`, + DOTS_SLASH: `${DOT_LITERAL}{1,2}(?:[${WIN_SLASH}]|$)`, + NO_DOT: `(?!${DOT_LITERAL})`, + NO_DOTS: `(?!(?:^|[${WIN_SLASH}])${DOT_LITERAL}{1,2}(?:[${WIN_SLASH}]|$))`, + NO_DOT_SLASH: `(?!${DOT_LITERAL}{0,1}(?:[${WIN_SLASH}]|$))`, + NO_DOTS_SLASH: `(?!${DOT_LITERAL}{1,2}(?:[${WIN_SLASH}]|$))`, + QMARK_NO_DOT: `[^.${WIN_SLASH}]`, + START_ANCHOR: `(?:^|[${WIN_SLASH}])`, + END_ANCHOR: `(?:[${WIN_SLASH}]|$)` +}; - prev.value += value; - append({ value }); - continue; - } +/** + * POSIX Bracket Regex + */ - /** - * If we're inside a quoted string, continue - * until we reach the closing double quote. - */ +const POSIX_REGEX_SOURCE = { + alnum: 'a-zA-Z0-9', + alpha: 'a-zA-Z', + ascii: '\\x00-\\x7F', + blank: ' \\t', + cntrl: '\\x00-\\x1F\\x7F', + digit: '0-9', + graph: '\\x21-\\x7E', + lower: 'a-z', + print: '\\x20-\\x7E ', + punct: '\\-!"#$%&\'()\\*+,./:;<=>?@[\\]^_`{|}~', + space: ' \\t\\r\\n\\v\\f', + upper: 'A-Z', + word: 'A-Za-z0-9_', + xdigit: 'A-Fa-f0-9' +}; - if (state.quotes === 1 && value !== '"') { - value = utils.escapeRegex(value); - prev.value += value; - append({ value }); - continue; - } +module.exports = { + MAX_LENGTH: 1024 * 64, + POSIX_REGEX_SOURCE, - /** - * Double quotes - */ + // regular expressions + REGEX_BACKSLASH: /\\(?![*+?^${}(|)[\]])/g, + REGEX_NON_SPECIAL_CHARS: /^[^@![\].,$*+?^{}()|\\/]+/, + REGEX_SPECIAL_CHARS: /[-*+?.^${}(|)[\]]/, + REGEX_SPECIAL_CHARS_BACKREF: /(\\?)((\W)(\3*))/g, + REGEX_SPECIAL_CHARS_GLOBAL: /([-*+?.^${}(|)[\]])/g, + REGEX_REMOVE_BACKSLASH: /(?:\[.*?[^\\]\]|\\(?=.))/g, - if (value === '"') { - state.quotes = state.quotes === 1 ? 0 : 1; - if (opts.keepQuotes === true) { - push({ type: 'text', value }); - } - continue; - } + // Replace globs with equivalent patterns to reduce parsing time. + REPLACEMENTS: { + '***': '*', + '**/**': '**', + '**/**/**': '**' + }, - /** - * Parentheses - */ + // Digits + CHAR_0: 48, /* 0 */ + CHAR_9: 57, /* 9 */ - if (value === '(') { - increment('parens'); - push({ type: 'paren', value }); - continue; - } + // Alphabet chars. + CHAR_UPPERCASE_A: 65, /* A */ + CHAR_LOWERCASE_A: 97, /* a */ + CHAR_UPPERCASE_Z: 90, /* Z */ + CHAR_LOWERCASE_Z: 122, /* z */ - if (value === ')') { - if (state.parens === 0 && opts.strictBrackets === true) { - throw new SyntaxError(syntaxError('opening', '(')); - } + CHAR_LEFT_PARENTHESES: 40, /* ( */ + CHAR_RIGHT_PARENTHESES: 41, /* ) */ - const extglob = extglobs[extglobs.length - 1]; - if (extglob && state.parens === extglob.parens + 1) { - extglobClose(extglobs.pop()); - continue; - } + CHAR_ASTERISK: 42, /* * */ - push({ type: 'paren', value, output: state.parens ? ')' : '\\)' }); - decrement('parens'); - continue; - } + // Non-alphabetic chars. + CHAR_AMPERSAND: 38, /* & */ + CHAR_AT: 64, /* @ */ + CHAR_BACKWARD_SLASH: 92, /* \ */ + CHAR_CARRIAGE_RETURN: 13, /* \r */ + CHAR_CIRCUMFLEX_ACCENT: 94, /* ^ */ + CHAR_COLON: 58, /* : */ + CHAR_COMMA: 44, /* , */ + CHAR_DOT: 46, /* . */ + CHAR_DOUBLE_QUOTE: 34, /* " */ + CHAR_EQUAL: 61, /* = */ + CHAR_EXCLAMATION_MARK: 33, /* ! 
*/ + CHAR_FORM_FEED: 12, /* \f */ + CHAR_FORWARD_SLASH: 47, /* / */ + CHAR_GRAVE_ACCENT: 96, /* ` */ + CHAR_HASH: 35, /* # */ + CHAR_HYPHEN_MINUS: 45, /* - */ + CHAR_LEFT_ANGLE_BRACKET: 60, /* < */ + CHAR_LEFT_CURLY_BRACE: 123, /* { */ + CHAR_LEFT_SQUARE_BRACKET: 91, /* [ */ + CHAR_LINE_FEED: 10, /* \n */ + CHAR_NO_BREAK_SPACE: 160, /* \u00A0 */ + CHAR_PERCENT: 37, /* % */ + CHAR_PLUS: 43, /* + */ + CHAR_QUESTION_MARK: 63, /* ? */ + CHAR_RIGHT_ANGLE_BRACKET: 62, /* > */ + CHAR_RIGHT_CURLY_BRACE: 125, /* } */ + CHAR_RIGHT_SQUARE_BRACKET: 93, /* ] */ + CHAR_SEMICOLON: 59, /* ; */ + CHAR_SINGLE_QUOTE: 39, /* ' */ + CHAR_SPACE: 32, /* */ + CHAR_TAB: 9, /* \t */ + CHAR_UNDERSCORE: 95, /* _ */ + CHAR_VERTICAL_LINE: 124, /* | */ + CHAR_ZERO_WIDTH_NOBREAK_SPACE: 65279, /* \uFEFF */ - /** - * Square brackets - */ + SEP: path.sep, - if (value === '[') { - if (opts.nobracket === true || !remaining().includes(']')) { - if (opts.nobracket !== true && opts.strictBrackets === true) { - throw new SyntaxError(syntaxError('closing', ']')); - } + /** + * Create EXTGLOB_CHARS + */ - value = `\\${value}`; - } else { - increment('brackets'); - } + extglobChars(chars) { + return { + '!': { type: 'negate', open: '(?:(?!(?:', close: `))${chars.STAR})` }, + '?': { type: 'qmark', open: '(?:', close: ')?' }, + '+': { type: 'plus', open: '(?:', close: ')+' }, + '*': { type: 'star', open: '(?:', close: ')*' }, + '@': { type: 'at', open: '(?:', close: ')' } + }; + }, - push({ type: 'bracket', value }); - continue; - } + /** + * Create GLOB_CHARS + */ + + globChars(win32) { + return win32 === true ? WINDOWS_CHARS : POSIX_CHARS; + } +}; - if (value === ']') { - if (opts.nobracket === true || (prev && prev.type === 'bracket' && prev.value.length === 1)) { - push({ type: 'text', value, output: `\\${value}` }); - continue; - } - if (state.brackets === 0) { - if (opts.strictBrackets === true) { - throw new SyntaxError(syntaxError('opening', '[')); - } +/***/ }), - push({ type: 'text', value, output: `\\${value}` }); - continue; - } +/***/ 2139: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - decrement('brackets'); +"use strict"; - const prevValue = prev.value.slice(1); - if (prev.posix !== true && prevValue[0] === '^' && !prevValue.includes('/')) { - value = `/${value}`; - } - prev.value += value; - append({ value }); +const constants = __nccwpck_require__(6099); +const utils = __nccwpck_require__(479); - // when literal brackets are explicitly disabled - // assume we should match with a regex character class - if (opts.literalBrackets === false || utils.hasRegexChars(prevValue)) { - continue; - } +/** + * Constants + */ - const escaped = utils.escapeRegex(prev.value); - state.output = state.output.slice(0, -prev.value.length); +const { + MAX_LENGTH, + POSIX_REGEX_SOURCE, + REGEX_NON_SPECIAL_CHARS, + REGEX_SPECIAL_CHARS_BACKREF, + REPLACEMENTS +} = constants; - // when literal brackets are explicitly enabled - // assume we should escape the brackets to match literal characters - if (opts.literalBrackets === true) { - state.output += escaped; - prev.value = escaped; - continue; - } +/** + * Helpers + */ - // when the user specifies nothing, try to match both - prev.value = `(${capture}${escaped}|${prev.value})`; - state.output += prev.value; - continue; - } +const expandRange = (args, options) => { + if (typeof options.expandRange === 'function') { + return options.expandRange(...args, options); + } - /** - * Braces - */ + args.sort(); + const value = `[${args.join('-')}]`; - if (value === '{' && 
opts.nobrace !== true) { - increment('braces'); + try { + /* eslint-disable-next-line no-new */ + new RegExp(value); + } catch (ex) { + return args.map(v => utils.escapeRegex(v)).join('..'); + } - const open = { - type: 'brace', - value, - output: '(', - outputIndex: state.output.length, - tokensIndex: state.tokens.length - }; + return value; +}; - braces.push(open); - push(open); - continue; - } +/** + * Create the message for a syntax error + */ - if (value === '}') { - const brace = braces[braces.length - 1]; +const syntaxError = (type, char) => { + return `Missing ${type}: "${char}" - use "\\\\${char}" to match literal characters`; +}; - if (opts.nobrace === true || !brace) { - push({ type: 'text', value, output: value }); - continue; - } +/** + * Parse the given input string. + * @param {String} input + * @param {Object} options + * @return {Object} + */ - let output = ')'; +const parse = (input, options) => { + if (typeof input !== 'string') { + throw new TypeError('Expected a string'); + } - if (brace.dots === true) { - const arr = tokens.slice(); - const range = []; + input = REPLACEMENTS[input] || input; - for (let i = arr.length - 1; i >= 0; i--) { - tokens.pop(); - if (arr[i].type === 'brace') { - break; - } - if (arr[i].type !== 'dots') { - range.unshift(arr[i].value); - } - } + const opts = { ...options }; + const max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH; - output = expandRange(range, opts); - state.backtrack = true; - } + let len = input.length; + if (len > max) { + throw new SyntaxError(`Input length: ${len}, exceeds maximum allowed length: ${max}`); + } - if (brace.comma !== true && brace.dots !== true) { - const out = state.output.slice(0, brace.outputIndex); - const toks = state.tokens.slice(brace.tokensIndex); - brace.value = brace.output = '\\{'; - value = output = '\\}'; - state.output = out; - for (const t of toks) { - state.output += (t.output || t.value); - } - } + const bos = { type: 'bos', value: '', output: opts.prepend || '' }; + const tokens = [bos]; - push({ type: 'brace', value, output }); - decrement('braces'); - braces.pop(); - continue; - } + const capture = opts.capture ? '' : '?:'; + const win32 = utils.isWindows(options); - /** - * Pipes - */ + // create constants based on platform, for windows or posix + const PLATFORM_CHARS = constants.globChars(win32); + const EXTGLOB_CHARS = constants.extglobChars(PLATFORM_CHARS); - if (value === '|') { - if (extglobs.length > 0) { - extglobs[extglobs.length - 1].conditions++; - } - push({ type: 'text', value }); - continue; - } + const { + DOT_LITERAL, + PLUS_LITERAL, + SLASH_LITERAL, + ONE_CHAR, + DOTS_SLASH, + NO_DOT, + NO_DOT_SLASH, + NO_DOTS_SLASH, + QMARK, + QMARK_NO_DOT, + STAR, + START_ANCHOR + } = PLATFORM_CHARS; - /** - * Commas - */ + const globstar = opts => { + return `(${capture}(?:(?!${START_ANCHOR}${opts.dot ? DOTS_SLASH : DOT_LITERAL}).)*?)`; + }; - if (value === ',') { - let output = value; + const nodot = opts.dot ? '' : NO_DOT; + const qmarkNoDot = opts.dot ? QMARK : QMARK_NO_DOT; + let star = opts.bash === true ? 
globstar(opts) : STAR; - const brace = braces[braces.length - 1]; - if (brace && stack[stack.length - 1] === 'braces') { - brace.comma = true; - output = '|'; - } + if (opts.capture) { + star = `(${star})`; + } - push({ type: 'comma', value, output }); - continue; - } + // minimatch options support + if (typeof opts.noext === 'boolean') { + opts.noextglob = opts.noext; + } - /** - * Slashes - */ + const state = { + input, + index: -1, + start: 0, + dot: opts.dot === true, + consumed: '', + output: '', + prefix: '', + backtrack: false, + negated: false, + brackets: 0, + braces: 0, + parens: 0, + quotes: 0, + globstar: false, + tokens + }; - if (value === '/') { - // if the beginning of the glob is "./", advance the start - // to the current index, and don't add the "./" characters - // to the state. This greatly simplifies lookbehinds when - // checking for BOS characters like "!" and "." (not "./") - if (prev.type === 'dot' && state.index === state.start + 1) { - state.start = state.index + 1; - state.consumed = ''; - state.output = ''; - tokens.pop(); - prev = bos; // reset "prev" to the first token - continue; - } + input = utils.removePrefix(input, state); + len = input.length; - push({ type: 'slash', value, output: SLASH_LITERAL }); - continue; - } + const extglobs = []; + const braces = []; + const stack = []; + let prev = bos; + let value; + + /** + * Tokenizing helpers + */ + + const eos = () => state.index === len - 1; + const peek = state.peek = (n = 1) => input[state.index + n]; + const advance = state.advance = () => input[++state.index] || ''; + const remaining = () => input.slice(state.index + 1); + const consume = (value = '', num = 0) => { + state.consumed += value; + state.index += num; + }; - /** - * Dots - */ + const append = token => { + state.output += token.output != null ? token.output : token.value; + consume(token.value); + }; - if (value === '.') { - if (state.braces > 0 && prev.type === 'dot') { - if (prev.value === '.') prev.output = DOT_LITERAL; - const brace = braces[braces.length - 1]; - prev.type = 'dots'; - prev.output += value; - prev.value += value; - brace.dots = true; - continue; - } + const negate = () => { + let count = 1; - if ((state.braces + state.parens) === 0 && prev.type !== 'bos' && prev.type !== 'slash') { - push({ type: 'text', value, output: DOT_LITERAL }); - continue; - } + while (peek() === '!' && (peek(2) !== '(' || peek(3) === '?')) { + advance(); + state.start++; + count++; + } - push({ type: 'dot', value, output: DOT_LITERAL }); - continue; + if (count % 2 === 0) { + return false; } - /** - * Question marks - */ + state.negated = true; + state.start++; + return true; + }; - if (value === '?') { - const isGroup = prev && prev.value === '('; - if (!isGroup && opts.noextglob !== true && peek() === '(' && peek(2) !== '?') { - extglobOpen('qmark', value); - continue; - } + const increment = type => { + state[type]++; + stack.push(type); + }; - if (prev && prev.type === 'paren') { - const next = peek(); - let output = value; + const decrement = type => { + state[type]--; + stack.pop(); + }; - if (next === '<' && !utils.supportsLookbehinds()) { - throw new Error('Node.js v10 or higher is required for regex lookbehinds'); - } + /** + * Push tokens onto the tokens array. This helper speeds up + * tokenizing by 1) helping us avoid backtracking as much as possible, + * and 2) helping us avoid creating extra tokens when consecutive + * characters are plain text. This improves performance and simplifies + * lookbehinds. 
+ */ - if ((prev.value === '(' && !/[!=<:]/.test(next)) || (next === '<' && !/<([!=]|\w+>)/.test(remaining()))) { - output = `\\${value}`; - } + const push = tok => { + if (prev.type === 'globstar') { + const isBrace = state.braces > 0 && (tok.type === 'comma' || tok.type === 'brace'); + const isExtglob = tok.extglob === true || (extglobs.length && (tok.type === 'pipe' || tok.type === 'paren')); - push({ type: 'text', value, output }); - continue; + if (tok.type !== 'slash' && tok.type !== 'paren' && !isBrace && !isExtglob) { + state.output = state.output.slice(0, -prev.output.length); + prev.type = 'star'; + prev.value = '*'; + prev.output = star; + state.output += prev.output; } + } - if (opts.dot !== true && (prev.type === 'slash' || prev.type === 'bos')) { - push({ type: 'qmark', value, output: QMARK_NO_DOT }); - continue; - } + if (extglobs.length && tok.type !== 'paren') { + extglobs[extglobs.length - 1].inner += tok.value; + } - push({ type: 'qmark', value, output: QMARK }); - continue; + if (tok.value || tok.output) append(tok); + if (prev && prev.type === 'text' && tok.type === 'text') { + prev.value += tok.value; + prev.output = (prev.output || '') + tok.value; + return; } - /** - * Exclamation - */ + tok.prev = prev; + tokens.push(tok); + prev = tok; + }; - if (value === '!') { - if (opts.noextglob !== true && peek() === '(') { - if (peek(2) !== '?' || !/[!=<:]/.test(peek(3))) { - extglobOpen('negate', value); - continue; - } - } + const extglobOpen = (type, value) => { + const token = { ...EXTGLOB_CHARS[value], conditions: 1, inner: '' }; - if (opts.nonegate !== true && state.index === 0) { - negate(); - continue; - } - } + token.prev = prev; + token.parens = state.parens; + token.output = state.output; + const output = (opts.capture ? '(' : '') + token.open; - /** - * Plus - */ + increment('parens'); + push({ type, value, output: state.output ? '' : ONE_CHAR }); + push({ type: 'paren', extglob: true, value: advance(), output }); + extglobs.push(token); + }; - if (value === '+') { - if (opts.noextglob !== true && peek() === '(' && peek(2) !== '?') { - extglobOpen('plus', value); - continue; + const extglobClose = token => { + let output = token.close + (opts.capture ? ')' : ''); + let rest; + + if (token.type === 'negate') { + let extglobStar = star; + + if (token.inner && token.inner.length > 1 && token.inner.includes('/')) { + extglobStar = globstar(opts); } - if ((prev && prev.value === '(') || opts.regex === false) { - push({ type: 'plus', value, output: PLUS_LITERAL }); - continue; + if (extglobStar !== star || eos() || /^\)+$/.test(remaining())) { + output = token.close = `)$))${extglobStar}`; } - if ((prev && (prev.type === 'bracket' || prev.type === 'paren' || prev.type === 'brace')) || state.parens > 0) { - push({ type: 'plus', value }); - continue; + if (token.inner.includes('*') && (rest = remaining()) && /^\.[^\\/.]+$/.test(rest)) { + // Any non-magical string (`.ts`) or even nested expression (`.{ts,tsx}`) can follow after the closing parenthesis. + // In this case, we need to parse the string and use it in the output of the original pattern. + // Suitable patterns: `/!(*.d).ts`, `/!(*.d).{ts,tsx}`, `**/!(*-dbg).@(js)`. + // + // Disabling the `fastpaths` option due to a problem with parsing strings as `.ts` in the pattern like `**/!(*.d).ts`. 
+ const expression = parse(rest, { ...options, fastpaths: false }).output; + + output = token.close = `)${expression})${extglobStar})`; } - push({ type: 'plus', value: PLUS_LITERAL }); - continue; + if (token.prev.type === 'bos') { + state.negatedExtglob = true; + } } - /** - * Plain text - */ + push({ type: 'paren', extglob: true, value, output }); + decrement('parens'); + }; - if (value === '@') { - if (opts.noextglob !== true && peek() === '(' && peek(2) !== '?') { - push({ type: 'at', extglob: true, value, output: '' }); - continue; + /** + * Fast paths + */ + + if (opts.fastpaths !== false && !/(^[*!]|[/()[\]{}"])/.test(input)) { + let backslashes = false; + + let output = input.replace(REGEX_SPECIAL_CHARS_BACKREF, (m, esc, chars, first, rest, index) => { + if (first === '\\') { + backslashes = true; + return m; } - push({ type: 'text', value }); - continue; - } + if (first === '?') { + if (esc) { + return esc + first + (rest ? QMARK.repeat(rest.length) : ''); + } + if (index === 0) { + return qmarkNoDot + (rest ? QMARK.repeat(rest.length) : ''); + } + return QMARK.repeat(chars.length); + } - /** - * Plain text - */ + if (first === '.') { + return DOT_LITERAL.repeat(chars.length); + } - if (value !== '*') { - if (value === '$' || value === '^') { - value = `\\${value}`; + if (first === '*') { + if (esc) { + return esc + first + (rest ? star : ''); + } + return star; } + return esc ? m : `\\${m}`; + }); - const match = REGEX_NON_SPECIAL_CHARS.exec(remaining()); - if (match) { - value += match[0]; - state.index += match[0].length; + if (backslashes === true) { + if (opts.unescape === true) { + output = output.replace(/\\/g, ''); + } else { + output = output.replace(/\\+/g, m => { + return m.length % 2 === 0 ? '\\\\' : (m ? '\\' : ''); + }); } + } - push({ type: 'text', value }); - continue; + if (output === input && opts.contains === true) { + state.output = input; + return state; } - /** - * Stars - */ + state.output = utils.wrapOutput(output, state, options); + return state; + } - if (prev && (prev.type === 'globstar' || prev.star === true)) { - prev.type = 'star'; - prev.star = true; - prev.value += value; - prev.output = star; - state.backtrack = true; - state.globstar = true; - consume(value); - continue; - } + /** + * Tokenize input until we reach end-of-string + */ - let rest = remaining(); - if (opts.noextglob !== true && /^\([^?]/.test(rest)) { - extglobOpen('star', value); + while (!eos()) { + value = advance(); + + if (value === '\u0000') { continue; } - if (prev.type === 'star') { - if (opts.noglobstar === true) { - consume(value); + /** + * Escaped characters + */ + + if (value === '\\') { + const next = peek(); + + if (next === '/' && opts.bash !== true) { continue; } - const prior = prev.prev; - const before = prior.prev; - const isStart = prior.type === 'slash' || prior.type === 'bos'; - const afterStar = before && (before.type === 'star' || before.type === 'globstar'); - - if (opts.bash === true && (!isStart || (rest[0] && rest[0] !== '/'))) { - push({ type: 'star', value, output: '' }); + if (next === '.' 
|| next === ';') { continue; } - const isBrace = state.braces > 0 && (prior.type === 'comma' || prior.type === 'brace'); - const isExtglob = extglobs.length && (prior.type === 'pipe' || prior.type === 'paren'); - if (!isStart && prior.type !== 'paren' && !isBrace && !isExtglob) { - push({ type: 'star', value, output: '' }); + if (!next) { + value += '\\'; + push({ type: 'text', value }); continue; } - // strip consecutive `/**/` - while (rest.slice(0, 3) === '/**') { - const after = input[state.index + 4]; - if (after && after !== '/') { - break; + // collapse slashes to reduce potential for exploits + const match = /^\\+/.exec(remaining()); + let slashes = 0; + + if (match && match[0].length > 2) { + slashes = match[0].length; + state.index += slashes; + if (slashes % 2 !== 0) { + value += '\\'; } - rest = rest.slice(3); - consume('/**', 3); } - if (prior.type === 'bos' && eos()) { - prev.type = 'globstar'; - prev.value += value; - prev.output = globstar(opts); - state.output = prev.output; - state.globstar = true; - consume(value); - continue; + if (opts.unescape === true) { + value = advance(); + } else { + value += advance(); } - if (prior.type === 'slash' && prior.prev.type !== 'bos' && !afterStar && eos()) { - state.output = state.output.slice(0, -(prior.output + prev.output).length); - prior.output = `(?:${prior.output}`; - - prev.type = 'globstar'; - prev.output = globstar(opts) + (opts.strictSlashes ? ')' : '|$)'); - prev.value += value; - state.globstar = true; - state.output += prior.output + prev.output; - consume(value); + if (state.brackets === 0) { + push({ type: 'text', value }); continue; } + } - if (prior.type === 'slash' && prior.prev.type !== 'bos' && rest[0] === '/') { - const end = rest[1] !== void 0 ? '|$' : ''; - - state.output = state.output.slice(0, -(prior.output + prev.output).length); - prior.output = `(?:${prior.output}`; + /** + * If we're inside a regex character class, continue + * until we reach the closing bracket. + */ - prev.type = 'globstar'; - prev.output = `${globstar(opts)}${SLASH_LITERAL}|${SLASH_LITERAL}${end})`; - prev.value += value; + if (state.brackets > 0 && (value !== ']' || prev.value === '[' || prev.value === '[^')) { + if (opts.posix !== false && value === ':') { + const inner = prev.value.slice(1); + if (inner.includes('[')) { + prev.posix = true; - state.output += prior.output + prev.output; - state.globstar = true; + if (inner.includes(':')) { + const idx = prev.value.lastIndexOf('['); + const pre = prev.value.slice(0, idx); + const rest = prev.value.slice(idx + 2); + const posix = POSIX_REGEX_SOURCE[rest]; + if (posix) { + prev.value = pre + posix; + state.backtrack = true; + advance(); - consume(value + advance()); + if (!bos.output && tokens.indexOf(prev) === 1) { + bos.output = ONE_CHAR; + } + continue; + } + } + } + } - push({ type: 'slash', value: '/', output: '' }); - continue; + if ((value === '[' && peek() !== ':') || (value === '-' && peek() === ']')) { + value = `\\${value}`; } - if (prior.type === 'bos' && rest[0] === '/') { - prev.type = 'globstar'; - prev.value += value; - prev.output = `(?:^|${SLASH_LITERAL}|${globstar(opts)}${SLASH_LITERAL})`; - state.output = prev.output; - state.globstar = true; - consume(value + advance()); - push({ type: 'slash', value: '/', output: '' }); - continue; + if (value === ']' && (prev.value === '[' || prev.value === '[^')) { + value = `\\${value}`; } - // remove single star from output - state.output = state.output.slice(0, -prev.output.length); + if (opts.posix === true && value === '!' 
&& prev.value === '[') { + value = '^'; + } - // reset previous token to globstar - prev.type = 'globstar'; - prev.output = globstar(opts); prev.value += value; - - // reset output with globstar - state.output += prev.output; - state.globstar = true; - consume(value); + append({ value }); continue; } - const token = { type: 'star', value, output: star }; + /** + * If we're inside a quoted string, continue + * until we reach the closing double quote. + */ - if (opts.bash === true) { - token.output = '.*?'; - if (prev.type === 'bos' || prev.type === 'slash') { - token.output = nodot + token.output; - } - push(token); + if (state.quotes === 1 && value !== '"') { + value = utils.escapeRegex(value); + prev.value += value; + append({ value }); continue; } - if (prev && (prev.type === 'bracket' || prev.type === 'paren') && opts.regex === true) { - token.output = value; - push(token); + /** + * Double quotes + */ + + if (value === '"') { + state.quotes = state.quotes === 1 ? 0 : 1; + if (opts.keepQuotes === true) { + push({ type: 'text', value }); + } continue; } - if (state.index === state.start || prev.type === 'slash' || prev.type === 'dot') { - if (prev.type === 'dot') { - state.output += NO_DOT_SLASH; - prev.output += NO_DOT_SLASH; + /** + * Parentheses + */ - } else if (opts.dot === true) { - state.output += NO_DOTS_SLASH; - prev.output += NO_DOTS_SLASH; + if (value === '(') { + increment('parens'); + push({ type: 'paren', value }); + continue; + } - } else { - state.output += nodot; - prev.output += nodot; + if (value === ')') { + if (state.parens === 0 && opts.strictBrackets === true) { + throw new SyntaxError(syntaxError('opening', '(')); } - if (peek() !== '*') { - state.output += ONE_CHAR; - prev.output += ONE_CHAR; + const extglob = extglobs[extglobs.length - 1]; + if (extglob && state.parens === extglob.parens + 1) { + extglobClose(extglobs.pop()); + continue; } - } - - push(token); - } - while (state.brackets > 0) { - if (opts.strictBrackets === true) throw new SyntaxError(syntaxError('closing', ']')); - state.output = utils.escapeLast(state.output, '['); - decrement('brackets'); - } - - while (state.parens > 0) { - if (opts.strictBrackets === true) throw new SyntaxError(syntaxError('closing', ')')); - state.output = utils.escapeLast(state.output, '('); - decrement('parens'); - } + push({ type: 'paren', value, output: state.parens ? ')' : '\\)' }); + decrement('parens'); + continue; + } - while (state.braces > 0) { - if (opts.strictBrackets === true) throw new SyntaxError(syntaxError('closing', '}')); - state.output = utils.escapeLast(state.output, '{'); - decrement('braces'); - } + /** + * Square brackets + */ - if (opts.strictSlashes !== true && (prev.type === 'star' || prev.type === 'bracket')) { - push({ type: 'maybe_slash', value: '', output: `${SLASH_LITERAL}?` }); - } + if (value === '[') { + if (opts.nobracket === true || !remaining().includes(']')) { + if (opts.nobracket !== true && opts.strictBrackets === true) { + throw new SyntaxError(syntaxError('closing', ']')); + } - // rebuild the output if we had to backtrack at any point - if (state.backtrack === true) { - state.output = ''; + value = `\\${value}`; + } else { + increment('brackets'); + } - for (const token of state.tokens) { - state.output += token.output != null ? 
token.output : token.value; + push({ type: 'bracket', value }); + continue; + } - if (token.suffix) { - state.output += token.suffix; + if (value === ']') { + if (opts.nobracket === true || (prev && prev.type === 'bracket' && prev.value.length === 1)) { + push({ type: 'text', value, output: `\\${value}` }); + continue; } - } - } - return state; -}; + if (state.brackets === 0) { + if (opts.strictBrackets === true) { + throw new SyntaxError(syntaxError('opening', '[')); + } -/** - * Fast paths for creating regular expressions for common glob patterns. - * This can significantly speed up processing and has very little downside - * impact when none of the fast paths match. - */ + push({ type: 'text', value, output: `\\${value}` }); + continue; + } -parse.fastpaths = (input, options) => { - const opts = { ...options }; - const max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH; - const len = input.length; - if (len > max) { - throw new SyntaxError(`Input length: ${len}, exceeds maximum allowed length: ${max}`); - } + decrement('brackets'); - input = REPLACEMENTS[input] || input; - const win32 = utils.isWindows(options); + const prevValue = prev.value.slice(1); + if (prev.posix !== true && prevValue[0] === '^' && !prevValue.includes('/')) { + value = `/${value}`; + } - // create constants based on platform, for windows or posix - const { - DOT_LITERAL, - SLASH_LITERAL, - ONE_CHAR, - DOTS_SLASH, - NO_DOT, - NO_DOTS, - NO_DOTS_SLASH, - STAR, - START_ANCHOR - } = constants.globChars(win32); + prev.value += value; + append({ value }); - const nodot = opts.dot ? NO_DOTS : NO_DOT; - const slashDot = opts.dot ? NO_DOTS_SLASH : NO_DOT; - const capture = opts.capture ? '' : '?:'; - const state = { negated: false, prefix: '' }; - let star = opts.bash === true ? '.*?' : STAR; + // when literal brackets are explicitly disabled + // assume we should match with a regex character class + if (opts.literalBrackets === false || utils.hasRegexChars(prevValue)) { + continue; + } - if (opts.capture) { - star = `(${star})`; - } + const escaped = utils.escapeRegex(prev.value); + state.output = state.output.slice(0, -prev.value.length); - const globstar = opts => { - if (opts.noglobstar === true) return star; - return `(${capture}(?:(?!${START_ANCHOR}${opts.dot ? 
DOTS_SLASH : DOT_LITERAL}).)*?)`; - }; + // when literal brackets are explicitly enabled + // assume we should escape the brackets to match literal characters + if (opts.literalBrackets === true) { + state.output += escaped; + prev.value = escaped; + continue; + } - const create = str => { - switch (str) { - case '*': - return `${nodot}${ONE_CHAR}${star}`; + // when the user specifies nothing, try to match both + prev.value = `(${capture}${escaped}|${prev.value})`; + state.output += prev.value; + continue; + } - case '.*': - return `${DOT_LITERAL}${ONE_CHAR}${star}`; + /** + * Braces + */ - case '*.*': - return `${nodot}${star}${DOT_LITERAL}${ONE_CHAR}${star}`; + if (value === '{' && opts.nobrace !== true) { + increment('braces'); - case '*/*': - return `${nodot}${star}${SLASH_LITERAL}${ONE_CHAR}${slashDot}${star}`; + const open = { + type: 'brace', + value, + output: '(', + outputIndex: state.output.length, + tokensIndex: state.tokens.length + }; - case '**': - return nodot + globstar(opts); + braces.push(open); + push(open); + continue; + } - case '**/*': - return `(?:${nodot}${globstar(opts)}${SLASH_LITERAL})?${slashDot}${ONE_CHAR}${star}`; + if (value === '}') { + const brace = braces[braces.length - 1]; - case '**/*.*': - return `(?:${nodot}${globstar(opts)}${SLASH_LITERAL})?${slashDot}${star}${DOT_LITERAL}${ONE_CHAR}${star}`; + if (opts.nobrace === true || !brace) { + push({ type: 'text', value, output: value }); + continue; + } - case '**/.*': - return `(?:${nodot}${globstar(opts)}${SLASH_LITERAL})?${DOT_LITERAL}${ONE_CHAR}${star}`; + let output = ')'; - default: { - const match = /^(.*?)\.(\w+)$/.exec(str); - if (!match) return; + if (brace.dots === true) { + const arr = tokens.slice(); + const range = []; - const source = create(match[1]); - if (!source) return; + for (let i = arr.length - 1; i >= 0; i--) { + tokens.pop(); + if (arr[i].type === 'brace') { + break; + } + if (arr[i].type !== 'dots') { + range.unshift(arr[i].value); + } + } - return source + DOT_LITERAL + match[2]; + output = expandRange(range, opts); + state.backtrack = true; } - } - }; - - const output = utils.removePrefix(input, state); - let source = create(output); - if (source && opts.strictSlashes !== true) { - source += `${SLASH_LITERAL}?`; - } + if (brace.comma !== true && brace.dots !== true) { + const out = state.output.slice(0, brace.outputIndex); + const toks = state.tokens.slice(brace.tokensIndex); + brace.value = brace.output = '\\{'; + value = output = '\\}'; + state.output = out; + for (const t of toks) { + state.output += (t.output || t.value); + } + } - return source; -}; + push({ type: 'brace', value, output }); + decrement('braces'); + braces.pop(); + continue; + } -module.exports = parse; + /** + * Pipes + */ + if (value === '|') { + if (extglobs.length > 0) { + extglobs[extglobs.length - 1].conditions++; + } + push({ type: 'text', value }); + continue; + } -/***/ }), + /** + * Commas + */ -/***/ 807: -/***/ (function(module) { + if (value === ',') { + let output = value; -"use strict"; + const brace = braces[braces.length - 1]; + if (brace && stack[stack.length - 1] === 'braces') { + brace.comma = true; + output = '|'; + } + push({ type: 'comma', value, output }); + continue; + } -module.exports = { - MAX_LENGTH: 1024 * 64, + /** + * Slashes + */ - // Digits - CHAR_0: '0', /* 0 */ - CHAR_9: '9', /* 9 */ + if (value === '/') { + // if the beginning of the glob is "./", advance the start + // to the current index, and don't add the "./" characters + // to the state. 
This greatly simplifies lookbehinds when + // checking for BOS characters like "!" and "." (not "./") + if (prev.type === 'dot' && state.index === state.start + 1) { + state.start = state.index + 1; + state.consumed = ''; + state.output = ''; + tokens.pop(); + prev = bos; // reset "prev" to the first token + continue; + } - // Alphabet chars. - CHAR_UPPERCASE_A: 'A', /* A */ - CHAR_LOWERCASE_A: 'a', /* a */ - CHAR_UPPERCASE_Z: 'Z', /* Z */ - CHAR_LOWERCASE_Z: 'z', /* z */ + push({ type: 'slash', value, output: SLASH_LITERAL }); + continue; + } - CHAR_LEFT_PARENTHESES: '(', /* ( */ - CHAR_RIGHT_PARENTHESES: ')', /* ) */ + /** + * Dots + */ - CHAR_ASTERISK: '*', /* * */ + if (value === '.') { + if (state.braces > 0 && prev.type === 'dot') { + if (prev.value === '.') prev.output = DOT_LITERAL; + const brace = braces[braces.length - 1]; + prev.type = 'dots'; + prev.output += value; + prev.value += value; + brace.dots = true; + continue; + } - // Non-alphabetic chars. - CHAR_AMPERSAND: '&', /* & */ - CHAR_AT: '@', /* @ */ - CHAR_BACKSLASH: '\\', /* \ */ - CHAR_BACKTICK: '`', /* ` */ - CHAR_CARRIAGE_RETURN: '\r', /* \r */ - CHAR_CIRCUMFLEX_ACCENT: '^', /* ^ */ - CHAR_COLON: ':', /* : */ - CHAR_COMMA: ',', /* , */ - CHAR_DOLLAR: '$', /* . */ - CHAR_DOT: '.', /* . */ - CHAR_DOUBLE_QUOTE: '"', /* " */ - CHAR_EQUAL: '=', /* = */ - CHAR_EXCLAMATION_MARK: '!', /* ! */ - CHAR_FORM_FEED: '\f', /* \f */ - CHAR_FORWARD_SLASH: '/', /* / */ - CHAR_HASH: '#', /* # */ - CHAR_HYPHEN_MINUS: '-', /* - */ - CHAR_LEFT_ANGLE_BRACKET: '<', /* < */ - CHAR_LEFT_CURLY_BRACE: '{', /* { */ - CHAR_LEFT_SQUARE_BRACKET: '[', /* [ */ - CHAR_LINE_FEED: '\n', /* \n */ - CHAR_NO_BREAK_SPACE: '\u00A0', /* \u00A0 */ - CHAR_PERCENT: '%', /* % */ - CHAR_PLUS: '+', /* + */ - CHAR_QUESTION_MARK: '?', /* ? 
*/ - CHAR_RIGHT_ANGLE_BRACKET: '>', /* > */ - CHAR_RIGHT_CURLY_BRACE: '}', /* } */ - CHAR_RIGHT_SQUARE_BRACKET: ']', /* ] */ - CHAR_SEMICOLON: ';', /* ; */ - CHAR_SINGLE_QUOTE: '\'', /* ' */ - CHAR_SPACE: ' ', /* */ - CHAR_TAB: '\t', /* \t */ - CHAR_UNDERSCORE: '_', /* _ */ - CHAR_VERTICAL_LINE: '|', /* | */ - CHAR_ZERO_WIDTH_NOBREAK_SPACE: '\uFEFF' /* \uFEFF */ -}; + if ((state.braces + state.parens) === 0 && prev.type !== 'bos' && prev.type !== 'slash') { + push({ type: 'text', value, output: DOT_LITERAL }); + continue; + } + push({ type: 'dot', value, output: DOT_LITERAL }); + continue; + } -/***/ }), + /** + * Question marks + */ -/***/ 827: -/***/ (function(module, __unusedexports, __webpack_require__) { + if (value === '?') { + const isGroup = prev && prev.value === '('; + if (!isGroup && opts.noextglob !== true && peek() === '(' && peek(2) !== '?') { + extglobOpen('qmark', value); + continue; + } -"use strict"; + if (prev && prev.type === 'paren') { + const next = peek(); + let output = value; + if (next === '<' && !utils.supportsLookbehinds()) { + throw new Error('Node.js v10 or higher is required for regex lookbehinds'); + } -module.exports = __webpack_require__(366); + if ((prev.value === '(' && !/[!=<:]/.test(next)) || (next === '<' && !/<([!=]|\w+>)/.test(remaining()))) { + output = `\\${value}`; + } + push({ type: 'text', value, output }); + continue; + } -/***/ }), + if (opts.dot !== true && (prev.type === 'slash' || prev.type === 'bos')) { + push({ type: 'qmark', value, output: QMARK_NO_DOT }); + continue; + } -/***/ 832: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + push({ type: 'qmark', value, output: QMARK }); + continue; + } -"use strict"; + /** + * Exclamation + */ + if (value === '!') { + if (opts.noextglob !== true && peek() === '(') { + if (peek(2) !== '?' || !/[!=<:]/.test(peek(3))) { + extglobOpen('negate', value); + continue; + } + } + if (opts.nonegate !== true && state.index === 0) { + negate(); + continue; + } + } -var zlib_inflate = __webpack_require__(401); -var utils = __webpack_require__(999); -var strings = __webpack_require__(279); -var c = __webpack_require__(691); -var msg = __webpack_require__(868); -var ZStream = __webpack_require__(991); -var GZheader = __webpack_require__(969); + /** + * Plus + */ -var toString = Object.prototype.toString; + if (value === '+') { + if (opts.noextglob !== true && peek() === '(' && peek(2) !== '?') { + extglobOpen('plus', value); + continue; + } -/** - * class Inflate - * - * Generic JS-style wrapper for zlib calls. If you don't need - * streaming behaviour - use more simple functions: [[inflate]] - * and [[inflateRaw]]. - **/ + if ((prev && prev.value === '(') || opts.regex === false) { + push({ type: 'plus', value, output: PLUS_LITERAL }); + continue; + } -/* internal - * inflate.chunks -> Array - * - * Chunks of output data, if [[Inflate#onData]] not overridden. - **/ + if ((prev && (prev.type === 'bracket' || prev.type === 'paren' || prev.type === 'brace')) || state.parens > 0) { + push({ type: 'plus', value }); + continue; + } -/** - * Inflate.result -> Uint8Array|Array|String - * - * Uncompressed result, generated by default [[Inflate#onData]] - * and [[Inflate#onEnd]] handlers. Filled after you push last chunk - * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you - * push a chunk with explicit flush (call [[Inflate#push]] with - * `Z_SYNC_FLUSH` param). 
- **/ + push({ type: 'plus', value: PLUS_LITERAL }); + continue; + } -/** - * Inflate.err -> Number - * - * Error code after inflate finished. 0 (Z_OK) on success. - * Should be checked if broken data possible. - **/ + /** + * Plain text + */ -/** - * Inflate.msg -> String - * - * Error message, if [[Inflate.err]] != 0 - **/ + if (value === '@') { + if (opts.noextglob !== true && peek() === '(' && peek(2) !== '?') { + push({ type: 'at', extglob: true, value, output: '' }); + continue; + } + push({ type: 'text', value }); + continue; + } -/** - * new Inflate(options) - * - options (Object): zlib inflate options. - * - * Creates new inflator instance with specified params. Throws exception - * on bad params. Supported options: - * - * - `windowBits` - * - `dictionary` - * - * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) - * for more information on these. - * - * Additional options, for internal needs: - * - * - `chunkSize` - size of generated data chunks (16K by default) - * - `raw` (Boolean) - do raw inflate - * - `to` (String) - if equal to 'string', then result will be converted - * from utf8 to utf16 (javascript) string. When string output requested, - * chunk length can differ from `chunkSize`, depending on content. - * - * By default, when no options set, autodetect deflate/gzip data format via - * wrapper header. - * - * ##### Example: - * - * ```javascript - * var pako = require('pako') - * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) - * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); - * - * var inflate = new pako.Inflate({ level: 3}); - * - * inflate.push(chunk1, false); - * inflate.push(chunk2, true); // true -> last chunk - * - * if (inflate.err) { throw new Error(inflate.err); } - * - * console.log(inflate.result); - * ``` - **/ -function Inflate(options) { - if (!(this instanceof Inflate)) return new Inflate(options); + /** + * Plain text + */ - this.options = utils.assign({ - chunkSize: 16384, - windowBits: 0, - to: '' - }, options || {}); + if (value !== '*') { + if (value === '$' || value === '^') { + value = `\\${value}`; + } - var opt = this.options; + const match = REGEX_NON_SPECIAL_CHARS.exec(remaining()); + if (match) { + value += match[0]; + state.index += match[0].length; + } - // Force window size for `raw` data, if not set directly, - // because we have no header for autodetect. - if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) { - opt.windowBits = -opt.windowBits; - if (opt.windowBits === 0) { opt.windowBits = -15; } - } + push({ type: 'text', value }); + continue; + } - // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate - if ((opt.windowBits >= 0) && (opt.windowBits < 16) && - !(options && options.windowBits)) { - opt.windowBits += 32; - } + /** + * Stars + */ - // Gzip header has no info about windows size, we can do autodetect only - // for deflate. 
So, if window size not set, force it to max when gzip possible - if ((opt.windowBits > 15) && (opt.windowBits < 48)) { - // bit 3 (16) -> gzipped data - // bit 4 (32) -> autodetect gzip/deflate - if ((opt.windowBits & 15) === 0) { - opt.windowBits |= 15; + if (prev && (prev.type === 'globstar' || prev.star === true)) { + prev.type = 'star'; + prev.star = true; + prev.value += value; + prev.output = star; + state.backtrack = true; + state.globstar = true; + consume(value); + continue; } - } - this.err = 0; // error code, if happens (0 = Z_OK) - this.msg = ''; // error message - this.ended = false; // used to avoid multiple onEnd() calls - this.chunks = []; // chunks of compressed data + let rest = remaining(); + if (opts.noextglob !== true && /^\([^?]/.test(rest)) { + extglobOpen('star', value); + continue; + } - this.strm = new ZStream(); - this.strm.avail_out = 0; + if (prev.type === 'star') { + if (opts.noglobstar === true) { + consume(value); + continue; + } - var status = zlib_inflate.inflateInit2( - this.strm, - opt.windowBits - ); + const prior = prev.prev; + const before = prior.prev; + const isStart = prior.type === 'slash' || prior.type === 'bos'; + const afterStar = before && (before.type === 'star' || before.type === 'globstar'); - if (status !== c.Z_OK) { - throw new Error(msg[status]); - } + if (opts.bash === true && (!isStart || (rest[0] && rest[0] !== '/'))) { + push({ type: 'star', value, output: '' }); + continue; + } + + const isBrace = state.braces > 0 && (prior.type === 'comma' || prior.type === 'brace'); + const isExtglob = extglobs.length && (prior.type === 'pipe' || prior.type === 'paren'); + if (!isStart && prior.type !== 'paren' && !isBrace && !isExtglob) { + push({ type: 'star', value, output: '' }); + continue; + } + + // strip consecutive `/**/` + while (rest.slice(0, 3) === '/**') { + const after = input[state.index + 4]; + if (after && after !== '/') { + break; + } + rest = rest.slice(3); + consume('/**', 3); + } - this.header = new GZheader(); + if (prior.type === 'bos' && eos()) { + prev.type = 'globstar'; + prev.value += value; + prev.output = globstar(opts); + state.output = prev.output; + state.globstar = true; + consume(value); + continue; + } - zlib_inflate.inflateGetHeader(this.strm, this.header); + if (prior.type === 'slash' && prior.prev.type !== 'bos' && !afterStar && eos()) { + state.output = state.output.slice(0, -(prior.output + prev.output).length); + prior.output = `(?:${prior.output}`; - // Setup dictionary - if (opt.dictionary) { - // Convert data if needed - if (typeof opt.dictionary === 'string') { - opt.dictionary = strings.string2buf(opt.dictionary); - } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { - opt.dictionary = new Uint8Array(opt.dictionary); - } - if (opt.raw) { //In raw mode we need to set the dictionary early - status = zlib_inflate.inflateSetDictionary(this.strm, opt.dictionary); - if (status !== c.Z_OK) { - throw new Error(msg[status]); + prev.type = 'globstar'; + prev.output = globstar(opts) + (opts.strictSlashes ? ')' : '|$)'); + prev.value += value; + state.globstar = true; + state.output += prior.output + prev.output; + consume(value); + continue; } - } - } -} -/** - * Inflate#push(data[, mode]) -> Boolean - * - data (Uint8Array|Array|ArrayBuffer|String): input data - * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. - * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. 
- * - * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with - * new output chunks. Returns `true` on success. The last data block must have - * mode Z_FINISH (or `true`). That will flush internal pending buffers and call - * [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you - * can use mode Z_SYNC_FLUSH, keeping the decompression context. - * - * On fail call [[Inflate#onEnd]] with error code and return false. - * - * We strongly recommend to use `Uint8Array` on input for best speed (output - * format is detected automatically). Also, don't skip last param and always - * use the same type in your code (boolean or number). That will improve JS speed. - * - * For regular `Array`-s make sure all elements are [0..255]. - * - * ##### Example - * - * ```javascript - * push(chunk, false); // push one of data chunks - * ... - * push(chunk, true); // push last chunk - * ``` - **/ -Inflate.prototype.push = function (data, mode) { - var strm = this.strm; - var chunkSize = this.options.chunkSize; - var dictionary = this.options.dictionary; - var status, _mode; - var next_out_utf8, tail, utf8str; + if (prior.type === 'slash' && prior.prev.type !== 'bos' && rest[0] === '/') { + const end = rest[1] !== void 0 ? '|$' : ''; - // Flag to properly process Z_BUF_ERROR on testing inflate call - // when we check that all output data was flushed. - var allowBufError = false; + state.output = state.output.slice(0, -(prior.output + prev.output).length); + prior.output = `(?:${prior.output}`; - if (this.ended) { return false; } - _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH); + prev.type = 'globstar'; + prev.output = `${globstar(opts)}${SLASH_LITERAL}|${SLASH_LITERAL}${end})`; + prev.value += value; - // Convert data if needed - if (typeof data === 'string') { - // Only binary strings can be decompressed on practice - strm.input = strings.binstring2buf(data); - } else if (toString.call(data) === '[object ArrayBuffer]') { - strm.input = new Uint8Array(data); - } else { - strm.input = data; - } + state.output += prior.output + prev.output; + state.globstar = true; - strm.next_in = 0; - strm.avail_in = strm.input.length; + consume(value + advance()); - do { - if (strm.avail_out === 0) { - strm.output = new utils.Buf8(chunkSize); - strm.next_out = 0; - strm.avail_out = chunkSize; - } + push({ type: 'slash', value: '/', output: '' }); + continue; + } - status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */ + if (prior.type === 'bos' && rest[0] === '/') { + prev.type = 'globstar'; + prev.value += value; + prev.output = `(?:^|${SLASH_LITERAL}|${globstar(opts)}${SLASH_LITERAL})`; + state.output = prev.output; + state.globstar = true; + consume(value + advance()); + push({ type: 'slash', value: '/', output: '' }); + continue; + } - if (status === c.Z_NEED_DICT && dictionary) { - status = zlib_inflate.inflateSetDictionary(this.strm, dictionary); - } + // remove single star from output + state.output = state.output.slice(0, -prev.output.length); - if (status === c.Z_BUF_ERROR && allowBufError === true) { - status = c.Z_OK; - allowBufError = false; - } + // reset previous token to globstar + prev.type = 'globstar'; + prev.output = globstar(opts); + prev.value += value; - if (status !== c.Z_STREAM_END && status !== c.Z_OK) { - this.onEnd(status); - this.ended = true; - return false; + // reset output with globstar + state.output += prev.output; + state.globstar = true; + consume(value); + continue; } - if 
(strm.next_out) { - if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) { - - if (this.options.to === 'string') { + const token = { type: 'star', value, output: star }; - next_out_utf8 = strings.utf8border(strm.output, strm.next_out); + if (opts.bash === true) { + token.output = '.*?'; + if (prev.type === 'bos' || prev.type === 'slash') { + token.output = nodot + token.output; + } + push(token); + continue; + } - tail = strm.next_out - next_out_utf8; - utf8str = strings.buf2string(strm.output, next_out_utf8); + if (prev && (prev.type === 'bracket' || prev.type === 'paren') && opts.regex === true) { + token.output = value; + push(token); + continue; + } - // move tail - strm.next_out = tail; - strm.avail_out = chunkSize - tail; - if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); } + if (state.index === state.start || prev.type === 'slash' || prev.type === 'dot') { + if (prev.type === 'dot') { + state.output += NO_DOT_SLASH; + prev.output += NO_DOT_SLASH; - this.onData(utf8str); + } else if (opts.dot === true) { + state.output += NO_DOTS_SLASH; + prev.output += NO_DOTS_SLASH; - } else { - this.onData(utils.shrinkBuf(strm.output, strm.next_out)); - } + } else { + state.output += nodot; + prev.output += nodot; } - } - // When no more input data, we should check that internal inflate buffers - // are flushed. The only way to do it when avail_out = 0 - run one more - // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR. - // Here we set flag to process this error properly. - // - // NOTE. Deflate does not return error in this case and does not needs such - // logic. - if (strm.avail_in === 0 && strm.avail_out === 0) { - allowBufError = true; + if (peek() !== '*') { + state.output += ONE_CHAR; + prev.output += ONE_CHAR; + } } - } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END); - - if (status === c.Z_STREAM_END) { - _mode = c.Z_FINISH; + push(token); } - // Finalize on the last chunk. - if (_mode === c.Z_FINISH) { - status = zlib_inflate.inflateEnd(this.strm); - this.onEnd(status); - this.ended = true; - return status === c.Z_OK; + while (state.brackets > 0) { + if (opts.strictBrackets === true) throw new SyntaxError(syntaxError('closing', ']')); + state.output = utils.escapeLast(state.output, '['); + decrement('brackets'); } - // callback interim results if Z_SYNC_FLUSH. - if (_mode === c.Z_SYNC_FLUSH) { - this.onEnd(c.Z_OK); - strm.avail_out = 0; - return true; + while (state.parens > 0) { + if (opts.strictBrackets === true) throw new SyntaxError(syntaxError('closing', ')')); + state.output = utils.escapeLast(state.output, '('); + decrement('parens'); } - return true; -}; - - -/** - * Inflate#onData(chunk) -> Void - * - chunk (Uint8Array|Array|String): output data. Type of array depends - * on js engine support. When string output requested, each chunk - * will be string. - * - * By default, stores data blocks in `chunks[]` property and glue - * those in `onEnd`. Override this handler, if you need another behaviour. - **/ -Inflate.prototype.onData = function (chunk) { - this.chunks.push(chunk); -}; - - -/** - * Inflate#onEnd(status) -> Void - * - status (Number): inflate status. 0 (Z_OK) on success, - * other if not. - * - * Called either after you tell inflate that the input stream is - * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) - * or if an error happened. 
By default - join collected chunks, - * free memory and fill `results` / `err` properties. - **/ -Inflate.prototype.onEnd = function (status) { - // On success - join - if (status === c.Z_OK) { - if (this.options.to === 'string') { - // Glue & convert here, until we teach pako to send - // utf8 aligned strings to onData - this.result = this.chunks.join(''); - } else { - this.result = utils.flattenChunks(this.chunks); + while (state.braces > 0) { + if (opts.strictBrackets === true) throw new SyntaxError(syntaxError('closing', '}')); + state.output = utils.escapeLast(state.output, '{'); + decrement('braces'); + } + + if (opts.strictSlashes !== true && (prev.type === 'star' || prev.type === 'bracket')) { + push({ type: 'maybe_slash', value: '', output: `${SLASH_LITERAL}?` }); + } + + // rebuild the output if we had to backtrack at any point + if (state.backtrack === true) { + state.output = ''; + + for (const token of state.tokens) { + state.output += token.output != null ? token.output : token.value; + + if (token.suffix) { + state.output += token.suffix; + } } } - this.chunks = []; - this.err = status; - this.msg = this.strm.msg; -}; + return state; +}; /** - * inflate(data[, options]) -> Uint8Array|Array|String - * - data (Uint8Array|Array|String): input data to decompress. - * - options (Object): zlib inflate options. - * - * Decompress `data` with inflate/ungzip and `options`. Autodetect - * format via wrapper header by default. That's why we don't provide - * separate `ungzip` method. - * - * Supported options are: - * - * - windowBits - * - * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) - * for more information. - * - * Sugar (options): - * - * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify - * negative windowBits implicitly. - * - `to` (String) - if equal to 'string', then result will be converted - * from utf8 to utf16 (javascript) string. When string output requested, - * chunk length can differ from `chunkSize`, depending on content. - * - * - * ##### Example: - * - * ```javascript - * var pako = require('pako') - * , input = pako.deflate([1,2,3,4,5,6,7,8,9]) - * , output; - * - * try { - * output = pako.inflate(input); - * } catch (err) - * console.log(err); - * } - * ``` - **/ -function inflate(input, options) { - var inflator = new Inflate(options); - - inflator.push(input, true); + * Fast paths for creating regular expressions for common glob patterns. + * This can significantly speed up processing and has very little downside + * impact when none of the fast paths match. + */ - // That will never happens, if you don't cheat with options :) - if (inflator.err) { throw inflator.msg || msg[inflator.err]; } +parse.fastpaths = (input, options) => { + const opts = { ...options }; + const max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH; + const len = input.length; + if (len > max) { + throw new SyntaxError(`Input length: ${len}, exceeds maximum allowed length: ${max}`); + } - return inflator.result; -} + input = REPLACEMENTS[input] || input; + const win32 = utils.isWindows(options); + // create constants based on platform, for windows or posix + const { + DOT_LITERAL, + SLASH_LITERAL, + ONE_CHAR, + DOTS_SLASH, + NO_DOT, + NO_DOTS, + NO_DOTS_SLASH, + STAR, + START_ANCHOR + } = constants.globChars(win32); -/** - * inflateRaw(data[, options]) -> Uint8Array|Array|String - * - data (Uint8Array|Array|String): input data to decompress. 
- * - options (Object): zlib inflate options. - * - * The same as [[inflate]], but creates raw data, without wrapper - * (header and adler32 crc). - **/ -function inflateRaw(input, options) { - options = options || {}; - options.raw = true; - return inflate(input, options); -} + const nodot = opts.dot ? NO_DOTS : NO_DOT; + const slashDot = opts.dot ? NO_DOTS_SLASH : NO_DOT; + const capture = opts.capture ? '' : '?:'; + const state = { negated: false, prefix: '' }; + let star = opts.bash === true ? '.*?' : STAR; + if (opts.capture) { + star = `(${star})`; + } -/** - * ungzip(data[, options]) -> Uint8Array|Array|String - * - data (Uint8Array|Array|String): input data to decompress. - * - options (Object): zlib inflate options. - * - * Just shortcut to [[inflate]], because it autodetects format - * by header.content. Done for convenience. - **/ + const globstar = opts => { + if (opts.noglobstar === true) return star; + return `(${capture}(?:(?!${START_ANCHOR}${opts.dot ? DOTS_SLASH : DOT_LITERAL}).)*?)`; + }; + const create = str => { + switch (str) { + case '*': + return `${nodot}${ONE_CHAR}${star}`; -exports.Inflate = Inflate; -exports.inflate = inflate; -exports.inflateRaw = inflateRaw; -exports.ungzip = inflate; + case '.*': + return `${DOT_LITERAL}${ONE_CHAR}${star}`; + case '*.*': + return `${nodot}${star}${DOT_LITERAL}${ONE_CHAR}${star}`; -/***/ }), + case '*/*': + return `${nodot}${star}${SLASH_LITERAL}${ONE_CHAR}${slashDot}${star}`; -/***/ 833: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + case '**': + return nodot + globstar(opts); -"use strict"; + case '**/*': + return `(?:${nodot}${globstar(opts)}${SLASH_LITERAL})?${slashDot}${ONE_CHAR}${star}`; -Object.defineProperty(exports, "__esModule", { value: true }); -const sync_1 = __webpack_require__(394); -class SyncProvider { - constructor(_root, _settings) { - this._root = _root; - this._settings = _settings; - this._reader = new sync_1.default(this._root, this._settings); - } - read() { - return this._reader.read(); - } -} -exports.default = SyncProvider; + case '**/*.*': + return `(?:${nodot}${globstar(opts)}${SLASH_LITERAL})?${slashDot}${star}${DOT_LITERAL}${ONE_CHAR}${star}`; + case '**/.*': + return `(?:${nodot}${globstar(opts)}${SLASH_LITERAL})?${DOT_LITERAL}${ONE_CHAR}${star}`; -/***/ }), + default: { + const match = /^(.*?)\.(\w+)$/.exec(str); + if (!match) return; -/***/ 868: -/***/ (function(module) { + const source = create(match[1]); + if (!source) return; -"use strict"; + return source + DOT_LITERAL + match[2]; + } + } + }; + const output = utils.removePrefix(input, state); + let source = create(output); -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. 
This notice may not be removed or altered from any source distribution. + if (source && opts.strictSlashes !== true) { + source += `${SLASH_LITERAL}?`; + } -module.exports = { - 2: 'need dictionary', /* Z_NEED_DICT 2 */ - 1: 'stream end', /* Z_STREAM_END 1 */ - 0: '', /* Z_OK 0 */ - '-1': 'file error', /* Z_ERRNO (-1) */ - '-2': 'stream error', /* Z_STREAM_ERROR (-2) */ - '-3': 'data error', /* Z_DATA_ERROR (-3) */ - '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */ - '-5': 'buffer error', /* Z_BUF_ERROR (-5) */ - '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ + return source; }; +module.exports = parse; + /***/ }), -/***/ 872: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 3322: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const fs = __webpack_require__(984); -class Settings { - constructor(_options = {}) { - this._options = _options; - this.followSymbolicLink = this._getValue(this._options.followSymbolicLink, true); - this.fs = fs.createFileSystemAdapter(this._options.fs); - this.markSymbolicLink = this._getValue(this._options.markSymbolicLink, false); - this.throwErrorOnBrokenSymbolicLink = this._getValue(this._options.throwErrorOnBrokenSymbolicLink, true); - } - _getValue(option, value) { - return option !== null && option !== void 0 ? option : value; - } -} -exports.default = Settings; +const path = __nccwpck_require__(1017); +const scan = __nccwpck_require__(2429); +const parse = __nccwpck_require__(2139); +const utils = __nccwpck_require__(479); +const constants = __nccwpck_require__(6099); +const isObject = val => val && typeof val === 'object' && !Array.isArray(val); -/***/ }), +/** + * Creates a matcher function from one or more glob patterns. The + * returned function takes a string to match as its first argument, + * and returns true if the string is a match. The returned matcher + * function also takes a boolean as the second argument that, when true, + * returns an object with additional information. + * + * ```js + * const picomatch = require('picomatch'); + * // picomatch(glob[, options]); + * + * const isMatch = picomatch('*.!(*a)'); + * console.log(isMatch('a.a')); //=> false + * console.log(isMatch('a.b')); //=> true + * ``` + * @name picomatch + * @param {String|Array} `globs` One or more glob patterns. + * @param {Object=} `options` + * @return {Function=} Returns a matcher function. 
+ * @api public + */ -/***/ 874: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +const picomatch = (glob, options, returnState = false) => { + if (Array.isArray(glob)) { + const fns = glob.map(input => picomatch(input, options, returnState)); + const arrayMatcher = str => { + for (const isMatch of fns) { + const state = isMatch(str); + if (state) return state; + } + return false; + }; + return arrayMatcher; + } -"use strict"; + const isState = isObject(glob) && glob.tokens && glob.input; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.createFileSystemAdapter = exports.FILE_SYSTEM_ADAPTER = void 0; -const fs = __webpack_require__(747); -exports.FILE_SYSTEM_ADAPTER = { - lstat: fs.lstat, - stat: fs.stat, - lstatSync: fs.lstatSync, - statSync: fs.statSync, - readdir: fs.readdir, - readdirSync: fs.readdirSync -}; -function createFileSystemAdapter(fsMethods) { - if (fsMethods === undefined) { - return exports.FILE_SYSTEM_ADAPTER; - } - return Object.assign(Object.assign({}, exports.FILE_SYSTEM_ADAPTER), fsMethods); -} -exports.createFileSystemAdapter = createFileSystemAdapter; + if (glob === '' || (typeof glob !== 'string' && !isState)) { + throw new TypeError('Expected pattern to be a non-empty string'); + } + const opts = options || {}; + const posix = utils.isWindows(options); + const regex = isState + ? picomatch.compileRe(glob, options) + : picomatch.makeRe(glob, options, false, true); -/***/ }), + const state = regex.state; + delete regex.state; -/***/ 884: -/***/ (function(__unusedmodule, exports) { + let isIgnored = () => false; + if (opts.ignore) { + const ignoreOpts = { ...options, ignore: null, onMatch: null, onResult: null }; + isIgnored = picomatch(opts.ignore, ignoreOpts, returnState); + } -"use strict"; + const matcher = (input, returnObject = false) => { + const { isMatch, match, output } = picomatch.test(input, regex, options, { glob, posix }); + const result = { glob, state, regex, posix, input, output, match, isMatch }; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.isEmpty = exports.isString = void 0; -function isString(input) { - return typeof input === 'string'; -} -exports.isString = isString; -function isEmpty(input) { - return input === ''; -} -exports.isEmpty = isEmpty; + if (typeof opts.onResult === 'function') { + opts.onResult(result); + } + if (isMatch === false) { + result.isMatch = false; + return returnObject ? result : false; + } -/***/ }), + if (isIgnored(input)) { + if (typeof opts.onIgnore === 'function') { + opts.onIgnore(result); + } + result.isMatch = false; + return returnObject ? result : false; + } -/***/ 885: -/***/ (function(module, __unusedexports, __webpack_require__) { + if (typeof opts.onMatch === 'function') { + opts.onMatch(result); + } + return returnObject ? result : true; + }; -/*! run-parallel. MIT License. Feross Aboukhadijeh */ -module.exports = runParallel + if (returnState) { + matcher.state = state; + } -const queueMicrotask = __webpack_require__(926) + return matcher; +}; -function runParallel (tasks, cb) { - let results, pending, keys - let isSync = true +/** + * Test `input` with the given `regex`. This is used by the main + * `picomatch()` function to test the input string. 
+ * + * ```js + * const picomatch = require('picomatch'); + * // picomatch.test(input, regex[, options]); + * + * console.log(picomatch.test('foo/bar', /^(?:([^/]*?)\/([^/]*?))$/)); + * // { isMatch: true, match: [ 'foo/', 'foo', 'bar' ], output: 'foo/bar' } + * ``` + * @param {String} `input` String to test. + * @param {RegExp} `regex` + * @return {Object} Returns an object with matching info. + * @api public + */ - if (Array.isArray(tasks)) { - results = [] - pending = tasks.length - } else { - keys = Object.keys(tasks) - results = {} - pending = keys.length +picomatch.test = (input, regex, options, { glob, posix } = {}) => { + if (typeof input !== 'string') { + throw new TypeError('Expected input to be a string'); } - function done (err) { - function end () { - if (cb) cb(err, results) - cb = null - } - if (isSync) queueMicrotask(end) - else end() + if (input === '') { + return { isMatch: false, output: '' }; } - function each (i, err, result) { - results[i] = result - if (--pending === 0 || err) { - done(err) - } + const opts = options || {}; + const format = opts.format || (posix ? utils.toPosixSlashes : null); + let match = input === glob; + let output = (match && format) ? format(input) : input; + + if (match === false) { + output = format ? format(input) : input; + match = output === glob; } - if (!pending) { - // empty - done(null) - } else if (keys) { - // object - keys.forEach(function (key) { - tasks[key](function (err, result) { each(key, err, result) }) - }) - } else { - // array - tasks.forEach(function (task, i) { - task(function (err, result) { each(i, err, result) }) - }) + if (match === false || opts.capture === true) { + if (opts.matchBase === true || opts.basename === true) { + match = picomatch.matchBase(input, regex, options, posix); + } else { + match = regex.exec(output); + } } - isSync = false -} + return { isMatch: Boolean(match), match, output }; +}; +/** + * Match the basename of a filepath. + * + * ```js + * const picomatch = require('picomatch'); + * // picomatch.matchBase(input, glob[, options]); + * console.log(picomatch.matchBase('foo/bar.js', '*.js'); // true + * ``` + * @param {String} `input` String to test. + * @param {RegExp|String} `glob` Glob pattern or regex created by [.makeRe](#makeRe). + * @return {Boolean} + * @api public + */ -/***/ }), +picomatch.matchBase = (input, glob, options, posix = utils.isWindows(options)) => { + const regex = glob instanceof RegExp ? glob : picomatch.makeRe(glob, options); + return regex.test(path.basename(input)); +}; -/***/ 887: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/** + * Returns true if **any** of the given glob `patterns` match the specified `string`. + * + * ```js + * const picomatch = require('picomatch'); + * // picomatch.isMatch(string, patterns[, options]); + * + * console.log(picomatch.isMatch('a.a', ['b.*', '*.a'])); //=> true + * console.log(picomatch.isMatch('a.a', 'b.*')); //=> false + * ``` + * @param {String|Array} str The string to test. + * @param {String|Array} patterns One or more glob patterns to use for matching. + * @param {Object} [options] See available [options](#options). 
+ * @return {Boolean} Returns true if any patterns match `str` + * @api public + */ -"use strict"; +picomatch.isMatch = (str, patterns, options) => picomatch(patterns, options)(str); -Object.defineProperty(exports, "__esModule", { value: true }); -const utils = __webpack_require__(444); -const partial_1 = __webpack_require__(75); -class DeepFilter { - constructor(_settings, _micromatchOptions) { - this._settings = _settings; - this._micromatchOptions = _micromatchOptions; - } - getFilter(basePath, positive, negative) { - const matcher = this._getMatcher(positive); - const negativeRe = this._getNegativePatternsRe(negative); - return (entry) => this._filter(basePath, entry, matcher, negativeRe); - } - _getMatcher(patterns) { - return new partial_1.default(patterns, this._settings, this._micromatchOptions); - } - _getNegativePatternsRe(patterns) { - const affectDepthOfReadingPatterns = patterns.filter(utils.pattern.isAffectDepthOfReadingPattern); - return utils.pattern.convertPatternsToRe(affectDepthOfReadingPatterns, this._micromatchOptions); - } - _filter(basePath, entry, matcher, negativeRe) { - if (this._isSkippedByDeep(basePath, entry.path)) { - return false; - } - if (this._isSkippedSymbolicLink(entry)) { - return false; - } - const filepath = utils.path.removeLeadingDotSegment(entry.path); - if (this._isSkippedByPositivePatterns(filepath, matcher)) { - return false; - } - return this._isSkippedByNegativePatterns(filepath, negativeRe); - } - _isSkippedByDeep(basePath, entryPath) { - /** - * Avoid unnecessary depth calculations when it doesn't matter. - */ - if (this._settings.deep === Infinity) { - return false; - } - return this._getEntryLevel(basePath, entryPath) >= this._settings.deep; - } - _getEntryLevel(basePath, entryPath) { - const entryPathDepth = entryPath.split('/').length; - if (basePath === '') { - return entryPathDepth; - } - const basePathDepth = basePath.split('/').length; - return entryPathDepth - basePathDepth; - } - _isSkippedSymbolicLink(entry) { - return !this._settings.followSymbolicLinks && entry.dirent.isSymbolicLink(); - } - _isSkippedByPositivePatterns(entryPath, matcher) { - return !this._settings.baseNameMatch && !matcher.match(entryPath); - } - _isSkippedByNegativePatterns(entryPath, patternsRe) { - return !utils.pattern.matchAny(entryPath, patternsRe); - } -} -exports.default = DeepFilter; +/** + * Parse a glob pattern to create the source string for a regular + * expression. + * + * ```js + * const picomatch = require('picomatch'); + * const result = picomatch.parse(pattern[, options]); + * ``` + * @param {String} `pattern` + * @param {Object} `options` + * @return {Object} Returns an object with useful properties and output to be used as a regex source string. + * @api public + */ +picomatch.parse = (pattern, options) => { + if (Array.isArray(pattern)) return pattern.map(p => picomatch.parse(p, options)); + return parse(pattern, { ...options, fastpaths: false }); +}; -/***/ }), +/** + * Scan a glob pattern to separate the pattern into segments. + * + * ```js + * const picomatch = require('picomatch'); + * // picomatch.scan(input[, options]); + * + * const result = picomatch.scan('!./foo/*.js'); + * console.log(result); + * { prefix: '!./', + * input: '!./foo/*.js', + * start: 3, + * base: 'foo', + * glob: '*.js', + * isBrace: false, + * isBracket: false, + * isGlob: true, + * isExtglob: false, + * isGlobstar: false, + * negated: true } + * ``` + * @param {String} `input` Glob pattern to scan. 
+ * @param {Object} `options` + * @return {Object} Returns an object with + * @api public + */ -/***/ 888: -/***/ (function(module) { +picomatch.scan = (input, options) => scan(input, options); -/*! - * is-extglob +/** + * Compile a regular expression from the `state` object returned by the + * [parse()](#parse) method. * - * Copyright (c) 2014-2016, Jon Schlinkert. - * Licensed under the MIT License. + * @param {Object} `state` + * @param {Object} `options` + * @param {Boolean} `returnOutput` Intended for implementors, this argument allows you to return the raw output from the parser. + * @param {Boolean} `returnState` Adds the state to a `state` property on the returned regex. Useful for implementors and debugging. + * @return {RegExp} + * @api public */ -module.exports = function isExtglob(str) { - if (typeof str !== 'string' || str === '') { - return false; - } - - var match; - while ((match = /(\\).|([@?!+*]\(.*\))/g.exec(str))) { - if (match[2]) return true; - str = str.slice(match.index + match[0].length); +picomatch.compileRe = (state, options, returnOutput = false, returnState = false) => { + if (returnOutput === true) { + return state.output; } - return false; -}; + const opts = options || {}; + const prepend = opts.contains ? '' : '^'; + const append = opts.contains ? '' : '$'; + let source = `${prepend}(?:${state.output})${append}`; + if (state && state.negated === true) { + source = `^(?!${source}).*$`; + } -/***/ }), + const regex = picomatch.toRegex(source, options); + if (returnState === true) { + regex.state = state; + } -/***/ 914: -/***/ (function(module) { + return regex; +}; -"use strict"; -/*! - * is-number +/** + * Create a regular expression from a parsed glob pattern. * - * Copyright (c) 2014-present, Jon Schlinkert. - * Released under the MIT License. + * ```js + * const picomatch = require('picomatch'); + * const state = picomatch.parse('*.js'); + * // picomatch.compileRe(state[, options]); + * + * console.log(picomatch.compileRe(state)); + * //=> /^(?:(?!\.)(?=.)[^/]*?\.js)$/ + * ``` + * @param {String} `state` The object returned from the `.parse` method. + * @param {Object} `options` + * @param {Boolean} `returnOutput` Implementors may use this argument to return the compiled output, instead of a regular expression. This is not exposed on the options to prevent end-users from mutating the result. + * @param {Boolean} `returnState` Implementors may use this argument to return the state from the parsed glob with the returned regular expression. + * @return {RegExp} Returns a regex created from the given pattern. + * @api public */ - - -module.exports = function(num) { - if (typeof num === 'number') { - return num - num === 0; - } - if (typeof num === 'string' && num.trim() !== '') { - return Number.isFinite ? Number.isFinite(+num) : isFinite(+num); +picomatch.makeRe = (input, options = {}, returnOutput = false, returnState = false) => { + if (!input || typeof input !== 'string') { + throw new TypeError('Expected a non-empty string'); } - return false; -}; + let parsed = { negated: false, fastpaths: true }; -/***/ }), + if (options.fastpaths !== false && (input[0] === '.' || input[0] === '*')) { + parsed.output = parse.fastpaths(input, options); + } -/***/ 926: -/***/ (function(module) { + if (!parsed.output) { + parsed = parse(input, options); + } -/*! queue-microtask. MIT License. Feross Aboukhadijeh */ -let promise + return picomatch.compileRe(parsed, options, returnOutput, returnState); +}; -module.exports = typeof queueMicrotask === 'function' - ? 
queueMicrotask.bind(typeof window !== 'undefined' ? window : global) - // reuse resolved promise, and allocate it lazily - : cb => (promise || (promise = Promise.resolve())) - .then(cb) - .catch(err => setTimeout(() => { throw err }, 0)) +/** + * Create a regular expression from the given regex source string. + * + * ```js + * const picomatch = require('picomatch'); + * // picomatch.toRegex(source[, options]); + * + * const { output } = picomatch.parse('*.js'); + * console.log(picomatch.toRegex(output)); + * //=> /^(?:(?!\.)(?=.)[^/]*?\.js)$/ + * ``` + * @param {String} `source` Regular expression source string. + * @param {Object} `options` + * @return {RegExp} + * @api public + */ +picomatch.toRegex = (source, options) => { + try { + const opts = options || {}; + return new RegExp(source, opts.flags || (opts.nocase ? 'i' : '')); + } catch (err) { + if (options && options.debug === true) throw err; + return /$^/; + } +}; -/***/ }), +/** + * Picomatch constants. + * @return {Object} + */ -/***/ 933: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +picomatch.constants = constants; -"use strict"; +/** + * Expose "picomatch" + */ -Object.defineProperty(exports, "__esModule", { value: true }); -exports.fs = void 0; -const fs = __webpack_require__(210); -exports.fs = fs; +module.exports = picomatch; /***/ }), -/***/ 949: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/***/ 2429: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const path = __webpack_require__(622); -const fsStat = __webpack_require__(231); -const utils = __webpack_require__(444); -class Reader { - constructor(_settings) { - this._settings = _settings; - this._fsStatSettings = new fsStat.Settings({ - followSymbolicLink: this._settings.followSymbolicLinks, - fs: this._settings.fs, - throwErrorOnBrokenSymbolicLink: this._settings.followSymbolicLinks - }); - } - _getFullEntryPath(filepath) { - return path.resolve(this._settings.cwd, filepath); - } - _makeEntry(stats, pattern) { - const entry = { - name: pattern, - path: pattern, - dirent: utils.fs.createDirentFromStats(pattern, stats) - }; - if (this._settings.stats) { - entry.stats = stats; - } - return entry; - } - _isFatalError(error) { - return !utils.errno.isEnoentCodeError(error) && !this._settings.suppressErrors; - } -} -exports.default = Reader; +const utils = __nccwpck_require__(479); +const { + CHAR_ASTERISK, /* * */ + CHAR_AT, /* @ */ + CHAR_BACKWARD_SLASH, /* \ */ + CHAR_COMMA, /* , */ + CHAR_DOT, /* . */ + CHAR_EXCLAMATION_MARK, /* ! */ + CHAR_FORWARD_SLASH, /* / */ + CHAR_LEFT_CURLY_BRACE, /* { */ + CHAR_LEFT_PARENTHESES, /* ( */ + CHAR_LEFT_SQUARE_BRACKET, /* [ */ + CHAR_PLUS, /* + */ + CHAR_QUESTION_MARK, /* ? */ + CHAR_RIGHT_CURLY_BRACE, /* } */ + CHAR_RIGHT_PARENTHESES, /* ) */ + CHAR_RIGHT_SQUARE_BRACKET /* ] */ +} = __nccwpck_require__(6099); + +const isPathSeparator = code => { + return code === CHAR_FORWARD_SLASH || code === CHAR_BACKWARD_SLASH; +}; -/***/ }), +const depth = token => { + if (token.isPrefix !== true) { + token.depth = token.isGlobstar ? 
Infinity : 1; + } +}; -/***/ 956: -/***/ (function(__unusedmodule, exports, __webpack_require__) { +/** + * Quickly scans a glob pattern and returns an object with a handful of + * useful properties, like `isGlob`, `path` (the leading non-glob, if it exists), + * `glob` (the actual pattern), `negated` (true if the path starts with `!` but not + * with `!(`) and `negatedExtglob` (true if the path starts with `!(`). + * + * ```js + * const pm = require('picomatch'); + * console.log(pm.scan('foo/bar/*.js')); + * { isGlob: true, input: 'foo/bar/*.js', base: 'foo/bar', glob: '*.js' } + * ``` + * @param {String} `str` + * @param {Object} `options` + * @return {Object} Returns an object with tokens and regex source string. + * @api public + */ -"use strict"; +const scan = (input, options) => { + const opts = options || {}; + const length = input.length - 1; + const scanToEnd = opts.parts === true || opts.scanToEnd === true; + const slashes = []; + const tokens = []; + const parts = []; -Object.defineProperty(exports, '__esModule', { value: true }); + let str = input; + let index = -1; + let start = 0; + let lastIndex = 0; + let isBrace = false; + let isBracket = false; + let isGlob = false; + let isExtglob = false; + let isGlobstar = false; + let braceEscaped = false; + let backslashes = false; + let negated = false; + let negatedExtglob = false; + let finished = false; + let braces = 0; + let prev; + let code; + let token = { value: '', depth: 0, isGlob: false }; -function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + const eos = () => index >= length; + const peek = () => str.charCodeAt(index + 1); + const advance = () => { + prev = code; + return str.charCodeAt(++index); + }; -var AsyncLock = _interopDefault(__webpack_require__(124)); -var Hash = _interopDefault(__webpack_require__(529)); -var crc32 = _interopDefault(__webpack_require__(551)); -var pako = _interopDefault(__webpack_require__(246)); -var ignore = _interopDefault(__webpack_require__(396)); -var pify = _interopDefault(__webpack_require__(802)); -var cleanGitRef = _interopDefault(__webpack_require__(178)); -var diff3Merge = _interopDefault(__webpack_require__(750)); + while (index < length) { + code = advance(); + let next; -/** - * @typedef {Object} GitProgressEvent - * @property {string} phase - * @property {number} loaded - * @property {number} total - */ + if (code === CHAR_BACKWARD_SLASH) { + backslashes = token.backslashes = true; + code = advance(); -/** - * @callback ProgressCallback - * @param {GitProgressEvent} progress - * @returns {void | Promise} - */ + if (code === CHAR_LEFT_CURLY_BRACE) { + braceEscaped = true; + } + continue; + } -/** - * @typedef {Object} GitHttpRequest - * @property {string} url - The URL to request - * @property {string} [method='GET'] - The HTTP method to use - * @property {Object} [headers={}] - Headers to include in the HTTP request - * @property {AsyncIterableIterator} [body] - An async iterator of Uint8Arrays that make up the body of POST requests - * @property {ProgressCallback} [onProgress] - Reserved for future use (emitting `GitProgressEvent`s) - * @property {object} [signal] - Reserved for future use (canceling a request) - */ + if (braceEscaped === true || code === CHAR_LEFT_CURLY_BRACE) { + braces++; -/** - * @typedef {Object} GitHttpResponse - * @property {string} url - The final URL that was fetched after any redirects - * @property {string} [method] - The HTTP method that was used - * @property {Object} [headers] - HTTP 
response headers - * @property {AsyncIterableIterator} [body] - An async iterator of Uint8Arrays that make up the body of the response - * @property {number} statusCode - The HTTP status code - * @property {string} statusMessage - The HTTP status message - */ + while (eos() !== true && (code = advance())) { + if (code === CHAR_BACKWARD_SLASH) { + backslashes = token.backslashes = true; + advance(); + continue; + } -/** - * @callback HttpFetch - * @param {GitHttpRequest} request - * @returns {Promise} - */ + if (code === CHAR_LEFT_CURLY_BRACE) { + braces++; + continue; + } -/** - * @typedef {Object} HttpClient - * @property {HttpFetch} request - */ + if (braceEscaped !== true && code === CHAR_DOT && (code = advance()) === CHAR_DOT) { + isBrace = token.isBrace = true; + isGlob = token.isGlob = true; + finished = true; -/** - * A git commit object. - * - * @typedef {Object} CommitObject - * @property {string} message Commit message - * @property {string} tree SHA-1 object id of corresponding file tree - * @property {string[]} parent an array of zero or more SHA-1 object ids - * @property {Object} author - * @property {string} author.name The author's name - * @property {string} author.email The author's email - * @property {number} author.timestamp UTC Unix timestamp in seconds - * @property {number} author.timezoneOffset Timezone difference from UTC in minutes - * @property {Object} committer - * @property {string} committer.name The committer's name - * @property {string} committer.email The committer's email - * @property {number} committer.timestamp UTC Unix timestamp in seconds - * @property {number} committer.timezoneOffset Timezone difference from UTC in minutes - * @property {string} [gpgsig] PGP signature (if present) - */ + if (scanToEnd === true) { + continue; + } -/** - * An entry from a git tree object. Files are called 'blobs' and directories are called 'trees'. - * - * @typedef {Object} TreeEntry - * @property {string} mode the 6 digit hexadecimal mode - * @property {string} path the name of the file or directory - * @property {string} oid the SHA-1 object id of the blob or tree - * @property {'commit'|'blob'|'tree'} type the type of object - */ + break; + } -/** - * A git tree object. Trees represent a directory snapshot. - * - * @typedef {TreeEntry[]} TreeObject - */ + if (braceEscaped !== true && code === CHAR_COMMA) { + isBrace = token.isBrace = true; + isGlob = token.isGlob = true; + finished = true; -/** - * A git annotated tag object. 
- * - * @typedef {Object} TagObject - * @property {string} object SHA-1 object id of object being tagged - * @property {'blob' | 'tree' | 'commit' | 'tag'} type the type of the object being tagged - * @property {string} tag the tag name - * @property {Object} tagger - * @property {string} tagger.name the tagger's name - * @property {string} tagger.email the tagger's email - * @property {number} tagger.timestamp UTC Unix timestamp in seconds - * @property {number} tagger.timezoneOffset timezone difference from UTC in minutes - * @property {string} message tag message - * @property {string} [gpgsig] PGP signature (if present) - */ + if (scanToEnd === true) { + continue; + } -/** - * @typedef {Object} ReadCommitResult - * @property {string} oid - SHA-1 object id of this commit - * @property {CommitObject} commit - the parsed commit object - * @property {string} payload - PGP signing payload - */ + break; + } -/** - * @typedef {Object} ServerRef - This object has the following schema: - * @property {string} ref - The name of the ref - * @property {string} oid - The SHA-1 object id the ref points to - * @property {string} [target] - The target ref pointed to by a symbolic ref - * @property {string} [peeled] - If the oid is the SHA-1 object id of an annotated tag, this is the SHA-1 object id that the annotated tag points to - */ + if (code === CHAR_RIGHT_CURLY_BRACE) { + braces--; -/** - * @typedef Walker - * @property {Symbol} Symbol('GitWalkerSymbol') - */ + if (braces === 0) { + braceEscaped = false; + isBrace = token.isBrace = true; + finished = true; + break; + } + } + } -/** - * Normalized subset of filesystem `stat` data: - * - * @typedef {Object} Stat - * @property {number} ctimeSeconds - * @property {number} ctimeNanoseconds - * @property {number} mtimeSeconds - * @property {number} mtimeNanoseconds - * @property {number} dev - * @property {number} ino - * @property {number} mode - * @property {number} uid - * @property {number} gid - * @property {number} size - */ + if (scanToEnd === true) { + continue; + } -/** - * The `WalkerEntry` is an interface that abstracts computing many common tree / blob stats. 
- * - * @typedef {Object} WalkerEntry - * @property {function(): Promise<'tree'|'blob'|'special'|'commit'>} type - * @property {function(): Promise} mode - * @property {function(): Promise} oid - * @property {function(): Promise} content - * @property {function(): Promise} stat - */ + break; + } -/** - * @typedef {Object} CallbackFsClient - * @property {function} readFile - https://nodejs.org/api/fs.html#fs_fs_readfile_path_options_callback - * @property {function} writeFile - https://nodejs.org/api/fs.html#fs_fs_writefile_file_data_options_callback - * @property {function} unlink - https://nodejs.org/api/fs.html#fs_fs_unlink_path_callback - * @property {function} readdir - https://nodejs.org/api/fs.html#fs_fs_readdir_path_options_callback - * @property {function} mkdir - https://nodejs.org/api/fs.html#fs_fs_mkdir_path_mode_callback - * @property {function} rmdir - https://nodejs.org/api/fs.html#fs_fs_rmdir_path_callback - * @property {function} stat - https://nodejs.org/api/fs.html#fs_fs_stat_path_options_callback - * @property {function} lstat - https://nodejs.org/api/fs.html#fs_fs_lstat_path_options_callback - * @property {function} [readlink] - https://nodejs.org/api/fs.html#fs_fs_readlink_path_options_callback - * @property {function} [symlink] - https://nodejs.org/api/fs.html#fs_fs_symlink_target_path_type_callback - * @property {function} [chmod] - https://nodejs.org/api/fs.html#fs_fs_chmod_path_mode_callback - */ + if (code === CHAR_FORWARD_SLASH) { + slashes.push(index); + tokens.push(token); + token = { value: '', depth: 0, isGlob: false }; -/** - * @typedef {Object} PromiseFsClient - * @property {Object} promises - * @property {function} promises.readFile - https://nodejs.org/api/fs.html#fs_fspromises_readfile_path_options - * @property {function} promises.writeFile - https://nodejs.org/api/fs.html#fs_fspromises_writefile_file_data_options - * @property {function} promises.unlink - https://nodejs.org/api/fs.html#fs_fspromises_unlink_path - * @property {function} promises.readdir - https://nodejs.org/api/fs.html#fs_fspromises_readdir_path_options - * @property {function} promises.mkdir - https://nodejs.org/api/fs.html#fs_fspromises_mkdir_path_options - * @property {function} promises.rmdir - https://nodejs.org/api/fs.html#fs_fspromises_rmdir_path - * @property {function} promises.stat - https://nodejs.org/api/fs.html#fs_fspromises_stat_path_options - * @property {function} promises.lstat - https://nodejs.org/api/fs.html#fs_fspromises_lstat_path_options - * @property {function} [promises.readlink] - https://nodejs.org/api/fs.html#fs_fspromises_readlink_path_options - * @property {function} [promises.symlink] - https://nodejs.org/api/fs.html#fs_fspromises_symlink_target_path_type - * @property {function} [promises.chmod] - https://nodejs.org/api/fs.html#fs_fspromises_chmod_path_mode - */ + if (finished === true) continue; + if (prev === CHAR_DOT && index === (start + 1)) { + start += 2; + continue; + } -/** - * @typedef {CallbackFsClient | PromiseFsClient} FsClient - */ + lastIndex = index + 1; + continue; + } -/** - * @callback MessageCallback - * @param {string} message - * @returns {void | Promise} - */ + if (opts.noext !== true) { + const isExtglobChar = code === CHAR_PLUS + || code === CHAR_AT + || code === CHAR_ASTERISK + || code === CHAR_QUESTION_MARK + || code === CHAR_EXCLAMATION_MARK; -/** - * @typedef {Object} GitAuth - * @property {string} [username] - * @property {string} [password] - * @property {Object} [headers] - * @property {boolean} [cancel] Tells git to throw a 
`UserCanceledError` (instead of an `HttpError`). - */ + if (isExtglobChar === true && peek() === CHAR_LEFT_PARENTHESES) { + isGlob = token.isGlob = true; + isExtglob = token.isExtglob = true; + finished = true; + if (code === CHAR_EXCLAMATION_MARK && index === start) { + negatedExtglob = true; + } -/** - * @callback AuthCallback - * @param {string} url - * @param {GitAuth} auth Might have some values if the URL itself originally contained a username or password. - * @returns {GitAuth | void | Promise} - */ + if (scanToEnd === true) { + while (eos() !== true && (code = advance())) { + if (code === CHAR_BACKWARD_SLASH) { + backslashes = token.backslashes = true; + code = advance(); + continue; + } -/** - * @callback AuthFailureCallback - * @param {string} url - * @param {GitAuth} auth The credentials that failed - * @returns {GitAuth | void | Promise} - */ + if (code === CHAR_RIGHT_PARENTHESES) { + isGlob = token.isGlob = true; + finished = true; + break; + } + } + continue; + } + break; + } + } -/** - * @callback AuthSuccessCallback - * @param {string} url - * @param {GitAuth} auth - * @returns {void | Promise} - */ + if (code === CHAR_ASTERISK) { + if (prev === CHAR_ASTERISK) isGlobstar = token.isGlobstar = true; + isGlob = token.isGlob = true; + finished = true; -/** - * @typedef {Object} SignParams - * @property {string} payload - a plaintext message - * @property {string} secretKey - an 'ASCII armor' encoded PGP key (technically can actually contain _multiple_ keys) - */ + if (scanToEnd === true) { + continue; + } + break; + } -/** - * @callback SignCallback - * @param {SignParams} args - * @return {{signature: string} | Promise<{signature: string}>} - an 'ASCII armor' encoded "detached" signature - */ + if (code === CHAR_QUESTION_MARK) { + isGlob = token.isGlob = true; + finished = true; -/** - * @callback WalkerMap - * @param {string} filename - * @param {WalkerEntry[]} entries - * @returns {Promise} - */ + if (scanToEnd === true) { + continue; + } + break; + } -/** - * @callback WalkerReduce - * @param {any} parent - * @param {any[]} children - * @returns {Promise} - */ + if (code === CHAR_LEFT_SQUARE_BRACKET) { + while (eos() !== true && (next = advance())) { + if (next === CHAR_BACKWARD_SLASH) { + backslashes = token.backslashes = true; + advance(); + continue; + } -/** - * @callback WalkerIterateCallback - * @param {WalkerEntry[]} entries - * @returns {Promise} - */ + if (next === CHAR_RIGHT_SQUARE_BRACKET) { + isBracket = token.isBracket = true; + isGlob = token.isGlob = true; + finished = true; + break; + } + } -/** - * @callback WalkerIterate - * @param {WalkerIterateCallback} walk - * @param {IterableIterator} children - * @returns {Promise} - */ + if (scanToEnd === true) { + continue; + } -/** - * @typedef {Object} RefUpdateStatus - * @property {boolean} ok - * @property {string} error - */ + break; + } -/** - * @typedef {Object} PushResult - * @property {boolean} ok - * @property {?string} error - * @property {Object} refs - * @property {Object} [headers] - */ + if (opts.nonegate !== true && code === CHAR_EXCLAMATION_MARK && index === start) { + negated = token.negated = true; + start++; + continue; + } -/** - * @typedef {0|1} HeadStatus - */ + if (opts.noparen !== true && code === CHAR_LEFT_PARENTHESES) { + isGlob = token.isGlob = true; -/** - * @typedef {0|1|2} WorkdirStatus - */ + if (scanToEnd === true) { + while (eos() !== true && (code = advance())) { + if (code === CHAR_LEFT_PARENTHESES) { + backslashes = token.backslashes = true; + code = advance(); + continue; + } 
-/** - * @typedef {0|1|2|3} StageStatus - */ + if (code === CHAR_RIGHT_PARENTHESES) { + finished = true; + break; + } + } + continue; + } + break; + } -/** - * @typedef {[string, HeadStatus, WorkdirStatus, StageStatus]} StatusRow - */ + if (isGlob === true) { + finished = true; -class BaseError extends Error { - constructor(message) { - super(message); - // Setting this here allows TS to infer that all git errors have a `caller` property and - // that its type is string. - this.caller = ''; - } + if (scanToEnd === true) { + continue; + } - toJSON() { - // Error objects aren't normally serializable. So we do something about that. - return { - code: this.code, - data: this.data, - caller: this.caller, - message: this.message, - stack: this.stack, + break; } } - fromJSON(json) { - const e = new BaseError(json.message); - e.code = json.code; - e.data = json.data; - e.caller = json.caller; - e.stack = json.stack; - return e + if (opts.noext === true) { + isExtglob = false; + isGlob = false; } - get isIsomorphicGitError() { - return true + let base = str; + let prefix = ''; + let glob = ''; + + if (start > 0) { + prefix = str.slice(0, start); + str = str.slice(start); + lastIndex -= start; } -} -class InternalError extends BaseError { - /** - * @param {string} message - */ - constructor(message) { - super( - `An internal error caused this command to fail. Please file a bug report at https://github.com/isomorphic-git/isomorphic-git/issues with this error message: ${message}` - ); - this.code = this.name = InternalError.code; - this.data = { message }; + if (base && isGlob === true && lastIndex > 0) { + base = str.slice(0, lastIndex); + glob = str.slice(lastIndex); + } else if (isGlob === true) { + base = ''; + glob = str; + } else { + base = str; } -} -/** @type {'InternalError'} */ -InternalError.code = 'InternalError'; -class UnsafeFilepathError extends BaseError { - /** - * @param {string} filepath - */ - constructor(filepath) { - super(`The filepath "${filepath}" contains unsafe character sequences`); - this.code = this.name = UnsafeFilepathError.code; - this.data = { filepath }; + if (base && base !== '' && base !== '/' && base !== str) { + if (isPathSeparator(base.charCodeAt(base.length - 1))) { + base = base.slice(0, -1); + } } -} -/** @type {'UnsafeFilepathError'} */ -UnsafeFilepathError.code = 'UnsafeFilepathError'; -// Modeled after https://github.com/tjfontaine/node-buffercursor -// but with the goal of being much lighter weight. -class BufferCursor { - constructor(buffer) { - this.buffer = buffer; - this._start = 0; + if (opts.unescape === true) { + if (glob) glob = utils.removeBackslashes(glob); + + if (base && backslashes === true) { + base = utils.removeBackslashes(base); + } + } + + const state = { + prefix, + input, + start, + base, + glob, + isBrace, + isBracket, + isGlob, + isExtglob, + isGlobstar, + negated, + negatedExtglob + }; + + if (opts.tokens === true) { + state.maxDepth = 0; + if (!isPathSeparator(code)) { + tokens.push(token); + } + state.tokens = tokens; } - eof() { - return this._start >= this.buffer.length - } + if (opts.parts === true || opts.tokens === true) { + let prevIndex; - tell() { - return this._start - } + for (let idx = 0; idx < slashes.length; idx++) { + const n = prevIndex ? 
prevIndex + 1 : start; + const i = slashes[idx]; + const value = input.slice(n, i); + if (opts.tokens) { + if (idx === 0 && start !== 0) { + tokens[idx].isPrefix = true; + tokens[idx].value = prefix; + } else { + tokens[idx].value = value; + } + depth(tokens[idx]); + state.maxDepth += tokens[idx].depth; + } + if (idx !== 0 || value !== '') { + parts.push(value); + } + prevIndex = i; + } - seek(n) { - this._start = n; - } + if (prevIndex && prevIndex + 1 < input.length) { + const value = input.slice(prevIndex + 1); + parts.push(value); - slice(n) { - const r = this.buffer.slice(this._start, this._start + n); - this._start += n; - return r - } + if (opts.tokens) { + tokens[tokens.length - 1].value = value; + depth(tokens[tokens.length - 1]); + state.maxDepth += tokens[tokens.length - 1].depth; + } + } - toString(enc, length) { - const r = this.buffer.toString(enc, this._start, this._start + length); - this._start += length; - return r + state.slashes = slashes; + state.parts = parts; } - write(value, length, enc) { - const r = this.buffer.write(value, this._start, length, enc); - this._start += length; - return r - } + return state; +}; - copy(source, start, end) { - const r = source.copy(this.buffer, this._start, start, end); - this._start += r; - return r - } +module.exports = scan; - readUInt8() { - const r = this.buffer.readUInt8(this._start); - this._start += 1; - return r - } - writeUInt8(value) { - const r = this.buffer.writeUInt8(value, this._start); - this._start += 1; - return r - } +/***/ }), - readUInt16BE() { - const r = this.buffer.readUInt16BE(this._start); - this._start += 2; - return r - } +/***/ 479: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - writeUInt16BE(value) { - const r = this.buffer.writeUInt16BE(value, this._start); - this._start += 2; - return r - } +"use strict"; - readUInt32BE() { - const r = this.buffer.readUInt32BE(this._start); - this._start += 4; - return r - } - writeUInt32BE(value) { - const r = this.buffer.writeUInt32BE(value, this._start); - this._start += 4; - return r - } -} +const path = __nccwpck_require__(1017); +const win32 = process.platform === 'win32'; +const { + REGEX_BACKSLASH, + REGEX_REMOVE_BACKSLASH, + REGEX_SPECIAL_CHARS, + REGEX_SPECIAL_CHARS_GLOBAL +} = __nccwpck_require__(6099); -function compareStrings(a, b) { - // https://stackoverflow.com/a/40355107/2168416 - return -(a < b) || +(a > b) -} +exports.isObject = val => val !== null && typeof val === 'object' && !Array.isArray(val); +exports.hasRegexChars = str => REGEX_SPECIAL_CHARS.test(str); +exports.isRegexChar = str => str.length === 1 && exports.hasRegexChars(str); +exports.escapeRegex = str => str.replace(REGEX_SPECIAL_CHARS_GLOBAL, '\\$1'); +exports.toPosixSlashes = str => str.replace(REGEX_BACKSLASH, '/'); -function comparePath(a, b) { - // https://stackoverflow.com/a/40355107/2168416 - return compareStrings(a.path, b.path) -} +exports.removeBackslashes = str => { + return str.replace(REGEX_REMOVE_BACKSLASH, match => { + return match === '\\' ? '' : match; + }); +}; -/** - * From https://github.com/git/git/blob/master/Documentation/technical/index-format.txt - * - * 32-bit mode, split into (high to low bits) - * - * 4-bit object type - * valid values in binary are 1000 (regular file), 1010 (symbolic link) - * and 1110 (gitlink) - * - * 3-bit unused - * - * 9-bit unix permission. Only 0755 and 0644 are valid for regular files. - * Symbolic links and gitlinks have value 0 in this field. 
- */ -function normalizeMode(mode) { - // Note: BrowserFS will use -1 for "unknown" - // I need to make it non-negative for these bitshifts to work. - let type = mode > 0 ? mode >> 12 : 0; - // If it isn't valid, assume it as a "regular file" - // 0100 = directory - // 1000 = regular file - // 1010 = symlink - // 1110 = gitlink - if ( - type !== 0b0100 && - type !== 0b1000 && - type !== 0b1010 && - type !== 0b1110 - ) { - type = 0b1000; - } - let permissions = mode & 0o777; - // Is the file executable? then 755. Else 644. - if (permissions & 0b001001001) { - permissions = 0o755; - } else { - permissions = 0o644; +exports.supportsLookbehinds = () => { + const segs = process.version.slice(1).split('.').map(Number); + if (segs.length === 3 && segs[0] >= 9 || (segs[0] === 8 && segs[1] >= 10)) { + return true; } - // If it's not a regular file, scrub all permissions - if (type !== 0b1000) permissions = 0; - return (type << 12) + permissions -} - -const MAX_UINT32 = 2 ** 32; + return false; +}; -function SecondsNanoseconds( - givenSeconds, - givenNanoseconds, - milliseconds, - date -) { - if (givenSeconds !== undefined && givenNanoseconds !== undefined) { - return [givenSeconds, givenNanoseconds] - } - if (milliseconds === undefined) { - milliseconds = date.valueOf(); +exports.isWindows = options => { + if (options && typeof options.windows === 'boolean') { + return options.windows; } - const seconds = Math.floor(milliseconds / 1000); - const nanoseconds = (milliseconds - seconds * 1000) * 1000000; - return [seconds, nanoseconds] -} + return win32 === true || path.sep === '\\'; +}; -function normalizeStats(e) { - const [ctimeSeconds, ctimeNanoseconds] = SecondsNanoseconds( - e.ctimeSeconds, - e.ctimeNanoseconds, - e.ctimeMs, - e.ctime - ); - const [mtimeSeconds, mtimeNanoseconds] = SecondsNanoseconds( - e.mtimeSeconds, - e.mtimeNanoseconds, - e.mtimeMs, - e.mtime - ); +exports.escapeLast = (input, char, lastIdx) => { + const idx = input.lastIndexOf(char, lastIdx); + if (idx === -1) return input; + if (input[idx - 1] === '\\') return exports.escapeLast(input, char, idx - 1); + return `${input.slice(0, idx)}\\${input.slice(idx)}`; +}; - return { - ctimeSeconds: ctimeSeconds % MAX_UINT32, - ctimeNanoseconds: ctimeNanoseconds % MAX_UINT32, - mtimeSeconds: mtimeSeconds % MAX_UINT32, - mtimeNanoseconds: mtimeNanoseconds % MAX_UINT32, - dev: e.dev % MAX_UINT32, - ino: e.ino % MAX_UINT32, - mode: normalizeMode(e.mode % MAX_UINT32), - uid: e.uid % MAX_UINT32, - gid: e.gid % MAX_UINT32, - // size of -1 happens over a BrowserFS HTTP Backend that doesn't serve Content-Length headers - // (like the Karma webserver) because BrowserFS HTTP Backend uses HTTP HEAD requests to do fs.stat - size: e.size > -1 ? e.size % MAX_UINT32 : 0, +exports.removePrefix = (input, state = {}) => { + let output = input; + if (output.startsWith('./')) { + output = output.slice(2); + state.prefix = './'; } -} + return output; +}; -function toHex(buffer) { - let hex = ''; - for (const byte of new Uint8Array(buffer)) { - if (byte < 16) hex += '0'; - hex += byte.toString(16); +exports.wrapOutput = (input, state = {}, options = {}) => { + const prepend = options.contains ? '' : '^'; + const append = options.contains ? 
'' : '$'; + + let output = `${prepend}(?:${input})${append}`; + if (state.negated === true) { + output = `(?:^(?!${output}).*$)`; } - return hex -} + return output; +}; -/* eslint-env node, browser */ -let supportsSubtleSHA1 = null; +/***/ }), -async function shasum(buffer) { - if (supportsSubtleSHA1 === null) { - supportsSubtleSHA1 = await testSubtleSHA1(); - } - return supportsSubtleSHA1 ? subtleSHA1(buffer) : shasumSync(buffer) -} +/***/ 4810: +/***/ ((module) => { -// This is modeled after @dominictarr's "shasum" module, -// but without the 'json-stable-stringify' dependency and -// extra type-casting features. -function shasumSync(buffer) { - return new Hash().update(buffer).digest('hex') -} +"use strict"; -async function subtleSHA1(buffer) { - const hash = await crypto.subtle.digest('SHA-1', buffer); - return toHex(hash) -} -async function testSubtleSHA1() { - // I'm using a rather crude method of progressive enhancement, because - // some browsers that have crypto.subtle.digest don't actually implement SHA-1. - try { - const hash = await subtleSHA1(new Uint8Array([])); - if (hash === 'da39a3ee5e6b4b0d3255bfef95601890afd80709') return true - } catch (_) { - // no bother - } - return false -} +const processFn = (fn, options) => function (...args) { + const P = options.promiseModule; -// Extract 1-bit assume-valid, 1-bit extended flag, 2-bit merge state flag, 12-bit path length flag -function parseCacheEntryFlags(bits) { - return { - assumeValid: Boolean(bits & 0b1000000000000000), - extended: Boolean(bits & 0b0100000000000000), - stage: (bits & 0b0011000000000000) >> 12, - nameLength: bits & 0b0000111111111111, - } -} + return new P((resolve, reject) => { + if (options.multiArgs) { + args.push((...result) => { + if (options.errorFirst) { + if (result[0]) { + reject(result); + } else { + result.shift(); + resolve(result); + } + } else { + resolve(result); + } + }); + } else if (options.errorFirst) { + args.push((error, result) => { + if (error) { + reject(error); + } else { + resolve(result); + } + }); + } else { + args.push(resolve); + } -function renderCacheEntryFlags(entry) { - const flags = entry.flags; - // 1-bit extended flag (must be zero in version 2) - flags.extended = false; - // 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF - // is stored in this field. - flags.nameLength = Math.min(Buffer.from(entry.path).length, 0xfff); - return ( - (flags.assumeValid ? 0b1000000000000000 : 0) + - (flags.extended ? 0b0100000000000000 : 0) + - ((flags.stage & 0b11) << 12) + - (flags.nameLength & 0b111111111111) - ) -} + fn.apply(this, args); + }); +}; -class GitIndex { - /*:: - _entries: Map - _dirty: boolean // Used to determine if index needs to be saved to filesystem - */ - constructor(entries) { - this._dirty = false; - this._entries = entries || new Map(); - } +module.exports = (input, options) => { + options = Object.assign({ + exclude: [/.+(Sync|Stream)$/], + errorFirst: true, + promiseModule: Promise + }, options); - static async from(buffer) { - if (Buffer.isBuffer(buffer)) { - return GitIndex.fromBuffer(buffer) - } else if (buffer === null) { - return new GitIndex(null) - } else { - throw new InternalError('invalid type passed to GitIndex.from') - } - } + const objType = typeof input; + if (!(input !== null && (objType === 'object' || objType === 'function'))) { + throw new TypeError(`Expected \`input\` to be a \`Function\` or \`Object\`, got \`${input === null ? 
'null' : objType}\``); + } - static async fromBuffer(buffer) { - // Verify shasum - const shaComputed = await shasum(buffer.slice(0, -20)); - const shaClaimed = buffer.slice(-20).toString('hex'); - if (shaClaimed !== shaComputed) { - throw new InternalError( - `Invalid checksum in GitIndex buffer: expected ${shaClaimed} but saw ${shaComputed}` - ) - } - const reader = new BufferCursor(buffer); - const _entries = new Map(); - const magic = reader.toString('utf8', 4); - if (magic !== 'DIRC') { - throw new InternalError(`Inavlid dircache magic file number: ${magic}`) - } - const version = reader.readUInt32BE(); - if (version !== 2) { - throw new InternalError(`Unsupported dircache version: ${version}`) - } - const numEntries = reader.readUInt32BE(); - let i = 0; - while (!reader.eof() && i < numEntries) { - const entry = {}; - entry.ctimeSeconds = reader.readUInt32BE(); - entry.ctimeNanoseconds = reader.readUInt32BE(); - entry.mtimeSeconds = reader.readUInt32BE(); - entry.mtimeNanoseconds = reader.readUInt32BE(); - entry.dev = reader.readUInt32BE(); - entry.ino = reader.readUInt32BE(); - entry.mode = reader.readUInt32BE(); - entry.uid = reader.readUInt32BE(); - entry.gid = reader.readUInt32BE(); - entry.size = reader.readUInt32BE(); - entry.oid = reader.slice(20).toString('hex'); - const flags = reader.readUInt16BE(); - entry.flags = parseCacheEntryFlags(flags); - // TODO: handle if (version === 3 && entry.flags.extended) - const pathlength = buffer.indexOf(0, reader.tell() + 1) - reader.tell(); - if (pathlength < 1) { - throw new InternalError(`Got a path length of: ${pathlength}`) - } - // TODO: handle pathnames larger than 12 bits - entry.path = reader.toString('utf8', pathlength); + const filter = key => { + const match = pattern => typeof pattern === 'string' ? key === pattern : pattern.test(key); + return options.include ? options.include.some(match) : !options.exclude.some(match); + }; - // Prevent malicious paths like "..\foo" - if (entry.path.includes('..\\') || entry.path.includes('../')) { - throw new UnsafeFilepathError(entry.path) - } + let ret; + if (objType === 'function') { + ret = function (...args) { + return options.excludeMain ? input(...args) : processFn(input, options).apply(this, args); + }; + } else { + ret = Object.create(Object.getPrototypeOf(input)); + } - // The next bit is awkward. We expect 1 to 8 null characters - // such that the total size of the entry is a multiple of 8 bits. - // (Hence subtract 12 bytes for the header.) - let padding = 8 - ((reader.tell() - 12) % 8); - if (padding === 0) padding = 8; - while (padding--) { - const tmp = reader.readUInt8(); - if (tmp !== 0) { - throw new InternalError( - `Expected 1-8 null characters but got '${tmp}' after ${entry.path}` - ) - } else if (reader.eof()) { - throw new InternalError('Unexpected end of file') - } - } - // end of awkward part - _entries.set(entry.path, entry); - i++; - } - return new GitIndex(_entries) - } + for (const key in input) { // eslint-disable-line guard-for-in + const property = input[key]; + ret[key] = typeof property === 'function' && filter(key) ? 
processFn(property, options) : property; + } - get entries() { - return [...this._entries.values()].sort(comparePath) - } + return ret; +}; - get entriesMap() { - return this._entries - } - *[Symbol.iterator]() { - for (const entry of this.entries) { - yield entry; - } - } +/***/ }), - insert({ filepath, stats, oid }) { - stats = normalizeStats(stats); - const bfilepath = Buffer.from(filepath); - const entry = { - ctimeSeconds: stats.ctimeSeconds, - ctimeNanoseconds: stats.ctimeNanoseconds, - mtimeSeconds: stats.mtimeSeconds, - mtimeNanoseconds: stats.mtimeNanoseconds, - dev: stats.dev, - ino: stats.ino, - // We provide a fallback value for `mode` here because not all fs - // implementations assign it, but we use it in GitTree. - // '100644' is for a "regular non-executable file" - mode: stats.mode || 0o100644, - uid: stats.uid, - gid: stats.gid, - size: stats.size, - path: filepath, - oid: oid, - flags: { - assumeValid: false, - extended: false, - stage: 0, - nameLength: bfilepath.length < 0xfff ? bfilepath.length : 0xfff, - }, - }; - this._entries.set(entry.path, entry); - this._dirty = true; - } +/***/ 9217: +/***/ ((module) => { + +"use strict"; + + +/** + * protocols + * Returns the protocols of an input url. + * + * @name protocols + * @function + * @param {String|URL} input The input url (string or `URL` instance) + * @param {Boolean|Number} first If `true`, the first protocol will be returned. If number, it will represent the zero-based index of the protocols array. + * @return {Array|String} The array of protocols or the specified protocol. + */ +module.exports = function protocols(input, first) { - delete({ filepath }) { - if (this._entries.has(filepath)) { - this._entries.delete(filepath); - } else { - for (const key of this._entries.keys()) { - if (key.startsWith(filepath + '/')) { - this._entries.delete(key); - } - } + if (first === true) { + first = 0; } - this._dirty = true; - } - - clear() { - this._entries.clear(); - this._dirty = true; - } - render() { - return this.entries - .map(entry => `${entry.mode.toString(8)} ${entry.oid} ${entry.path}`) - .join('\n') - } + var prots = ""; + if (typeof input === "string") { + try { + prots = new URL(input).protocol; + } catch (e) {} + } else if (input && input.constructor === URL) { + prots = input.protocol; + } - async toObject() { - const header = Buffer.alloc(12); - const writer = new BufferCursor(header); - writer.write('DIRC', 4, 'utf8'); - writer.writeUInt32BE(2); - writer.writeUInt32BE(this.entries.length); - const body = Buffer.concat( - this.entries.map(entry => { - const bpath = Buffer.from(entry.path); - // the fixed length + the filename + at least one null char => align by 8 - const length = Math.ceil((62 + bpath.length + 1) / 8) * 8; - const written = Buffer.alloc(length); - const writer = new BufferCursor(written); - const stat = normalizeStats(entry); - writer.writeUInt32BE(stat.ctimeSeconds); - writer.writeUInt32BE(stat.ctimeNanoseconds); - writer.writeUInt32BE(stat.mtimeSeconds); - writer.writeUInt32BE(stat.mtimeNanoseconds); - writer.writeUInt32BE(stat.dev); - writer.writeUInt32BE(stat.ino); - writer.writeUInt32BE(stat.mode); - writer.writeUInt32BE(stat.uid); - writer.writeUInt32BE(stat.gid); - writer.writeUInt32BE(stat.size); - writer.write(entry.oid, 20, 'hex'); - writer.writeUInt16BE(renderCacheEntryFlags(entry)); - writer.write(entry.path, bpath.length, 'utf8'); - return written - }) - ); - const main = Buffer.concat([header, body]); - const sum = await shasum(main); - return Buffer.concat([main, 
Buffer.from(sum, 'hex')]) - } -} + var splits = prots.split(/\:|\+/).filter(Boolean); -function compareStats(entry, stats) { - // Comparison based on the description in Paragraph 4 of - // https://www.kernel.org/pub/software/scm/git/docs/technical/racy-git.txt - const e = normalizeStats(entry); - const s = normalizeStats(stats); - const staleness = - e.mode !== s.mode || - e.mtimeSeconds !== s.mtimeSeconds || - e.ctimeSeconds !== s.ctimeSeconds || - e.uid !== s.uid || - e.gid !== s.gid || - e.ino !== s.ino || - e.size !== s.size; - return staleness -} + if (typeof first === "number") { + return splits[first]; + } -// import LockManager from 'travix-lock-manager' + return splits; +}; -// import Lock from '../utils.js' +/***/ }), -// const lm = new LockManager() -let lock = null; +/***/ 9795: +/***/ ((module) => { -const IndexCache = Symbol('IndexCache'); +/*! queue-microtask. MIT License. Feross Aboukhadijeh */ +let promise -function createCache() { - return { - map: new Map(), - stats: new Map(), - } -} +module.exports = typeof queueMicrotask === 'function' + ? queueMicrotask.bind(typeof window !== 'undefined' ? window : global) + // reuse resolved promise, and allocate it lazily + : cb => (promise || (promise = Promise.resolve())) + .then(cb) + .catch(err => setTimeout(() => { throw err }, 0)) -async function updateCachedIndexFile(fs, filepath, cache) { - const stat = await fs.lstat(filepath); - const rawIndexFile = await fs.read(filepath); - const index = await GitIndex.from(rawIndexFile); - // cache the GitIndex object so we don't need to re-read it every time. - cache.map.set(filepath, index); - // Save the stat data for the index so we know whether the cached file is stale (modified by an outside process). - cache.stats.set(filepath, stat); -} -// Determine whether our copy of the index file is stale -async function isIndexStale(fs, filepath, cache) { - const savedStats = cache.stats.get(filepath); - if (savedStats === undefined) return true - const currStats = await fs.lstat(filepath); - if (savedStats === null) return false - if (currStats === null) return false - return compareStats(savedStats, currStats) -} +/***/ }), -class GitIndexManager { - /** - * - * @param {object} opts - * @param {import('../models/FileSystem.js').FileSystem} opts.fs - * @param {string} opts.gitdir - * @param {object} opts.cache - * @param {function(GitIndex): any} closure - */ - static async acquire({ fs, gitdir, cache }, closure) { - if (!cache[IndexCache]) cache[IndexCache] = createCache(); +/***/ 2113: +/***/ ((module) => { - const filepath = `${gitdir}/index`; - if (lock === null) lock = new AsyncLock({ maxPending: Infinity }); - let result; - await lock.acquire(filepath, async function() { - // Acquire a file lock while we're reading the index - // to make sure other processes aren't writing to it - // simultaneously, which could result in a corrupted index. 
- // const fileLock = await Lock(filepath) - if (await isIndexStale(fs, filepath, cache[IndexCache])) { - await updateCachedIndexFile(fs, filepath, cache[IndexCache]); - } - const index = cache[IndexCache].map.get(filepath); - result = await closure(index); - if (index._dirty) { - // Acquire a file lock while we're writing the index file - // let fileLock = await Lock(filepath) - const buffer = await index.toObject(); - await fs.write(filepath, buffer); - // Update cached stat value - cache[IndexCache].stats.set(filepath, await fs.lstat(filepath)); - index._dirty = false; - } - }); - return result - } -} +"use strict"; -function basename(path) { - const last = Math.max(path.lastIndexOf('/'), path.lastIndexOf('\\')); - if (last > -1) { - path = path.slice(last + 1); - } - return path -} -function dirname(path) { - const last = Math.max(path.lastIndexOf('/'), path.lastIndexOf('\\')); - if (last === -1) return '.' - if (last === 0) return '/' - return path.slice(0, last) -} +function reusify (Constructor) { + var head = new Constructor() + var tail = head -/*:: -type Node = { - type: string, - fullpath: string, - basename: string, - metadata: Object, // mode, oid - parent?: Node, - children: Array -} -*/ + function get () { + var current = head -function flatFileListToDirectoryStructure(files) { - const inodes = new Map(); - const mkdir = function(name) { - if (!inodes.has(name)) { - const dir = { - type: 'tree', - fullpath: name, - basename: basename(name), - metadata: {}, - children: [], - }; - inodes.set(name, dir); - // This recursively generates any missing parent folders. - // We do it after we've added the inode to the set so that - // we don't recurse infinitely trying to create the root '.' dirname. - dir.parent = mkdir(dirname(name)); - if (dir.parent && dir.parent !== dir) dir.parent.children.push(dir); + if (current.next) { + head = current.next + } else { + head = new Constructor() + tail = head } - return inodes.get(name) - }; - const mkfile = function(name, metadata) { - if (!inodes.has(name)) { - const file = { - type: 'blob', - fullpath: name, - basename: basename(name), - metadata: metadata, - // This recursively generates any missing parent folders. 
- parent: mkdir(dirname(name)), - children: [], - }; - if (file.parent) file.parent.children.push(file); - inodes.set(name, file); - } - return inodes.get(name) - }; + current.next = null - mkdir('.'); - for (const file of files) { - mkfile(file.path, file); + return current } - return inodes -} -/** - * - * @param {number} mode - */ -function mode2type(mode) { - // prettier-ignore - switch (mode) { - case 0o040000: return 'tree' - case 0o100644: return 'blob' - case 0o100755: return 'blob' - case 0o120000: return 'blob' - case 0o160000: return 'commit' + function release (obj) { + tail.next = obj + tail = obj + } + + return { + get: get, + release: release } - throw new InternalError(`Unexpected GitTree entry mode: ${mode.toString(8)}`) } -class GitWalkerIndex { - constructor({ fs, gitdir, cache }) { - this.treePromise = GitIndexManager.acquire( - { fs, gitdir, cache }, - async function(index) { - return flatFileListToDirectoryStructure(index.entries) - } - ); - const walker = this; - this.ConstructEntry = class StageEntry { - constructor(fullpath) { - this._fullpath = fullpath; - this._type = false; - this._mode = false; - this._stat = false; - this._oid = false; - } +module.exports = reusify - async type() { - return walker.type(this) - } - async mode() { - return walker.mode(this) - } +/***/ }), - async stat() { - return walker.stat(this) - } +/***/ 5288: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - async content() { - return walker.content(this) - } +/*! run-parallel. MIT License. Feross Aboukhadijeh */ +module.exports = runParallel - async oid() { - return walker.oid(this) - } - }; - } +const queueMicrotask = __nccwpck_require__(9795) - async readdir(entry) { - const filepath = entry._fullpath; - const tree = await this.treePromise; - const inode = tree.get(filepath); - if (!inode) return null - if (inode.type === 'blob') return null - if (inode.type !== 'tree') { - throw new Error(`ENOTDIR: not a directory, scandir '${filepath}'`) - } - const names = inode.children.map(inode => inode.fullpath); - names.sort(compareStrings); - return names - } +function runParallel (tasks, cb) { + let results, pending, keys + let isSync = true - async type(entry) { - if (entry._type === false) { - await entry.stat(); - } - return entry._type + if (Array.isArray(tasks)) { + results = [] + pending = tasks.length + } else { + keys = Object.keys(tasks) + results = {} + pending = keys.length } - async mode(entry) { - if (entry._mode === false) { - await entry.stat(); + function done (err) { + function end () { + if (cb) cb(err, results) + cb = null } - return entry._mode + if (isSync) queueMicrotask(end) + else end() } - async stat(entry) { - if (entry._stat === false) { - const tree = await this.treePromise; - const inode = tree.get(entry._fullpath); - if (!inode) { - throw new Error( - `ENOENT: no such file or directory, lstat '${entry._fullpath}'` - ) - } - const stats = inode.type === 'tree' ? {} : normalizeStats(inode.metadata); - entry._type = inode.type === 'tree' ? 
'tree' : mode2type(stats.mode); - entry._mode = stats.mode; - if (inode.type === 'tree') { - entry._stat = undefined; - } else { - entry._stat = stats; - } + function each (i, err, result) { + results[i] = result + if (--pending === 0 || err) { + done(err) } - return entry._stat } - async content(_entry) { - // Cannot get content for an index entry + if (!pending) { + // empty + done(null) + } else if (keys) { + // object + keys.forEach(function (key) { + tasks[key](function (err, result) { each(key, err, result) }) + }) + } else { + // array + tasks.forEach(function (task, i) { + task(function (err, result) { each(i, err, result) }) + }) } - async oid(entry) { - if (entry._oid === false) { - const tree = await this.treePromise; - const inode = tree.get(entry._fullpath); - entry._oid = inode.metadata.oid; - } - return entry._oid - } + isSync = false } -// This is part of an elaborate system to facilitate code-splitting / tree-shaking. -// commands/walk.js can depend on only this, and the actual Walker classes exported -// can be opaque - only having a single property (this symbol) that is not enumerable, -// and thus the constructor can be passed as an argument to walk while being "unusable" -// outside of it. -const GitWalkSymbol = Symbol('GitWalkSymbol'); -// @ts-check +/***/ }), -/** - * @returns {Walker} - */ -function STAGE() { - const o = Object.create(null); - Object.defineProperty(o, GitWalkSymbol, { - value: function({ fs, gitdir, cache }) { - return new GitWalkerIndex({ fs, gitdir, cache }) - }, - }); - Object.freeze(o); - return o -} +/***/ 1867: +/***/ ((module, exports, __nccwpck_require__) => { -// @ts-check +/*! safe-buffer. MIT License. Feross Aboukhadijeh */ +/* eslint-disable node/no-deprecated-api */ +var buffer = __nccwpck_require__(4300) +var Buffer = buffer.Buffer -class NotFoundError extends BaseError { - /** - * @param {string} what - */ - constructor(what) { - super(`Could not find ${what}.`); - this.code = this.name = NotFoundError.code; - this.data = { what }; +// alternative to using Object.keys for old browsers +function copyProps (src, dst) { + for (var key in src) { + dst[key] = src[key] } } -/** @type {'NotFoundError'} */ -NotFoundError.code = 'NotFoundError'; - -class ObjectTypeError extends BaseError { - /** - * @param {string} oid - * @param {'blob'|'commit'|'tag'|'tree'} actual - * @param {'blob'|'commit'|'tag'|'tree'} expected - * @param {string} [filepath] - */ - constructor(oid, actual, expected, filepath) { - super( - `Object ${oid} ${ - filepath ? 
`at ${filepath}` : '' - }was anticipated to be a ${expected} but it is a ${actual}.` - ); - this.code = this.name = ObjectTypeError.code; - this.data = { oid, actual, expected, filepath }; - } +if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) { + module.exports = buffer +} else { + // Copy properties from require('buffer') + copyProps(buffer, exports) + exports.Buffer = SafeBuffer } -/** @type {'ObjectTypeError'} */ -ObjectTypeError.code = 'ObjectTypeError'; -class InvalidOidError extends BaseError { - /** - * @param {string} value - */ - constructor(value) { - super(`Expected a 40-char hex object id but saw "${value}".`); - this.code = this.name = InvalidOidError.code; - this.data = { value }; - } +function SafeBuffer (arg, encodingOrOffset, length) { + return Buffer(arg, encodingOrOffset, length) } -/** @type {'InvalidOidError'} */ -InvalidOidError.code = 'InvalidOidError'; -class NoRefspecError extends BaseError { - /** - * @param {string} remote - */ - constructor(remote) { - super(`Could not find a fetch refspec for remote "${remote}". Make sure the config file has an entry like the following: -[remote "${remote}"] -\tfetch = +refs/heads/*:refs/remotes/origin/* -`); - this.code = this.name = NoRefspecError.code; - this.data = { remote }; +SafeBuffer.prototype = Object.create(Buffer.prototype) + +// Copy static methods from Buffer +copyProps(Buffer, SafeBuffer) + +SafeBuffer.from = function (arg, encodingOrOffset, length) { + if (typeof arg === 'number') { + throw new TypeError('Argument must not be a number') } + return Buffer(arg, encodingOrOffset, length) } -/** @type {'NoRefspecError'} */ -NoRefspecError.code = 'NoRefspecError'; -class GitPackedRefs { - constructor(text) { - this.refs = new Map(); - this.parsedConfig = []; - if (text) { - let key = null; - this.parsedConfig = text - .trim() - .split('\n') - .map(line => { - if (/^\s*#/.test(line)) { - return { line, comment: true } - } - const i = line.indexOf(' '); - if (line.startsWith('^')) { - // This is a oid for the commit associated with the annotated tag immediately preceding this line. 
- // Trim off the '^' - const value = line.slice(1); - // The tagname^{} syntax is based on the output of `git show-ref --tags -d` - this.refs.set(key + '^{}', value); - return { line, ref: key, peeled: value } - } else { - // This is an oid followed by the ref name - const value = line.slice(0, i); - key = line.slice(i + 1); - this.refs.set(key, value); - return { line, ref: key, oid: value } - } - }); +SafeBuffer.alloc = function (size, fill, encoding) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + var buf = Buffer(size) + if (fill !== undefined) { + if (typeof encoding === 'string') { + buf.fill(fill, encoding) + } else { + buf.fill(fill) } - return this + } else { + buf.fill(0) } + return buf +} - static from(text) { - return new GitPackedRefs(text) +SafeBuffer.allocUnsafe = function (size) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') } + return Buffer(size) +} - delete(ref) { - this.parsedConfig = this.parsedConfig.filter(entry => entry.ref !== ref); - this.refs.delete(ref); +SafeBuffer.allocUnsafeSlow = function (size) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') } + return buffer.SlowBuffer(size) +} - toString() { - return this.parsedConfig.map(({ line }) => line).join('\n') + '\n' - } + +/***/ }), + +/***/ 3251: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var Buffer = (__nccwpck_require__(1867).Buffer) + +// prototype class for hash functions +function Hash (blockSize, finalSize) { + this._block = Buffer.alloc(blockSize) + this._finalSize = finalSize + this._blockSize = blockSize + this._len = 0 } -class GitRefSpec { - constructor({ remotePath, localPath, force, matchPrefix }) { - Object.assign(this, { - remotePath, - localPath, - force, - matchPrefix, - }); +Hash.prototype.update = function (data, enc) { + if (typeof data === 'string') { + enc = enc || 'utf8' + data = Buffer.from(data, enc) } - static from(refspec) { - const [ - forceMatch, - remotePath, - remoteGlobMatch, - localPath, - localGlobMatch, - ] = refspec.match(/^(\+?)(.*?)(\*?):(.*?)(\*?)$/).slice(1); - const force = forceMatch === '+'; - const remoteIsGlob = remoteGlobMatch === '*'; - const localIsGlob = localGlobMatch === '*'; - // validate - // TODO: Make this check more nuanced, and depend on whether this is a fetch refspec or a push refspec - if (remoteIsGlob !== localIsGlob) { - throw new InternalError('Invalid refspec') - } - return new GitRefSpec({ - remotePath, - localPath, - force, - matchPrefix: remoteIsGlob, - }) - // TODO: We need to run resolveRef on both paths to expand them to their full name. 
- } + var block = this._block + var blockSize = this._blockSize + var length = data.length + var accum = this._len - translate(remoteBranch) { - if (this.matchPrefix) { - if (remoteBranch.startsWith(this.remotePath)) { - return this.localPath + remoteBranch.replace(this.remotePath, '') - } - } else { - if (remoteBranch === this.remotePath) return this.localPath + for (var offset = 0; offset < length;) { + var assigned = accum % blockSize + var remainder = Math.min(length - offset, blockSize - assigned) + + for (var i = 0; i < remainder; i++) { + block[assigned + i] = data[offset + i] } - return null - } - reverseTranslate(localBranch) { - if (this.matchPrefix) { - if (localBranch.startsWith(this.localPath)) { - return this.remotePath + localBranch.replace(this.localPath, '') - } - } else { - if (localBranch === this.localPath) return this.remotePath + accum += remainder + offset += remainder + + if ((accum % blockSize) === 0) { + this._update(block) } - return null } + + this._len += length + return this } -class GitRefSpecSet { - constructor(rules = []) { - this.rules = rules; - } +Hash.prototype.digest = function (enc) { + var rem = this._len % this._blockSize - static from(refspecs) { - const rules = []; - for (const refspec of refspecs) { - rules.push(GitRefSpec.from(refspec)); // might throw - } - return new GitRefSpecSet(rules) - } + this._block[rem] = 0x80 - add(refspec) { - const rule = GitRefSpec.from(refspec); // might throw - this.rules.push(rule); - } + // zero (rem + 1) trailing bits, where (rem + 1) is the smallest + // non-negative solution to the equation (length + 1 + (rem + 1)) === finalSize mod blockSize + this._block.fill(0, rem + 1) - translate(remoteRefs) { - const result = []; - for (const rule of this.rules) { - for (const remoteRef of remoteRefs) { - const localRef = rule.translate(remoteRef); - if (localRef) { - result.push([remoteRef, localRef]); - } - } - } - return result + if (rem >= this._finalSize) { + this._update(this._block) + this._block.fill(0) } - translateOne(remoteRef) { - let result = null; - for (const rule of this.rules) { - const localRef = rule.translate(remoteRef); - if (localRef) { - result = localRef; - } - } - return result - } + var bits = this._len * 8 - localNamespaces() { - return this.rules - .filter(rule => rule.matchPrefix) - .map(rule => rule.localPath.replace(/\/$/, '')) - } -} + // uint32 + if (bits <= 0xffffffff) { + this._block.writeUInt32BE(bits, this._blockSize - 4) -function compareRefNames(a, b) { - // https://stackoverflow.com/a/40355107/2168416 - const _a = a.replace(/\^\{\}$/, ''); - const _b = b.replace(/\^\{\}$/, ''); - const tmp = -(_a < _b) || +(_a > _b); - if (tmp === 0) { - return a.endsWith('^{}') ? 1 : -1 + // uint64 + } else { + var lowBits = (bits & 0xffffffff) >>> 0 + var highBits = (bits - lowBits) / 0x100000000 + + this._block.writeUInt32BE(highBits, this._blockSize - 8) + this._block.writeUInt32BE(lowBits, this._blockSize - 4) } - return tmp + + this._update(this._block) + var hash = this._hash() + + return enc ? hash.toString(enc) : hash } -function normalizePath(path) { - return path - .replace(/\/\.\//g, '/') // Replace '/./' with '/' - .replace(/\/{2,}/g, '/') // Replace consecutive '/' - .replace(/^\/\.$/, '/') // if path === '/.' return '/' - .replace(/^\.\/$/, '.') // if path === './' return '.' - .replace(/^\.\//, '') // Remove leading './' - .replace(/\/\.$/, '') // Remove trailing '/.' - .replace(/(.+)\/$/, '$1') // Remove trailing '/' - .replace(/^$/, '.') // if path === '' return '.' 
+Hash.prototype._update = function () { + throw new Error('_update must be implemented by subclass') } -// For some reason path.posix.join is undefined in webpack +module.exports = Hash -function join(...parts) { - return normalizePath(parts.map(normalizePath).join('/')) + +/***/ }), + +/***/ 2398: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/* + * A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined + * in FIPS PUB 180-1 + * Version 2.1a Copyright Paul Johnston 2000 - 2002. + * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet + * Distributed under the BSD License + * See http://pajhome.org.uk/crypt/md5 for details. + */ + +var inherits = __nccwpck_require__(4124) +var Hash = __nccwpck_require__(3251) +var Buffer = (__nccwpck_require__(1867).Buffer) + +var K = [ + 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc | 0, 0xca62c1d6 | 0 +] + +var W = new Array(80) + +function Sha1 () { + this.init() + this._w = W + + Hash.call(this, 64, 56) } -// This is straight from parse_unit_factor in config.c of canonical git -const num = val => { - val = val.toLowerCase(); - let n = parseInt(val); - if (val.endsWith('k')) n *= 1024; - if (val.endsWith('m')) n *= 1024 * 1024; - if (val.endsWith('g')) n *= 1024 * 1024 * 1024; - return n -}; +inherits(Sha1, Hash) -// This is straight from git_parse_maybe_bool_text in config.c of canonical git -const bool = val => { - val = val.trim().toLowerCase(); - if (val === 'true' || val === 'yes' || val === 'on') return true - if (val === 'false' || val === 'no' || val === 'off') return false - throw Error( - `Expected 'true', 'false', 'yes', 'no', 'on', or 'off', but got ${val}` - ) -}; +Sha1.prototype.init = function () { + this._a = 0x67452301 + this._b = 0xefcdab89 + this._c = 0x98badcfe + this._d = 0x10325476 + this._e = 0xc3d2e1f0 + + return this +} -const schema = { - core: { - filemode: bool, - bare: bool, - logallrefupdates: bool, - symlinks: bool, - ignorecase: bool, - bigFileThreshold: num, - }, -}; +function rotl1 (num) { + return (num << 1) | (num >>> 31) +} -// https://git-scm.com/docs/git-config#_syntax +function rotl5 (num) { + return (num << 5) | (num >>> 27) +} -// section starts with [ and ends with ] -// section is alphanumeric (ASCII) with - and . 
-// section is case insensitive -// subsection is optionnal -// subsection is specified after section and one or more spaces -// subsection is specified between double quotes -const SECTION_LINE_REGEX = /^\[([A-Za-z0-9-.]+)(?: "(.*)")?\]$/; -const SECTION_REGEX = /^[A-Za-z0-9-.]+$/; +function rotl30 (num) { + return (num << 30) | (num >>> 2) +} -// variable lines contain a name, and equal sign and then a value -// variable lines can also only contain a name (the implicit value is a boolean true) -// variable name is alphanumeric (ASCII) with - -// variable name starts with an alphabetic character -// variable name is case insensitive -const VARIABLE_LINE_REGEX = /^([A-Za-z][A-Za-z-]*)(?: *= *(.*))?$/; -const VARIABLE_NAME_REGEX = /^[A-Za-z][A-Za-z-]*$/; +function ft (s, b, c, d) { + if (s === 0) return (b & c) | ((~b) & d) + if (s === 2) return (b & c) | (b & d) | (c & d) + return b ^ c ^ d +} -const VARIABLE_VALUE_COMMENT_REGEX = /^(.*?)( *[#;].*)$/; +Sha1.prototype._update = function (M) { + var W = this._w -const extractSectionLine = line => { - const matches = SECTION_LINE_REGEX.exec(line); - if (matches != null) { - const [section, subsection] = matches.slice(1); - return [section, subsection] - } - return null -}; + var a = this._a | 0 + var b = this._b | 0 + var c = this._c | 0 + var d = this._d | 0 + var e = this._e | 0 -const extractVariableLine = line => { - const matches = VARIABLE_LINE_REGEX.exec(line); - if (matches != null) { - const [name, rawValue = 'true'] = matches.slice(1); - const valueWithoutComments = removeComments(rawValue); - const valueWithoutQuotes = removeQuotes(valueWithoutComments); - return [name, valueWithoutQuotes] - } - return null -}; + for (var i = 0; i < 16; ++i) W[i] = M.readInt32BE(i * 4) + for (; i < 80; ++i) W[i] = rotl1(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16]) -const removeComments = rawValue => { - const commentMatches = VARIABLE_VALUE_COMMENT_REGEX.exec(rawValue); - if (commentMatches == null) { - return rawValue - } - const [valueWithoutComment, comment] = commentMatches.slice(1); - // if odd number of quotes before and after comment => comment is escaped - if ( - hasOddNumberOfQuotes(valueWithoutComment) && - hasOddNumberOfQuotes(comment) - ) { - return `${valueWithoutComment}${comment}` + for (var j = 0; j < 80; ++j) { + var s = ~~(j / 20) + var t = (rotl5(a) + ft(s, b, c, d) + e + W[j] + K[s]) | 0 + + e = d + d = c + c = rotl30(b) + b = a + a = t } - return valueWithoutComment -}; -const hasOddNumberOfQuotes = text => { - const numberOfQuotes = (text.match(/(?:^|[^\\])"/g) || []).length; - return numberOfQuotes % 2 !== 0 -}; + this._a = (a + this._a) | 0 + this._b = (b + this._b) | 0 + this._c = (c + this._c) | 0 + this._d = (d + this._d) | 0 + this._e = (e + this._e) | 0 +} -const removeQuotes = text => { - return text.split('').reduce((newText, c, idx, text) => { - const isQuote = c === '"' && text[idx - 1] !== '\\'; - const isEscapeForQuote = c === '\\' && text[idx + 1] === '"'; - if (isQuote || isEscapeForQuote) { - return newText - } - return newText + c - }, '') -}; +Sha1.prototype._hash = function () { + var H = Buffer.allocUnsafe(20) -const lower = text => { - return text != null ? 
text.toLowerCase() : null -}; + H.writeInt32BE(this._a | 0, 0) + H.writeInt32BE(this._b | 0, 4) + H.writeInt32BE(this._c | 0, 8) + H.writeInt32BE(this._d | 0, 12) + H.writeInt32BE(this._e | 0, 16) -const getPath = (section, subsection, name) => { - return [lower(section), subsection, lower(name)] - .filter(a => a != null) - .join('.') -}; + return H +} -const findLastIndex = (array, callback) => { - return array.reduce((lastIndex, item, index) => { - return callback(item) ? index : lastIndex - }, -1) -}; +module.exports = Sha1 -// Note: there are a LOT of edge cases that aren't covered (e.g. keys in sections that also -// have subsections, [include] directives, etc. -class GitConfig { - constructor(text) { - let section = null; - let subsection = null; - this.parsedConfig = text.split('\n').map(line => { - let name = null; - let value = null; - const trimmedLine = line.trim(); - const extractedSection = extractSectionLine(trimmedLine); - const isSection = extractedSection != null; - if (isSection) { - ;[section, subsection] = extractedSection; - } else { - const extractedVariable = extractVariableLine(trimmedLine); - const isVariable = extractedVariable != null; - if (isVariable) { - ;[name, value] = extractedVariable; - } - } +/***/ }), - const path = getPath(section, subsection, name); - return { line, isSection, section, subsection, name, value, path } - }); - } +/***/ 1861: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - static from(text) { - return new GitConfig(text) - } +"use strict"; +/*! + * to-regex-range + * + * Copyright (c) 2015-present, Jon Schlinkert. + * Released under the MIT License. + */ - async get(path, getall = false) { - const allValues = this.parsedConfig - .filter(config => config.path === path.toLowerCase()) - .map(({ section, name, value }) => { - const fn = schema[section] && schema[section][name]; - return fn ? fn(value) : value - }); - return getall ? allValues : allValues.pop() - } - async getall(path) { - return this.get(path, true) - } - async getSubsections(section) { - return this.parsedConfig - .filter(config => config.section === section && config.isSection) - .map(config => config.subsection) - } +const isNumber = __nccwpck_require__(5680); - async deleteSection(section, subsection) { - this.parsedConfig = this.parsedConfig.filter( - config => - !(config.section === section && config.subsection === subsection) - ); +const toRegexRange = (min, max, options) => { + if (isNumber(min) === false) { + throw new TypeError('toRegexRange: expected the first argument to be a number'); } - async append(path, value) { - return this.set(path, value, true) + if (max === void 0 || min === max) { + return String(min); } - async set(path, value, append = false) { - const configIndex = findLastIndex( - this.parsedConfig, - config => config.path === path.toLowerCase() - ); - if (value == null) { - if (configIndex !== -1) { - this.parsedConfig.splice(configIndex, 1); - } - } else { - if (configIndex !== -1) { - const config = this.parsedConfig[configIndex]; - const modifiedConfig = Object.assign({}, config, { - value, - modified: true, - }); - if (append) { - this.parsedConfig.splice(configIndex + 1, 0, modifiedConfig); - } else { - this.parsedConfig[configIndex] = modifiedConfig; - } - } else { - const pathSegments = path.split('.'); - const section = pathSegments.shift().toLowerCase(); - const name = pathSegments.pop(); - const subsection = pathSegments.length - ? 
pathSegments.join('.').toLowerCase() - : undefined; - const sectionPath = subsection ? section + '.' + subsection : section; - const sectionIndex = this.parsedConfig.findIndex( - config => config.path === sectionPath - ); - const newConfig = { - section, - subsection, - name, - value, - modified: true, - path: getPath(section, subsection, name), - }; - if (SECTION_REGEX.test(section) && VARIABLE_NAME_REGEX.test(name)) { - if (sectionIndex >= 0) { - // Reuse existing section - this.parsedConfig.splice(sectionIndex + 1, 0, newConfig); - } else { - // Add a new section - const newSection = { - section, - subsection, - modified: true, - path: getPath(section, subsection, null), - }; - this.parsedConfig.push(newSection, newConfig); - } - } - } + if (isNumber(max) === false) { + throw new TypeError('toRegexRange: expected the second argument to be a number.'); + } + + let opts = { relaxZeros: true, ...options }; + if (typeof opts.strictZeros === 'boolean') { + opts.relaxZeros = opts.strictZeros === false; + } + + let relax = String(opts.relaxZeros); + let shorthand = String(opts.shorthand); + let capture = String(opts.capture); + let wrap = String(opts.wrap); + let cacheKey = min + ':' + max + '=' + relax + shorthand + capture + wrap; + + if (toRegexRange.cache.hasOwnProperty(cacheKey)) { + return toRegexRange.cache[cacheKey].result; + } + + let a = Math.min(min, max); + let b = Math.max(min, max); + + if (Math.abs(a - b) === 1) { + let result = min + '|' + max; + if (opts.capture) { + return `(${result})`; + } + if (opts.wrap === false) { + return result; } + return `(?:${result})`; } - toString() { - return this.parsedConfig - .map(({ line, section, subsection, name, value, modified = false }) => { - if (!modified) { - return line - } - if (name != null && value != null) { - return `\t${name} = ${value}` - } - if (subsection != null) { - return `[${section} "${subsection}"]` - } - return `[${section}]` - }) - .join('\n') + let isPadded = hasPadding(min) || hasPadding(max); + let state = { min, max, a, b }; + let positives = []; + let negatives = []; + + if (isPadded) { + state.isPadded = isPadded; + state.maxLen = String(state.max).length; } -} -class GitConfigManager { - static async get({ fs, gitdir }) { - // We can improve efficiency later if needed. - // TODO: read from full list of git config files - const text = await fs.read(`${gitdir}/config`, { encoding: 'utf8' }); - return GitConfig.from(text) + if (a < 0) { + let newMin = b < 0 ? Math.abs(b) : 1; + negatives = splitToPatterns(newMin, Math.abs(a), state, opts); + a = state.a = 0; } - static async save({ fs, gitdir, config }) { - // We can improve efficiency later if needed. 
- // TODO: handle saving to the correct global/user/repo location - await fs.write(`${gitdir}/config`, config.toString(), { - encoding: 'utf8', - }); + if (b >= 0) { + positives = splitToPatterns(a, b, state, opts); + } + + state.negatives = negatives; + state.positives = positives; + state.result = collatePatterns(negatives, positives, opts); + + if (opts.capture === true) { + state.result = `(${state.result})`; + } else if (opts.wrap !== false && (positives.length + negatives.length) > 1) { + state.result = `(?:${state.result})`; } + + toRegexRange.cache[cacheKey] = state; + return state.result; +}; + +function collatePatterns(neg, pos, options) { + let onlyNegative = filterPatterns(neg, pos, '-', false, options) || []; + let onlyPositive = filterPatterns(pos, neg, '', false, options) || []; + let intersected = filterPatterns(neg, pos, '-?', true, options) || []; + let subpatterns = onlyNegative.concat(intersected).concat(onlyPositive); + return subpatterns.join('|'); } -// This is a convenience wrapper for reading and writing files in the 'refs' directory. +function splitToRanges(min, max) { + let nines = 1; + let zeros = 1; -// @see https://git-scm.com/docs/git-rev-parse.html#_specifying_revisions -const refpaths = ref => [ - `${ref}`, - `refs/${ref}`, - `refs/tags/${ref}`, - `refs/heads/${ref}`, - `refs/remotes/${ref}`, - `refs/remotes/${ref}/HEAD`, -]; + let stop = countNines(min, nines); + let stops = new Set([max]); -// @see https://git-scm.com/docs/gitrepository-layout -const GIT_FILES = ['config', 'description', 'index', 'shallow', 'commondir']; + while (min <= stop && stop <= max) { + stops.add(stop); + nines += 1; + stop = countNines(min, nines); + } -class GitRefManager { - static async updateRemoteRefs({ - fs, - gitdir, - remote, - refs, - symrefs, - tags, - refspecs = undefined, - prune = false, - pruneTags = false, - }) { - // Validate input - for (const value of refs.values()) { - if (!value.match(/[0-9a-f]{40}/)) { - throw new InvalidOidError(value) - } - } - const config = await GitConfigManager.get({ fs, gitdir }); - if (!refspecs) { - refspecs = await config.getall(`remote.${remote}.fetch`); - if (refspecs.length === 0) { - throw new NoRefspecError(remote) - } - // There's some interesting behavior with HEAD that doesn't follow the refspec. - refspecs.unshift(`+HEAD:refs/remotes/${remote}/HEAD`); - } - const refspec = GitRefSpecSet.from(refspecs); - const actualRefsToWrite = new Map(); - // Delete all current tags if the pruneTags argument is true. - if (pruneTags) { - const tags = await GitRefManager.listRefs({ - fs, - gitdir, - filepath: 'refs/tags', - }); - await GitRefManager.deleteRefs({ - fs, - gitdir, - refs: tags.map(tag => `refs/tags/${tag}`), - }); - } - // Add all tags if the fetch tags argument is true. - if (tags) { - for (const serverRef of refs.keys()) { - if (serverRef.startsWith('refs/tags') && !serverRef.endsWith('^{}')) { - // Git's behavior is to only fetch tags that do not conflict with tags already present. - if (!(await GitRefManager.exists({ fs, gitdir, ref: serverRef }))) { - // Always use the object id of the tag itself, and not the peeled object id. 
- const oid = refs.get(serverRef); - actualRefsToWrite.set(serverRef, oid); - } - } - } - } - // Combine refs and symrefs giving symrefs priority - const refTranslations = refspec.translate([...refs.keys()]); - for (const [serverRef, translatedRef] of refTranslations) { - const value = refs.get(serverRef); - actualRefsToWrite.set(translatedRef, value); - } - const symrefTranslations = refspec.translate([...symrefs.keys()]); - for (const [serverRef, translatedRef] of symrefTranslations) { - const value = symrefs.get(serverRef); - const symtarget = refspec.translateOne(value); - if (symtarget) { - actualRefsToWrite.set(translatedRef, `ref: ${symtarget}`); - } - } - // If `prune` argument is true, clear out the existing local refspec roots - const pruned = []; - if (prune) { - for (const filepath of refspec.localNamespaces()) { - const refs = ( - await GitRefManager.listRefs({ - fs, - gitdir, - filepath, - }) - ).map(file => `${filepath}/${file}`); - for (const ref of refs) { - if (!actualRefsToWrite.has(ref)) { - pruned.push(ref); - } - } - } - if (pruned.length > 0) { - await GitRefManager.deleteRefs({ fs, gitdir, refs: pruned }); - } - } - // Update files - // TODO: For large repos with a history of thousands of pull requests - // (i.e. gitlab-ce) it would be vastly more efficient to write them - // to .git/packed-refs. - // The trick is to make sure we a) don't write a packed ref that is - // already shadowed by a loose ref and b) don't loose any refs already - // in packed-refs. Doing this efficiently may be difficult. A - // solution that might work is - // a) load the current packed-refs file - // b) add actualRefsToWrite, overriding the existing values if present - // c) enumerate all the loose refs currently in .git/refs/remotes/${remote} - // d) overwrite their value with the new value. - // Examples of refs we need to avoid writing in loose format for efficieny's sake - // are .git/refs/remotes/origin/refs/remotes/remote_mirror_3059 - // and .git/refs/remotes/origin/refs/merge-requests - for (const [key, value] of actualRefsToWrite) { - await fs.write(join(gitdir, key), `${value.trim()}\n`, 'utf8'); - } - return { pruned } + stop = countZeros(max + 1, zeros) - 1; + + while (min < stop && stop <= max) { + stops.add(stop); + zeros += 1; + stop = countZeros(max + 1, zeros) - 1; + } + + stops = [...stops]; + stops.sort(compare); + return stops; +} + +/** + * Convert a range to a regex pattern + * @param {Number} `start` + * @param {Number} `stop` + * @return {String} + */ + +function rangeToPattern(start, stop, options) { + if (start === stop) { + return { pattern: start, count: [], digits: 0 }; } - // TODO: make this less crude? - static async writeRef({ fs, gitdir, ref, value }) { - // Validate input - if (!value.match(/[0-9a-f]{40}/)) { - throw new InvalidOidError(value) + let zipped = zip(start, stop); + let digits = zipped.length; + let pattern = ''; + let count = 0; + + for (let i = 0; i < digits; i++) { + let [startDigit, stopDigit] = zipped[i]; + + if (startDigit === stopDigit) { + pattern += startDigit; + + } else if (startDigit !== '0' || stopDigit !== '9') { + pattern += toCharacterClass(startDigit, stopDigit, options); + + } else { + count++; } - await fs.write(join(gitdir, ref), `${value.trim()}\n`, 'utf8'); } - static async writeSymbolicRef({ fs, gitdir, ref, value }) { - await fs.write(join(gitdir, ref), 'ref: ' + `${value.trim()}\n`, 'utf8'); + if (count) { + pattern += options.shorthand === true ? 
'\\d' : '[0-9]'; } - static async deleteRef({ fs, gitdir, ref }) { - return GitRefManager.deleteRefs({ fs, gitdir, refs: [ref] }) - } + return { pattern, count: [count], digits }; +} - static async deleteRefs({ fs, gitdir, refs }) { - // Delete regular ref - await Promise.all(refs.map(ref => fs.rm(join(gitdir, ref)))); - // Delete any packed ref - let text = await fs.read(`${gitdir}/packed-refs`, { encoding: 'utf8' }); - const packed = GitPackedRefs.from(text); - const beforeSize = packed.refs.size; - for (const ref of refs) { - if (packed.refs.has(ref)) { - packed.delete(ref); +function splitToPatterns(min, max, tok, options) { + let ranges = splitToRanges(min, max); + let tokens = []; + let start = min; + let prev; + + for (let i = 0; i < ranges.length; i++) { + let max = ranges[i]; + let obj = rangeToPattern(String(start), String(max), options); + let zeros = ''; + + if (!tok.isPadded && prev && prev.pattern === obj.pattern) { + if (prev.count.length > 1) { + prev.count.pop(); } + + prev.count.push(obj.count[0]); + prev.string = prev.pattern + toQuantifier(prev.count); + start = max + 1; + continue; } - if (packed.refs.size < beforeSize) { - text = packed.toString(); - await fs.write(`${gitdir}/packed-refs`, text, { encoding: 'utf8' }); + + if (tok.isPadded) { + zeros = padZeros(max, tok, options); } + + obj.string = zeros + obj.pattern + toQuantifier(obj.count); + tokens.push(obj); + start = max + 1; + prev = obj; } - /** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.ref - * @param {number} [args.depth] - * @returns {Promise} - */ - static async resolve({ fs, gitdir, ref, depth = undefined }) { - if (depth !== undefined) { - depth--; - if (depth === -1) { - return ref - } + return tokens; +} + +function filterPatterns(arr, comparison, prefix, intersection, options) { + let result = []; + + for (let ele of arr) { + let { string } = ele; + + // only push if _both_ are negative... + if (!intersection && !contains(comparison, 'string', string)) { + result.push(prefix + string); } - let sha; - // Is it a ref pointer? - if (ref.startsWith('ref: ')) { - ref = ref.slice('ref: '.length); - return GitRefManager.resolve({ fs, gitdir, ref, depth }) + + // or _both_ are positive + if (intersection && contains(comparison, 'string', string)) { + result.push(prefix + string); } - // Is it a complete and valid SHA? - if (ref.length === 40 && /[0-9a-f]{40}/.test(ref)) { - return ref + } + return result; +} + +/** + * Zip strings + */ + +function zip(a, b) { + let arr = []; + for (let i = 0; i < a.length; i++) arr.push([a[i], b[i]]); + return arr; +} + +function compare(a, b) { + return a > b ? 1 : b > a ? -1 : 0; +} + +function contains(arr, key, val) { + return arr.some(ele => ele[key] === val); +} + +function countNines(min, len) { + return Number(String(min).slice(0, -len) + '9'.repeat(len)); +} + +function countZeros(integer, zeros) { + return integer - (integer % Math.pow(10, zeros)); +} + +function toQuantifier(digits) { + let [start = 0, stop = ''] = digits; + if (stop || start > 1) { + return `{${start + (stop ? ',' + stop : '')}}`; + } + return ''; +} + +function toCharacterClass(a, b, options) { + return `[${a}${(b - a === 1) ? 
'' : '-'}${b}]`; +} + +function hasPadding(str) { + return /^-?(0+)\d/.test(str); +} + +function padZeros(value, tok, options) { + if (!tok.isPadded) { + return value; + } + + let diff = Math.abs(tok.maxLen - String(value).length); + let relax = options.relaxZeros !== false; + + switch (diff) { + case 0: + return ''; + case 1: + return relax ? '0?' : '0'; + case 2: + return relax ? '0{0,2}' : '00'; + default: { + return relax ? `0{0,${diff}}` : `0{${diff}}`; } - // We need to alternate between the file system and the packed-refs - const packedMap = await GitRefManager.packedRefs({ fs, gitdir }); - // Look in all the proper paths, in this order - const allpaths = refpaths(ref).filter(p => !GIT_FILES.includes(p)); // exclude git system files (#709) + } +} + +/** + * Cache + */ + +toRegexRange.cache = {}; +toRegexRange.clearCache = () => (toRegexRange.cache = {}); + +/** + * Expose `toRegexRange` + */ + +module.exports = toRegexRange; + + +/***/ }), + +/***/ 9491: +/***/ ((module) => { + +"use strict"; +module.exports = require("assert"); + +/***/ }), + +/***/ 4300: +/***/ ((module) => { + +"use strict"; +module.exports = require("buffer"); + +/***/ }), + +/***/ 2081: +/***/ ((module) => { + +"use strict"; +module.exports = require("child_process"); + +/***/ }), + +/***/ 2361: +/***/ ((module) => { + +"use strict"; +module.exports = require("events"); + +/***/ }), + +/***/ 7147: +/***/ ((module) => { + +"use strict"; +module.exports = require("fs"); + +/***/ }), + +/***/ 2037: +/***/ ((module) => { + +"use strict"; +module.exports = require("os"); + +/***/ }), + +/***/ 1017: +/***/ ((module) => { + +"use strict"; +module.exports = require("path"); + +/***/ }), + +/***/ 2781: +/***/ ((module) => { + +"use strict"; +module.exports = require("stream"); + +/***/ }), + +/***/ 3837: +/***/ ((module) => { + +"use strict"; +module.exports = require("util"); + +/***/ }), + +/***/ 5114: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", ({ value: true })); + +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + +var AsyncLock = _interopDefault(__nccwpck_require__(1542)); +var Hash = _interopDefault(__nccwpck_require__(2398)); +var crc32 = _interopDefault(__nccwpck_require__(3201)); +var pako = _interopDefault(__nccwpck_require__(1726)); +var ignore = _interopDefault(__nccwpck_require__(4777)); +var pify = _interopDefault(__nccwpck_require__(4810)); +var cleanGitRef = _interopDefault(__nccwpck_require__(3268)); +var diff3Merge = _interopDefault(__nccwpck_require__(5211)); + +/** + * @typedef {Object} GitProgressEvent + * @property {string} phase + * @property {number} loaded + * @property {number} total + */ + +/** + * @callback ProgressCallback + * @param {GitProgressEvent} progress + * @returns {void | Promise} + */ - for (const ref of allpaths) { - sha = - (await fs.read(`${gitdir}/${ref}`, { encoding: 'utf8' })) || - packedMap.get(ref); - if (sha) { - return GitRefManager.resolve({ fs, gitdir, ref: sha.trim(), depth }) - } - } - // Do we give up? 
- throw new NotFoundError(ref) - } +/** + * @typedef {Object} GitHttpRequest + * @property {string} url - The URL to request + * @property {string} [method='GET'] - The HTTP method to use + * @property {Object} [headers={}] - Headers to include in the HTTP request + * @property {AsyncIterableIterator} [body] - An async iterator of Uint8Arrays that make up the body of POST requests + * @property {ProgressCallback} [onProgress] - Reserved for future use (emitting `GitProgressEvent`s) + * @property {object} [signal] - Reserved for future use (canceling a request) + */ - static async exists({ fs, gitdir, ref }) { - try { - await GitRefManager.expand({ fs, gitdir, ref }); - return true - } catch (err) { - return false - } - } +/** + * @typedef {Object} GitHttpResponse + * @property {string} url - The final URL that was fetched after any redirects + * @property {string} [method] - The HTTP method that was used + * @property {Object} [headers] - HTTP response headers + * @property {AsyncIterableIterator} [body] - An async iterator of Uint8Arrays that make up the body of the response + * @property {number} statusCode - The HTTP status code + * @property {string} statusMessage - The HTTP status message + */ - static async expand({ fs, gitdir, ref }) { - // Is it a complete and valid SHA? - if (ref.length === 40 && /[0-9a-f]{40}/.test(ref)) { - return ref - } - // We need to alternate between the file system and the packed-refs - const packedMap = await GitRefManager.packedRefs({ fs, gitdir }); - // Look in all the proper paths, in this order - const allpaths = refpaths(ref); - for (const ref of allpaths) { - if (await fs.exists(`${gitdir}/${ref}`)) return ref - if (packedMap.has(ref)) return ref - } - // Do we give up? - throw new NotFoundError(ref) - } +/** + * @callback HttpFetch + * @param {GitHttpRequest} request + * @returns {Promise} + */ - static async expandAgainstMap({ ref, map }) { - // Look in all the proper paths, in this order - const allpaths = refpaths(ref); - for (const ref of allpaths) { - if (await map.has(ref)) return ref - } - // Do we give up? - throw new NotFoundError(ref) - } +/** + * @typedef {Object} HttpClient + * @property {HttpFetch} request + */ - static resolveAgainstMap({ ref, fullref = ref, depth = undefined, map }) { - if (depth !== undefined) { - depth--; - if (depth === -1) { - return { fullref, oid: ref } - } - } - // Is it a ref pointer? - if (ref.startsWith('ref: ')) { - ref = ref.slice('ref: '.length); - return GitRefManager.resolveAgainstMap({ ref, fullref, depth, map }) - } - // Is it a complete and valid SHA? - if (ref.length === 40 && /[0-9a-f]{40}/.test(ref)) { - return { fullref, oid: ref } - } - // Look in all the proper paths, in this order - const allpaths = refpaths(ref); - for (const ref of allpaths) { - const sha = map.get(ref); - if (sha) { - return GitRefManager.resolveAgainstMap({ - ref: sha.trim(), - fullref: ref, - depth, - map, - }) - } - } - // Do we give up? - throw new NotFoundError(ref) - } +/** + * A git commit object. 
+ * + * @typedef {Object} CommitObject + * @property {string} message Commit message + * @property {string} tree SHA-1 object id of corresponding file tree + * @property {string[]} parent an array of zero or more SHA-1 object ids + * @property {Object} author + * @property {string} author.name The author's name + * @property {string} author.email The author's email + * @property {number} author.timestamp UTC Unix timestamp in seconds + * @property {number} author.timezoneOffset Timezone difference from UTC in minutes + * @property {Object} committer + * @property {string} committer.name The committer's name + * @property {string} committer.email The committer's email + * @property {number} committer.timestamp UTC Unix timestamp in seconds + * @property {number} committer.timezoneOffset Timezone difference from UTC in minutes + * @property {string} [gpgsig] PGP signature (if present) + */ - static async packedRefs({ fs, gitdir }) { - const text = await fs.read(`${gitdir}/packed-refs`, { encoding: 'utf8' }); - const packed = GitPackedRefs.from(text); - return packed.refs - } +/** + * An entry from a git tree object. Files are called 'blobs' and directories are called 'trees'. + * + * @typedef {Object} TreeEntry + * @property {string} mode the 6 digit hexadecimal mode + * @property {string} path the name of the file or directory + * @property {string} oid the SHA-1 object id of the blob or tree + * @property {'commit'|'blob'|'tree'} type the type of object + */ - // List all the refs that match the `filepath` prefix - static async listRefs({ fs, gitdir, filepath }) { - const packedMap = GitRefManager.packedRefs({ fs, gitdir }); - let files = null; - try { - files = await fs.readdirDeep(`${gitdir}/${filepath}`); - files = files.map(x => x.replace(`${gitdir}/${filepath}/`, '')); - } catch (err) { - files = []; - } +/** + * A git tree object. Trees represent a directory snapshot. + * + * @typedef {TreeEntry[]} TreeObject + */ - for (let key of (await packedMap).keys()) { - // filter by prefix - if (key.startsWith(filepath)) { - // remove prefix - key = key.replace(filepath + '/', ''); - // Don't include duplicates; the loose files have precedence anyway - if (!files.includes(key)) { - files.push(key); - } - } - } - // since we just appended things onto an array, we need to sort them now - files.sort(compareRefNames); - return files - } +/** + * A git annotated tag object. 
+ * + * @typedef {Object} TagObject + * @property {string} object SHA-1 object id of object being tagged + * @property {'blob' | 'tree' | 'commit' | 'tag'} type the type of the object being tagged + * @property {string} tag the tag name + * @property {Object} tagger + * @property {string} tagger.name the tagger's name + * @property {string} tagger.email the tagger's email + * @property {number} tagger.timestamp UTC Unix timestamp in seconds + * @property {number} tagger.timezoneOffset timezone difference from UTC in minutes + * @property {string} message tag message + * @property {string} [gpgsig] PGP signature (if present) + */ - static async listBranches({ fs, gitdir, remote }) { - if (remote) { - return GitRefManager.listRefs({ - fs, - gitdir, - filepath: `refs/remotes/${remote}`, - }) - } else { - return GitRefManager.listRefs({ fs, gitdir, filepath: `refs/heads` }) - } - } +/** + * @typedef {Object} ReadCommitResult + * @property {string} oid - SHA-1 object id of this commit + * @property {CommitObject} commit - the parsed commit object + * @property {string} payload - PGP signing payload + */ - static async listTags({ fs, gitdir }) { - const tags = await GitRefManager.listRefs({ - fs, - gitdir, - filepath: `refs/tags`, - }); - return tags.filter(x => !x.endsWith('^{}')) - } -} +/** + * @typedef {Object} ServerRef - This object has the following schema: + * @property {string} ref - The name of the ref + * @property {string} oid - The SHA-1 object id the ref points to + * @property {string} [target] - The target ref pointed to by a symbolic ref + * @property {string} [peeled] - If the oid is the SHA-1 object id of an annotated tag, this is the SHA-1 object id that the annotated tag points to + */ -function compareTreeEntryPath(a, b) { - // Git sorts tree entries as if there is a trailing slash on directory names. - return compareStrings(appendSlashIfDir(a), appendSlashIfDir(b)) -} +/** + * @typedef Walker + * @property {Symbol} Symbol('GitWalkerSymbol') + */ -function appendSlashIfDir(entry) { - return entry.mode === '040000' ? entry.path + '/' : entry.path -} +/** + * Normalized subset of filesystem `stat` data: + * + * @typedef {Object} Stat + * @property {number} ctimeSeconds + * @property {number} ctimeNanoseconds + * @property {number} mtimeSeconds + * @property {number} mtimeNanoseconds + * @property {number} dev + * @property {number} ino + * @property {number} mode + * @property {number} uid + * @property {number} gid + * @property {number} size + */ + +/** + * The `WalkerEntry` is an interface that abstracts computing many common tree / blob stats. 
+ * + * @typedef {Object} WalkerEntry + * @property {function(): Promise<'tree'|'blob'|'special'|'commit'>} type + * @property {function(): Promise} mode + * @property {function(): Promise} oid + * @property {function(): Promise} content + * @property {function(): Promise} stat + */ + +/** + * @typedef {Object} CallbackFsClient + * @property {function} readFile - https://nodejs.org/api/fs.html#fs_fs_readfile_path_options_callback + * @property {function} writeFile - https://nodejs.org/api/fs.html#fs_fs_writefile_file_data_options_callback + * @property {function} unlink - https://nodejs.org/api/fs.html#fs_fs_unlink_path_callback + * @property {function} readdir - https://nodejs.org/api/fs.html#fs_fs_readdir_path_options_callback + * @property {function} mkdir - https://nodejs.org/api/fs.html#fs_fs_mkdir_path_mode_callback + * @property {function} rmdir - https://nodejs.org/api/fs.html#fs_fs_rmdir_path_callback + * @property {function} stat - https://nodejs.org/api/fs.html#fs_fs_stat_path_options_callback + * @property {function} lstat - https://nodejs.org/api/fs.html#fs_fs_lstat_path_options_callback + * @property {function} [readlink] - https://nodejs.org/api/fs.html#fs_fs_readlink_path_options_callback + * @property {function} [symlink] - https://nodejs.org/api/fs.html#fs_fs_symlink_target_path_type_callback + * @property {function} [chmod] - https://nodejs.org/api/fs.html#fs_fs_chmod_path_mode_callback + */ /** - * - * @typedef {Object} TreeEntry - * @property {string} mode - the 6 digit hexadecimal mode - * @property {string} path - the name of the file or directory - * @property {string} oid - the SHA-1 object id of the blob or tree - * @property {'commit'|'blob'|'tree'} type - the type of object + * @typedef {Object} PromiseFsClient + * @property {Object} promises + * @property {function} promises.readFile - https://nodejs.org/api/fs.html#fs_fspromises_readfile_path_options + * @property {function} promises.writeFile - https://nodejs.org/api/fs.html#fs_fspromises_writefile_file_data_options + * @property {function} promises.unlink - https://nodejs.org/api/fs.html#fs_fspromises_unlink_path + * @property {function} promises.readdir - https://nodejs.org/api/fs.html#fs_fspromises_readdir_path_options + * @property {function} promises.mkdir - https://nodejs.org/api/fs.html#fs_fspromises_mkdir_path_options + * @property {function} promises.rmdir - https://nodejs.org/api/fs.html#fs_fspromises_rmdir_path + * @property {function} promises.stat - https://nodejs.org/api/fs.html#fs_fspromises_stat_path_options + * @property {function} promises.lstat - https://nodejs.org/api/fs.html#fs_fspromises_lstat_path_options + * @property {function} [promises.readlink] - https://nodejs.org/api/fs.html#fs_fspromises_readlink_path_options + * @property {function} [promises.symlink] - https://nodejs.org/api/fs.html#fs_fspromises_symlink_target_path_type + * @property {function} [promises.chmod] - https://nodejs.org/api/fs.html#fs_fspromises_chmod_path_mode */ -function mode2type$1(mode) { - // prettier-ignore - switch (mode) { - case '040000': return 'tree' - case '100644': return 'blob' - case '100755': return 'blob' - case '120000': return 'blob' - case '160000': return 'commit' - } - throw new InternalError(`Unexpected GitTree entry mode: ${mode}`) -} +/** + * @typedef {CallbackFsClient | PromiseFsClient} FsClient + */ -function parseBuffer(buffer) { - const _entries = []; - let cursor = 0; - while (cursor < buffer.length) { - const space = buffer.indexOf(32, cursor); - if (space === -1) { - throw new 
InternalError( - `GitTree: Error parsing buffer at byte location ${cursor}: Could not find the next space character.` - ) - } - const nullchar = buffer.indexOf(0, cursor); - if (nullchar === -1) { - throw new InternalError( - `GitTree: Error parsing buffer at byte location ${cursor}: Could not find the next null character.` - ) - } - let mode = buffer.slice(cursor, space).toString('utf8'); - if (mode === '40000') mode = '040000'; // makes it line up neater in printed output - const type = mode2type$1(mode); - const path = buffer.slice(space + 1, nullchar).toString('utf8'); +/** + * @callback MessageCallback + * @param {string} message + * @returns {void | Promise} + */ - // Prevent malicious git repos from writing to "..\foo" on clone etc - if (path.includes('\\') || path.includes('/')) { - throw new UnsafeFilepathError(path) - } +/** + * @typedef {Object} GitAuth + * @property {string} [username] + * @property {string} [password] + * @property {Object} [headers] + * @property {boolean} [cancel] Tells git to throw a `UserCanceledError` (instead of an `HttpError`). + */ - const oid = buffer.slice(nullchar + 1, nullchar + 21).toString('hex'); - cursor = nullchar + 21; - _entries.push({ mode, path, oid, type }); - } - return _entries -} +/** + * @callback AuthCallback + * @param {string} url + * @param {GitAuth} auth Might have some values if the URL itself originally contained a username or password. + * @returns {GitAuth | void | Promise} + */ -function limitModeToAllowed(mode) { - if (typeof mode === 'number') { - mode = mode.toString(8); - } - // tree - if (mode.match(/^0?4.*/)) return '040000' // Directory - if (mode.match(/^1006.*/)) return '100644' // Regular non-executable file - if (mode.match(/^1007.*/)) return '100755' // Regular executable file - if (mode.match(/^120.*/)) return '120000' // Symbolic link - if (mode.match(/^160.*/)) return '160000' // Commit (git submodule reference) - throw new InternalError(`Could not understand file mode: ${mode}`) -} +/** + * @callback AuthFailureCallback + * @param {string} url + * @param {GitAuth} auth The credentials that failed + * @returns {GitAuth | void | Promise} + */ -function nudgeIntoShape(entry) { - if (!entry.oid && entry.sha) { - entry.oid = entry.sha; // Github - } - entry.mode = limitModeToAllowed(entry.mode); // index - if (!entry.type) { - entry.type = mode2type$1(entry.mode); // index - } - return entry -} +/** + * @callback AuthSuccessCallback + * @param {string} url + * @param {GitAuth} auth + * @returns {void | Promise} + */ -class GitTree { - constructor(entries) { - if (Buffer.isBuffer(entries)) { - this._entries = parseBuffer(entries); - } else if (Array.isArray(entries)) { - this._entries = entries.map(nudgeIntoShape); - } else { - throw new InternalError('invalid type passed to GitTree constructor') - } - // Tree entries are not sorted alphabetically in the usual sense (see `compareTreeEntryPath`) - // but it is important later on that these be sorted in the same order as they would be returned from readdir. 
- this._entries.sort(comparePath); - } +/** + * @typedef {Object} SignParams + * @property {string} payload - a plaintext message + * @property {string} secretKey - an 'ASCII armor' encoded PGP key (technically can actually contain _multiple_ keys) + */ - static from(tree) { - return new GitTree(tree) - } +/** + * @callback SignCallback + * @param {SignParams} args + * @return {{signature: string} | Promise<{signature: string}>} - an 'ASCII armor' encoded "detached" signature + */ - render() { - return this._entries - .map(entry => `${entry.mode} ${entry.type} ${entry.oid} ${entry.path}`) - .join('\n') - } +/** + * @callback WalkerMap + * @param {string} filename + * @param {WalkerEntry[]} entries + * @returns {Promise} + */ - toObject() { - // Adjust the sort order to match git's - const entries = [...this._entries]; - entries.sort(compareTreeEntryPath); - return Buffer.concat( - entries.map(entry => { - const mode = Buffer.from(entry.mode.replace(/^0/, '')); - const space = Buffer.from(' '); - const path = Buffer.from(entry.path, 'utf8'); - const nullchar = Buffer.from([0]); - const oid = Buffer.from(entry.oid, 'hex'); - return Buffer.concat([mode, space, path, nullchar, oid]) - }) - ) - } +/** + * @callback WalkerReduce + * @param {any} parent + * @param {any[]} children + * @returns {Promise} + */ - /** - * @returns {TreeEntry[]} - */ - entries() { - return this._entries - } +/** + * @callback WalkerIterateCallback + * @param {WalkerEntry[]} entries + * @returns {Promise} + */ - *[Symbol.iterator]() { - for (const entry of this._entries) { - yield entry; - } - } -} +/** + * @callback WalkerIterate + * @param {WalkerIterateCallback} walk + * @param {IterableIterator} children + * @returns {Promise} + */ -class GitObject { - static wrap({ type, object }) { - return Buffer.concat([ - Buffer.from(`${type} ${object.byteLength.toString()}\x00`), - Buffer.from(object), - ]) - } +/** + * @typedef {Object} RefUpdateStatus + * @property {boolean} ok + * @property {string} error + */ - static unwrap(buffer) { - const s = buffer.indexOf(32); // first space - const i = buffer.indexOf(0); // first null value - const type = buffer.slice(0, s).toString('utf8'); // get type of object - const length = buffer.slice(s + 1, i).toString('utf8'); // get type of object - const actualLength = buffer.length - (i + 1); - // verify length - if (parseInt(length) !== actualLength) { - throw new InternalError( - `Length mismatch: expected ${length} bytes but got ${actualLength} instead.` - ) - } - return { - type, - object: Buffer.from(buffer.slice(i + 1)), - } - } -} +/** + * @typedef {Object} PushResult + * @property {boolean} ok + * @property {?string} error + * @property {Object} refs + * @property {Object} [headers] + */ -async function readObjectLoose({ fs, gitdir, oid }) { - const source = `objects/${oid.slice(0, 2)}/${oid.slice(2)}`; - const file = await fs.read(`${gitdir}/${source}`); - if (!file) { - return null - } - return { object: file, format: 'deflated', source } -} +/** + * @typedef {0|1} HeadStatus + */ /** - * @param {Buffer} delta - * @param {Buffer} source - * @returns {Buffer} + * @typedef {0|1|2} WorkdirStatus */ -function applyDelta(delta, source) { - const reader = new BufferCursor(delta); - const sourceSize = readVarIntLE(reader); - if (sourceSize !== source.byteLength) { - throw new InternalError( - `applyDelta expected source buffer to be ${sourceSize} bytes but the provided buffer was ${source.length} bytes` - ) - } - const targetSize = readVarIntLE(reader); - let target; +/** + * 
@typedef {0|1|2|3} StageStatus + */ - const firstOp = readOp(reader, source); - // Speed optimization - return raw buffer if it's just single simple copy - if (firstOp.byteLength === targetSize) { - target = firstOp; - } else { - // Otherwise, allocate a fresh buffer and slices - target = Buffer.alloc(targetSize); - const writer = new BufferCursor(target); - writer.copy(firstOp); +/** + * @typedef {[string, HeadStatus, WorkdirStatus, StageStatus]} StatusRow + */ - while (!reader.eof()) { - writer.copy(readOp(reader, source)); - } +class BaseError extends Error { + constructor(message) { + super(message); + // Setting this here allows TS to infer that all git errors have a `caller` property and + // that its type is string. + this.caller = ''; + } - const tell = writer.tell(); - if (targetSize !== tell) { - throw new InternalError( - `applyDelta expected target buffer to be ${targetSize} bytes but the resulting buffer was ${tell} bytes` - ) + toJSON() { + // Error objects aren't normally serializable. So we do something about that. + return { + code: this.code, + data: this.data, + caller: this.caller, + message: this.message, + stack: this.stack, } } - return target -} - -function readVarIntLE(reader) { - let result = 0; - let shift = 0; - let byte = null; - do { - byte = reader.readUInt8(); - result |= (byte & 0b01111111) << shift; - shift += 7; - } while (byte & 0b10000000) - return result -} -function readCompactLE(reader, flags, size) { - let result = 0; - let shift = 0; - while (size--) { - if (flags & 0b00000001) { - result |= reader.readUInt8() << shift; - } - flags >>= 1; - shift += 8; + fromJSON(json) { + const e = new BaseError(json.message); + e.code = json.code; + e.data = json.data; + e.caller = json.caller; + e.stack = json.stack; + return e } - return result -} -function readOp(reader, source) { - /** @type {number} */ - const byte = reader.readUInt8(); - const COPY = 0b10000000; - const OFFS = 0b00001111; - const SIZE = 0b01110000; - if (byte & COPY) { - // copy consists of 4 byte offset, 3 byte size (in LE order) - const offset = readCompactLE(reader, byte & OFFS, 4); - let size = readCompactLE(reader, (byte & SIZE) >> 4, 3); - // Yup. They really did this optimization. - if (size === 0) size = 0x10000; - return source.slice(offset, offset + size) - } else { - // insert - return reader.slice(byte) + get isIsomorphicGitError() { + return true } } -// Convert a value to an Async Iterator -// This will be easier with async generator functions. -function fromValue(value) { - let queue = [value]; - return { - next() { - return Promise.resolve({ done: queue.length === 0, value: queue.pop() }) - }, - return() { - queue = []; - return {} - }, - [Symbol.asyncIterator]() { - return this - }, +class InternalError extends BaseError { + /** + * @param {string} message + */ + constructor(message) { + super( + `An internal error caused this command to fail. 
Please file a bug report at https://github.com/isomorphic-git/isomorphic-git/issues with this error message: ${message}` + ); + this.code = this.name = InternalError.code; + this.data = { message }; } } +/** @type {'InternalError'} */ +InternalError.code = 'InternalError'; -function getIterator(iterable) { - if (iterable[Symbol.asyncIterator]) { - return iterable[Symbol.asyncIterator]() - } - if (iterable[Symbol.iterator]) { - return iterable[Symbol.iterator]() - } - if (iterable.next) { - return iterable +class UnsafeFilepathError extends BaseError { + /** + * @param {string} filepath + */ + constructor(filepath) { + super(`The filepath "${filepath}" contains unsafe character sequences`); + this.code = this.name = UnsafeFilepathError.code; + this.data = { filepath }; } - return fromValue(iterable) } +/** @type {'UnsafeFilepathError'} */ +UnsafeFilepathError.code = 'UnsafeFilepathError'; -// inspired by 'gartal' but lighter-weight and more battle-tested. -class StreamReader { - constructor(stream) { - this.stream = getIterator(stream); - this.buffer = null; - this.cursor = 0; - this.undoCursor = 0; - this.started = false; - this._ended = false; - this._discardedBytes = 0; +// Modeled after https://github.com/tjfontaine/node-buffercursor +// but with the goal of being much lighter weight. +class BufferCursor { + constructor(buffer) { + this.buffer = buffer; + this._start = 0; } eof() { - return this._ended && this.cursor === this.buffer.length + return this._start >= this.buffer.length } tell() { - return this._discardedBytes + this.cursor - } - - async byte() { - if (this.eof()) return - if (!this.started) await this._init(); - if (this.cursor === this.buffer.length) { - await this._loadnext(); - if (this._ended) return - } - this._moveCursor(1); - return this.buffer[this.undoCursor] + return this._start } - async chunk() { - if (this.eof()) return - if (!this.started) await this._init(); - if (this.cursor === this.buffer.length) { - await this._loadnext(); - if (this._ended) return - } - this._moveCursor(this.buffer.length); - return this.buffer.slice(this.undoCursor, this.cursor) + seek(n) { + this._start = n; } - async read(n) { - if (this.eof()) return - if (!this.started) await this._init(); - if (this.cursor + n > this.buffer.length) { - this._trim(); - await this._accumulate(n); - } - this._moveCursor(n); - return this.buffer.slice(this.undoCursor, this.cursor) + slice(n) { + const r = this.buffer.slice(this._start, this._start + n); + this._start += n; + return r } - async skip(n) { - if (this.eof()) return - if (!this.started) await this._init(); - if (this.cursor + n > this.buffer.length) { - this._trim(); - await this._accumulate(n); - } - this._moveCursor(n); + toString(enc, length) { + const r = this.buffer.toString(enc, this._start, this._start + length); + this._start += length; + return r } - async undo() { - this.cursor = this.undoCursor; + write(value, length, enc) { + const r = this.buffer.write(value, this._start, length, enc); + this._start += length; + return r } - async _next() { - this.started = true; - let { done, value } = await this.stream.next(); - if (done) { - this._ended = true; - } - if (value) { - value = Buffer.from(value); - } - return value + copy(source, start, end) { + const r = source.copy(this.buffer, this._start, start, end); + this._start += r; + return r } - _trim() { - // Throw away parts of the buffer we don't need anymore - // assert(this.cursor <= this.buffer.length) - this.buffer = this.buffer.slice(this.undoCursor); - this.cursor -= 
this.undoCursor; - this._discardedBytes += this.undoCursor; - this.undoCursor = 0; + readUInt8() { + const r = this.buffer.readUInt8(this._start); + this._start += 1; + return r } - _moveCursor(n) { - this.undoCursor = this.cursor; - this.cursor += n; - if (this.cursor > this.buffer.length) { - this.cursor = this.buffer.length; - } + writeUInt8(value) { + const r = this.buffer.writeUInt8(value, this._start); + this._start += 1; + return r } - async _accumulate(n) { - if (this._ended) return - // Expand the buffer until we have N bytes of data - // or we've reached the end of the stream - const buffers = [this.buffer]; - while (this.cursor + n > lengthBuffers(buffers)) { - const nextbuffer = await this._next(); - if (this._ended) break - buffers.push(nextbuffer); - } - this.buffer = Buffer.concat(buffers); + readUInt16BE() { + const r = this.buffer.readUInt16BE(this._start); + this._start += 2; + return r } - async _loadnext() { - this._discardedBytes += this.buffer.length; - this.undoCursor = 0; - this.cursor = 0; - this.buffer = await this._next(); + writeUInt16BE(value) { + const r = this.buffer.writeUInt16BE(value, this._start); + this._start += 2; + return r } - async _init() { - this.buffer = await this._next(); + readUInt32BE() { + const r = this.buffer.readUInt32BE(this._start); + this._start += 4; + return r + } + + writeUInt32BE(value) { + const r = this.buffer.writeUInt32BE(value, this._start); + this._start += 4; + return r } } -// This helper function helps us postpone concatenating buffers, which -// would create intermediate buffer objects, -function lengthBuffers(buffers) { - return buffers.reduce((acc, buffer) => acc + buffer.length, 0) +function compareStrings(a, b) { + // https://stackoverflow.com/a/40355107/2168416 + return -(a < b) || +(a > b) } -// My version of git-list-pack - roughly 15x faster than the original +function comparePath(a, b) { + // https://stackoverflow.com/a/40355107/2168416 + return compareStrings(a.path, b.path) +} -async function listpack(stream, onData) { - const reader = new StreamReader(stream); - let PACK = await reader.read(4); - PACK = PACK.toString('utf8'); - if (PACK !== 'PACK') { - throw new InternalError(`Invalid PACK header '${PACK}'`) +/** + * From https://github.com/git/git/blob/master/Documentation/technical/index-format.txt + * + * 32-bit mode, split into (high to low bits) + * + * 4-bit object type + * valid values in binary are 1000 (regular file), 1010 (symbolic link) + * and 1110 (gitlink) + * + * 3-bit unused + * + * 9-bit unix permission. Only 0755 and 0644 are valid for regular files. + * Symbolic links and gitlinks have value 0 in this field. + */ +function normalizeMode(mode) { + // Note: BrowserFS will use -1 for "unknown" + // I need to make it non-negative for these bitshifts to work. + let type = mode > 0 ? mode >> 12 : 0; + // If it isn't valid, assume it as a "regular file" + // 0100 = directory + // 1000 = regular file + // 1010 = symlink + // 1110 = gitlink + if ( + type !== 0b0100 && + type !== 0b1000 && + type !== 0b1010 && + type !== 0b1110 + ) { + type = 0b1000; } - - let version = await reader.read(4); - version = version.readUInt32BE(0); - if (version !== 2) { - throw new InternalError(`Invalid packfile version: ${version}`) + let permissions = mode & 0o777; + // Is the file executable? then 755. Else 644. 
+ if (permissions & 0b001001001) { + permissions = 0o755; + } else { + permissions = 0o644; } + // If it's not a regular file, scrub all permissions + if (type !== 0b1000) permissions = 0; + return (type << 12) + permissions +} - let numObjects = await reader.read(4); - numObjects = numObjects.readUInt32BE(0); - // If (for some godforsaken reason) this is an empty packfile, abort now. - if (numObjects < 1) return - - while (!reader.eof() && numObjects--) { - const offset = reader.tell(); - const { type, length, ofs, reference } = await parseHeader(reader); - const inflator = new pako.Inflate(); - while (!inflator.result) { - const chunk = await reader.chunk(); - if (!chunk) break - inflator.push(chunk, false); - if (inflator.err) { - throw new InternalError(`Pako error: ${inflator.msg}`) - } - if (inflator.result) { - if (inflator.result.length !== length) { - throw new InternalError( - `Inflated object size is different from that stated in packfile.` - ) - } +const MAX_UINT32 = 2 ** 32; - // Backtrack parser to where deflated data ends - await reader.undo(); - await reader.read(chunk.length - inflator.strm.avail_in); - const end = reader.tell(); - await onData({ - data: inflator.result, - type, - num: numObjects, - offset, - end, - reference, - ofs, - }); - } - } +function SecondsNanoseconds( + givenSeconds, + givenNanoseconds, + milliseconds, + date +) { + if (givenSeconds !== undefined && givenNanoseconds !== undefined) { + return [givenSeconds, givenNanoseconds] } + if (milliseconds === undefined) { + milliseconds = date.valueOf(); + } + const seconds = Math.floor(milliseconds / 1000); + const nanoseconds = (milliseconds - seconds * 1000) * 1000000; + return [seconds, nanoseconds] } -async function parseHeader(reader) { - // Object type is encoded in bits 654 - let byte = await reader.byte(); - const type = (byte >> 4) & 0b111; - // The length encoding get complicated. - // Last four bits of length is encoded in bits 3210 - let length = byte & 0b1111; - // Whether the next byte is part of the variable-length encoded number - // is encoded in bit 7 - if (byte & 0b10000000) { - let shift = 4; - do { - byte = await reader.byte(); - length |= (byte & 0b01111111) << shift; - shift += 7; - } while (byte & 0b10000000) - } - // Handle deltified objects - let ofs; - let reference; - if (type === 6) { - let shift = 0; - ofs = 0; - const bytes = []; - do { - byte = await reader.byte(); - ofs |= (byte & 0b01111111) << shift; - shift += 7; - bytes.push(byte); - } while (byte & 0b10000000) - reference = Buffer.from(bytes); +function normalizeStats(e) { + const [ctimeSeconds, ctimeNanoseconds] = SecondsNanoseconds( + e.ctimeSeconds, + e.ctimeNanoseconds, + e.ctimeMs, + e.ctime + ); + const [mtimeSeconds, mtimeNanoseconds] = SecondsNanoseconds( + e.mtimeSeconds, + e.mtimeNanoseconds, + e.mtimeMs, + e.mtime + ); + + return { + ctimeSeconds: ctimeSeconds % MAX_UINT32, + ctimeNanoseconds: ctimeNanoseconds % MAX_UINT32, + mtimeSeconds: mtimeSeconds % MAX_UINT32, + mtimeNanoseconds: mtimeNanoseconds % MAX_UINT32, + dev: e.dev % MAX_UINT32, + ino: e.ino % MAX_UINT32, + mode: normalizeMode(e.mode % MAX_UINT32), + uid: e.uid % MAX_UINT32, + gid: e.gid % MAX_UINT32, + // size of -1 happens over a BrowserFS HTTP Backend that doesn't serve Content-Length headers + // (like the Karma webserver) because BrowserFS HTTP Backend uses HTTP HEAD requests to do fs.stat + size: e.size > -1 ? 
e.size % MAX_UINT32 : 0, } - if (type === 7) { - const buf = await reader.read(20); - reference = buf; +} + +function toHex(buffer) { + let hex = ''; + for (const byte of new Uint8Array(buffer)) { + if (byte < 16) hex += '0'; + hex += byte.toString(16); } - return { type, length, ofs, reference } + return hex } /* eslint-env node, browser */ -let supportsDecompressionStream = false; +let supportsSubtleSHA1 = null; -async function inflate(buffer) { - if (supportsDecompressionStream === null) { - supportsDecompressionStream = testDecompressionStream(); +async function shasum(buffer) { + if (supportsSubtleSHA1 === null) { + supportsSubtleSHA1 = await testSubtleSHA1(); } - return supportsDecompressionStream - ? browserInflate(buffer) - : pako.inflate(buffer) + return supportsSubtleSHA1 ? subtleSHA1(buffer) : shasumSync(buffer) } -async function browserInflate(buffer) { - const ds = new DecompressionStream('deflate'); - const d = new Blob([buffer]).stream().pipeThrough(ds); - return new Uint8Array(await new Response(d).arrayBuffer()) +// This is modeled after @dominictarr's "shasum" module, +// but without the 'json-stable-stringify' dependency and +// extra type-casting features. +function shasumSync(buffer) { + return new Hash().update(buffer).digest('hex') } -function testDecompressionStream() { +async function subtleSHA1(buffer) { + const hash = await crypto.subtle.digest('SHA-1', buffer); + return toHex(hash) +} + +async function testSubtleSHA1() { + // I'm using a rather crude method of progressive enhancement, because + // some browsers that have crypto.subtle.digest don't actually implement SHA-1. try { - const ds = new DecompressionStream('deflate'); - if (ds) return true + const hash = await subtleSHA1(new Uint8Array([])); + if (hash === 'da39a3ee5e6b4b0d3255bfef95601890afd80709') return true } catch (_) { // no bother } return false } -function decodeVarInt(reader) { - const bytes = []; - let byte = 0; - let multibyte = 0; - do { - byte = reader.readUInt8(); - // We keep bits 6543210 - const lastSeven = byte & 0b01111111; - bytes.push(lastSeven); - // Whether the next byte is part of the variable-length encoded number - // is encoded in bit 7 - multibyte = byte & 0b10000000; - } while (multibyte) - // Now that all the bytes are in big-endian order, - // alternate shifting the bits left by 7 and OR-ing the next byte. - // And... do a weird increment-by-one thing that I don't quite understand. - return bytes.reduce((a, b) => ((a + 1) << 7) | b, -1) +// Extract 1-bit assume-valid, 1-bit extended flag, 2-bit merge state flag, 12-bit path length flag +function parseCacheEntryFlags(bits) { + return { + assumeValid: Boolean(bits & 0b1000000000000000), + extended: Boolean(bits & 0b0100000000000000), + stage: (bits & 0b0011000000000000) >> 12, + nameLength: bits & 0b0000111111111111, + } } -// I'm pretty much copying this one from the git C source code, -// because it makes no sense. -function otherVarIntDecode(reader, startWith) { - let result = startWith; - let shift = 4; - let byte = null; - do { - byte = reader.readUInt8(); - result |= (byte & 0b01111111) << shift; - shift += 7; - } while (byte & 0b10000000) - return result +function renderCacheEntryFlags(entry) { + const flags = entry.flags; + // 1-bit extended flag (must be zero in version 2) + flags.extended = false; + // 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF + // is stored in this field. + flags.nameLength = Math.min(Buffer.from(entry.path).length, 0xfff); + return ( + (flags.assumeValid ? 
0b1000000000000000 : 0) + + (flags.extended ? 0b0100000000000000 : 0) + + ((flags.stage & 0b11) << 12) + + (flags.nameLength & 0b111111111111) + ) } -class GitPackIndex { - constructor(stuff) { - Object.assign(this, stuff); - this.offsetCache = {}; +class GitIndex { + /*:: + _entries: Map + _dirty: boolean // Used to determine if index needs to be saved to filesystem + */ + constructor(entries) { + this._dirty = false; + this._entries = entries || new Map(); } - static async fromIdx({ idx, getExternalRefDelta }) { - const reader = new BufferCursor(idx); - const magic = reader.slice(4).toString('hex'); - // Check for IDX v2 magic number - if (magic !== 'ff744f63') { - return // undefined - } - const version = reader.readUInt32BE(); - if (version !== 2) { - throw new InternalError( - `Unable to read version ${version} packfile IDX. (Only version 2 supported)` - ) + static async from(buffer) { + if (Buffer.isBuffer(buffer)) { + return GitIndex.fromBuffer(buffer) + } else if (buffer === null) { + return new GitIndex(null) + } else { + throw new InternalError('invalid type passed to GitIndex.from') } - if (idx.byteLength > 2048 * 1024 * 1024) { + } + + static async fromBuffer(buffer) { + // Verify shasum + const shaComputed = await shasum(buffer.slice(0, -20)); + const shaClaimed = buffer.slice(-20).toString('hex'); + if (shaClaimed !== shaComputed) { throw new InternalError( - `To keep implementation simple, I haven't implemented the layer 5 feature needed to support packfiles > 2GB in size.` + `Invalid checksum in GitIndex buffer: expected ${shaClaimed} but saw ${shaComputed}` ) } - // Skip over fanout table - reader.seek(reader.tell() + 4 * 255); - // Get hashes - const size = reader.readUInt32BE(); - const hashes = []; - for (let i = 0; i < size; i++) { - const hash = reader.slice(20).toString('hex'); - hashes[i] = hash; - } - reader.seek(reader.tell() + 4 * size); - // Skip over CRCs - // Get offsets - const offsets = new Map(); - for (let i = 0; i < size; i++) { - offsets.set(hashes[i], reader.readUInt32BE()); + const reader = new BufferCursor(buffer); + const _entries = new Map(); + const magic = reader.toString('utf8', 4); + if (magic !== 'DIRC') { + throw new InternalError(`Inavlid dircache magic file number: ${magic}`) } - const packfileSha = reader.slice(20).toString('hex'); - return new GitPackIndex({ - hashes, - crcs: {}, - offsets, - packfileSha, - getExternalRefDelta, - }) - } - - static async fromPack({ pack, getExternalRefDelta, onProgress }) { - const listpackTypes = { - 1: 'commit', - 2: 'tree', - 3: 'blob', - 4: 'tag', - 6: 'ofs-delta', - 7: 'ref-delta', - }; - const offsetToObject = {}; - - // Older packfiles do NOT use the shasum of the pack itself, - // so it is recommended to just use whatever bytes are in the trailer. 
- // Source: https://github.com/git/git/commit/1190a1acf800acdcfd7569f87ac1560e2d077414 - const packfileSha = pack.slice(-20).toString('hex'); - - const hashes = []; - const crcs = {}; - const offsets = new Map(); - let totalObjectCount = null; - let lastPercent = null; - - await listpack([pack], async ({ data, type, reference, offset, num }) => { - if (totalObjectCount === null) totalObjectCount = num; - const percent = Math.floor( - ((totalObjectCount - num) * 100) / totalObjectCount - ); - if (percent !== lastPercent) { - if (onProgress) { - await onProgress({ - phase: 'Receiving objects', - loaded: totalObjectCount - num, - total: totalObjectCount, - }); - } - } - lastPercent = percent; - // Change type from a number to a meaningful string - type = listpackTypes[type]; - - if (['commit', 'tree', 'blob', 'tag'].includes(type)) { - offsetToObject[offset] = { - type, - offset, - }; - } else if (type === 'ofs-delta') { - offsetToObject[offset] = { - type, - offset, - }; - } else if (type === 'ref-delta') { - offsetToObject[offset] = { - type, - offset, - }; - } - }); - - // We need to know the lengths of the slices to compute the CRCs. - const offsetArray = Object.keys(offsetToObject).map(Number); - for (const [i, start] of offsetArray.entries()) { - const end = - i + 1 === offsetArray.length ? pack.byteLength - 20 : offsetArray[i + 1]; - const o = offsetToObject[start]; - const crc = crc32.buf(pack.slice(start, end)) >>> 0; - o.end = end; - o.crc = crc; + const version = reader.readUInt32BE(); + if (version !== 2) { + throw new InternalError(`Unsupported dircache version: ${version}`) } - - // We don't have the hashes yet. But we can generate them using the .readSlice function! - const p = new GitPackIndex({ - pack: Promise.resolve(pack), - packfileSha, - crcs, - hashes, - offsets, - getExternalRefDelta, - }); - - // Resolve deltas and compute the oids - lastPercent = null; - let count = 0; - const objectsByDepth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - for (let offset in offsetToObject) { - offset = Number(offset); - const percent = Math.floor((count++ * 100) / totalObjectCount); - if (percent !== lastPercent) { - if (onProgress) { - await onProgress({ - phase: 'Resolving deltas', - loaded: count, - total: totalObjectCount, - }); - } - } - lastPercent = percent; - - const o = offsetToObject[offset]; - if (o.oid) continue - try { - p.readDepth = 0; - p.externalReadDepth = 0; - const { type, object } = await p.readSlice({ start: offset }); - objectsByDepth[p.readDepth] += 1; - const oid = await shasum(GitObject.wrap({ type, object })); - o.oid = oid; - hashes.push(oid); - offsets.set(oid, offset); - crcs[oid] = o.crc; - } catch (err) { - continue + const numEntries = reader.readUInt32BE(); + let i = 0; + while (!reader.eof() && i < numEntries) { + const entry = {}; + entry.ctimeSeconds = reader.readUInt32BE(); + entry.ctimeNanoseconds = reader.readUInt32BE(); + entry.mtimeSeconds = reader.readUInt32BE(); + entry.mtimeNanoseconds = reader.readUInt32BE(); + entry.dev = reader.readUInt32BE(); + entry.ino = reader.readUInt32BE(); + entry.mode = reader.readUInt32BE(); + entry.uid = reader.readUInt32BE(); + entry.gid = reader.readUInt32BE(); + entry.size = reader.readUInt32BE(); + entry.oid = reader.slice(20).toString('hex'); + const flags = reader.readUInt16BE(); + entry.flags = parseCacheEntryFlags(flags); + // TODO: handle if (version === 3 && entry.flags.extended) + const pathlength = buffer.indexOf(0, reader.tell() + 1) - reader.tell(); + if (pathlength < 1) { + throw new 
InternalError(`Got a path length of: ${pathlength}`) } - } + // TODO: handle pathnames larger than 12 bits + entry.path = reader.toString('utf8', pathlength); - hashes.sort(); - return p - } + // Prevent malicious paths like "..\foo" + if (entry.path.includes('..\\') || entry.path.includes('../')) { + throw new UnsafeFilepathError(entry.path) + } - async toBuffer() { - const buffers = []; - const write = (str, encoding) => { - buffers.push(Buffer.from(str, encoding)); - }; - // Write out IDX v2 magic number - write('ff744f63', 'hex'); - // Write out version number 2 - write('00000002', 'hex'); - // Write fanout table - const fanoutBuffer = new BufferCursor(Buffer.alloc(256 * 4)); - for (let i = 0; i < 256; i++) { - let count = 0; - for (const hash of this.hashes) { - if (parseInt(hash.slice(0, 2), 16) <= i) count++; + // The next bit is awkward. We expect 1 to 8 null characters + // such that the total size of the entry is a multiple of 8 bits. + // (Hence subtract 12 bytes for the header.) + let padding = 8 - ((reader.tell() - 12) % 8); + if (padding === 0) padding = 8; + while (padding--) { + const tmp = reader.readUInt8(); + if (tmp !== 0) { + throw new InternalError( + `Expected 1-8 null characters but got '${tmp}' after ${entry.path}` + ) + } else if (reader.eof()) { + throw new InternalError('Unexpected end of file') + } } - fanoutBuffer.writeUInt32BE(count); - } - buffers.push(fanoutBuffer.buffer); - // Write out hashes - for (const hash of this.hashes) { - write(hash, 'hex'); - } - // Write out crcs - const crcsBuffer = new BufferCursor(Buffer.alloc(this.hashes.length * 4)); - for (const hash of this.hashes) { - crcsBuffer.writeUInt32BE(this.crcs[hash]); - } - buffers.push(crcsBuffer.buffer); - // Write out offsets - const offsetsBuffer = new BufferCursor(Buffer.alloc(this.hashes.length * 4)); - for (const hash of this.hashes) { - offsetsBuffer.writeUInt32BE(this.offsets.get(hash)); + // end of awkward part + _entries.set(entry.path, entry); + i++; } - buffers.push(offsetsBuffer.buffer); - // Write out packfile checksum - write(this.packfileSha, 'hex'); - // Write out shasum - const totalBuffer = Buffer.concat(buffers); - const sha = await shasum(totalBuffer); - const shaBuffer = Buffer.alloc(20); - shaBuffer.write(sha, 'hex'); - return Buffer.concat([totalBuffer, shaBuffer]) + return new GitIndex(_entries) } - async load({ pack }) { - this.pack = pack; + get entries() { + return [...this._entries.values()].sort(comparePath) } - async unload() { - this.pack = null; + get entriesMap() { + return this._entries } - async read({ oid }) { - if (!this.offsets.get(oid)) { - if (this.getExternalRefDelta) { - this.externalReadDepth++; - return this.getExternalRefDelta(oid) - } else { - throw new InternalError(`Could not read object ${oid} from packfile`) - } + *[Symbol.iterator]() { + for (const entry of this.entries) { + yield entry; } - const start = this.offsets.get(oid); - return this.readSlice({ start }) } - async readSlice({ start }) { - if (this.offsetCache[start]) { - return Object.assign({}, this.offsetCache[start]) - } - this.readDepth++; - const types = { - 0b0010000: 'commit', - 0b0100000: 'tree', - 0b0110000: 'blob', - 0b1000000: 'tag', - 0b1100000: 'ofs_delta', - 0b1110000: 'ref_delta', + insert({ filepath, stats, oid }) { + stats = normalizeStats(stats); + const bfilepath = Buffer.from(filepath); + const entry = { + ctimeSeconds: stats.ctimeSeconds, + ctimeNanoseconds: stats.ctimeNanoseconds, + mtimeSeconds: stats.mtimeSeconds, + mtimeNanoseconds: stats.mtimeNanoseconds, + 
dev: stats.dev, + ino: stats.ino, + // We provide a fallback value for `mode` here because not all fs + // implementations assign it, but we use it in GitTree. + // '100644' is for a "regular non-executable file" + mode: stats.mode || 0o100644, + uid: stats.uid, + gid: stats.gid, + size: stats.size, + path: filepath, + oid: oid, + flags: { + assumeValid: false, + extended: false, + stage: 0, + nameLength: bfilepath.length < 0xfff ? bfilepath.length : 0xfff, + }, }; - if (!this.pack) { - throw new InternalError( - 'Tried to read from a GitPackIndex with no packfile loaded into memory' - ) - } - const raw = (await this.pack).slice(start); - const reader = new BufferCursor(raw); - const byte = reader.readUInt8(); - // Object type is encoded in bits 654 - const btype = byte & 0b1110000; - let type = types[btype]; - if (type === undefined) { - throw new InternalError('Unrecognized type: 0b' + btype.toString(2)) - } - // The length encoding get complicated. - // Last four bits of length is encoded in bits 3210 - const lastFour = byte & 0b1111; - let length = lastFour; - // Whether the next byte is part of the variable-length encoded number - // is encoded in bit 7 - const multibyte = byte & 0b10000000; - if (multibyte) { - length = otherVarIntDecode(reader, lastFour); - } - let base = null; - let object = null; - // Handle deltified objects - if (type === 'ofs_delta') { - const offset = decodeVarInt(reader); - const baseOffset = start - offset - ;({ object: base, type } = await this.readSlice({ start: baseOffset })); - } - if (type === 'ref_delta') { - const oid = reader.slice(20).toString('hex') - ;({ object: base, type } = await this.read({ oid })); - } - // Handle undeltified objects - const buffer = raw.slice(reader.tell()); - object = Buffer.from(await inflate(buffer)); - // Assert that the object length is as expected. - if (object.byteLength !== length) { - throw new InternalError( - `Packfile told us object would have length ${length} but it had length ${object.byteLength}` - ) - } - if (base) { - object = Buffer.from(applyDelta(object, base)); - } - // Cache the result based on depth. 
- if (this.readDepth > 3) { - // hand tuned for speed / memory usage tradeoff - this.offsetCache[start] = { type, object }; + this._entries.set(entry.path, entry); + this._dirty = true; + } + + delete({ filepath }) { + if (this._entries.has(filepath)) { + this._entries.delete(filepath); + } else { + for (const key of this._entries.keys()) { + if (key.startsWith(filepath + '/')) { + this._entries.delete(key); + } + } } - return { type, format: 'content', object } + this._dirty = true; + } + + clear() { + this._entries.clear(); + this._dirty = true; + } + + render() { + return this.entries + .map(entry => `${entry.mode.toString(8)} ${entry.oid} ${entry.path}`) + .join('\n') + } + + async toObject() { + const header = Buffer.alloc(12); + const writer = new BufferCursor(header); + writer.write('DIRC', 4, 'utf8'); + writer.writeUInt32BE(2); + writer.writeUInt32BE(this.entries.length); + const body = Buffer.concat( + this.entries.map(entry => { + const bpath = Buffer.from(entry.path); + // the fixed length + the filename + at least one null char => align by 8 + const length = Math.ceil((62 + bpath.length + 1) / 8) * 8; + const written = Buffer.alloc(length); + const writer = new BufferCursor(written); + const stat = normalizeStats(entry); + writer.writeUInt32BE(stat.ctimeSeconds); + writer.writeUInt32BE(stat.ctimeNanoseconds); + writer.writeUInt32BE(stat.mtimeSeconds); + writer.writeUInt32BE(stat.mtimeNanoseconds); + writer.writeUInt32BE(stat.dev); + writer.writeUInt32BE(stat.ino); + writer.writeUInt32BE(stat.mode); + writer.writeUInt32BE(stat.uid); + writer.writeUInt32BE(stat.gid); + writer.writeUInt32BE(stat.size); + writer.write(entry.oid, 20, 'hex'); + writer.writeUInt16BE(renderCacheEntryFlags(entry)); + writer.write(entry.path, bpath.length, 'utf8'); + return written + }) + ); + const main = Buffer.concat([header, body]); + const sum = await shasum(main); + return Buffer.concat([main, Buffer.from(sum, 'hex')]) } } -const PackfileCache = Symbol('PackfileCache'); +function compareStats(entry, stats) { + // Comparison based on the description in Paragraph 4 of + // https://www.kernel.org/pub/software/scm/git/docs/technical/racy-git.txt + const e = normalizeStats(entry); + const s = normalizeStats(stats); + const staleness = + e.mode !== s.mode || + e.mtimeSeconds !== s.mtimeSeconds || + e.ctimeSeconds !== s.ctimeSeconds || + e.uid !== s.uid || + e.gid !== s.gid || + e.ino !== s.ino || + e.size !== s.size; + return staleness +} + +// import LockManager from 'travix-lock-manager' -async function loadPackIndex({ - fs, - filename, - getExternalRefDelta, - emitter, - emitterPrefix, -}) { - const idx = await fs.read(filename); - return GitPackIndex.fromIdx({ idx, getExternalRefDelta }) -} +// import Lock from '../utils.js' -function readPackIndex({ - fs, - cache, - filename, - getExternalRefDelta, - emitter, - emitterPrefix, -}) { - // Try to get the packfile index from the in-memory cache - if (!cache[PackfileCache]) cache[PackfileCache] = new Map(); - let p = cache[PackfileCache].get(filename); - if (!p) { - p = loadPackIndex({ - fs, - filename, - getExternalRefDelta, - emitter, - emitterPrefix, - }); - cache[PackfileCache].set(filename, p); +// const lm = new LockManager() +let lock = null; + +const IndexCache = Symbol('IndexCache'); + +function createCache() { + return { + map: new Map(), + stats: new Map(), } - return p } -async function readObjectPacked({ - fs, - cache, - gitdir, - oid, - format = 'content', - getExternalRefDelta, -}) { - // Check to see if it's in a packfile. 
- // Iterate through all the .idx files - let list = await fs.readdir(join(gitdir, 'objects/pack')); - list = list.filter(x => x.endsWith('.idx')); - for (const filename of list) { - const indexFile = `${gitdir}/objects/pack/${filename}`; - const p = await readPackIndex({ - fs, - cache, - filename: indexFile, - getExternalRefDelta, - }); - if (p.error) throw new InternalError(p.error) - // If the packfile DOES have the oid we're looking for... - if (p.offsets.has(oid)) { - // Get the resolved git object from the packfile - if (!p.pack) { - const packFile = indexFile.replace(/idx$/, 'pack'); - p.pack = fs.read(packFile); - } - const result = await p.read({ oid, getExternalRefDelta }); - result.format = 'content'; - result.source = `objects/pack/${filename.replace(/idx$/, 'pack')}`; - return result - } - } - // Failed to find it - return null +async function updateCachedIndexFile(fs, filepath, cache) { + const stat = await fs.lstat(filepath); + const rawIndexFile = await fs.read(filepath); + const index = await GitIndex.from(rawIndexFile); + // cache the GitIndex object so we don't need to re-read it every time. + cache.map.set(filepath, index); + // Save the stat data for the index so we know whether the cached file is stale (modified by an outside process). + cache.stats.set(filepath, stat); } -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string} args.oid - * @param {string} [args.format] - */ -async function _readObject({ - fs, - cache, - gitdir, - oid, - format = 'content', -}) { - // Curry the current read method so that the packfile un-deltification - // process can acquire external ref-deltas. - const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); +// Determine whether our copy of the index file is stale +async function isIndexStale(fs, filepath, cache) { + const savedStats = cache.stats.get(filepath); + if (savedStats === undefined) return true + const currStats = await fs.lstat(filepath); + if (savedStats === null) return false + if (currStats === null) return false + return compareStats(savedStats, currStats) +} - let result; - // Empty tree - hard-coded so we can use it as a shorthand. - // Note: I think the canonical git implementation must do this too because - // `git cat-file -t 4b825dc642cb6eb9a060e54bf8d69288fbee4904` prints "tree" even in empty repos. - if (oid === '4b825dc642cb6eb9a060e54bf8d69288fbee4904') { - result = { format: 'wrapped', object: Buffer.from(`tree 0\x00`) }; - } - // Look for it in the loose object directory. - if (!result) { - result = await readObjectLoose({ fs, gitdir, oid }); - } - // Check to see if it's in a packfile. 
- if (!result) { - result = await readObjectPacked({ - fs, - cache, - gitdir, - oid, - getExternalRefDelta, - }); - } - // Finally - if (!result) { - throw new NotFoundError(oid) - } +class GitIndexManager { + /** + * + * @param {object} opts + * @param {import('../models/FileSystem.js').FileSystem} opts.fs + * @param {string} opts.gitdir + * @param {object} opts.cache + * @param {function(GitIndex): any} closure + */ + static async acquire({ fs, gitdir, cache }, closure) { + if (!cache[IndexCache]) cache[IndexCache] = createCache(); - if (format === 'deflated') { + const filepath = `${gitdir}/index`; + if (lock === null) lock = new AsyncLock({ maxPending: Infinity }); + let result; + await lock.acquire(filepath, async function() { + // Acquire a file lock while we're reading the index + // to make sure other processes aren't writing to it + // simultaneously, which could result in a corrupted index. + // const fileLock = await Lock(filepath) + if (await isIndexStale(fs, filepath, cache[IndexCache])) { + await updateCachedIndexFile(fs, filepath, cache[IndexCache]); + } + const index = cache[IndexCache].map.get(filepath); + result = await closure(index); + if (index._dirty) { + // Acquire a file lock while we're writing the index file + // let fileLock = await Lock(filepath) + const buffer = await index.toObject(); + await fs.write(filepath, buffer); + // Update cached stat value + cache[IndexCache].stats.set(filepath, await fs.lstat(filepath)); + index._dirty = false; + } + }); return result } +} - if (result.format === 'deflated') { - result.object = Buffer.from(await inflate(result.object)); - result.format = 'wrapped'; - } - - if (result.format === 'wrapped') { - if (format === 'wrapped' && result.format === 'wrapped') { - return result - } - const sha = await shasum(result.object); - if (sha !== oid) { - throw new InternalError( - `SHA check failed! Expected ${oid}, computed ${sha}` - ) - } - const { object, type } = GitObject.unwrap(result.object); - result.type = type; - result.object = object; - result.format = 'content'; +function basename(path) { + const last = Math.max(path.lastIndexOf('/'), path.lastIndexOf('\\')); + if (last > -1) { + path = path.slice(last + 1); } + return path +} - if (result.format === 'content') { - if (format === 'content') return result - return - } +function dirname(path) { + const last = Math.max(path.lastIndexOf('/'), path.lastIndexOf('\\')); + if (last === -1) return '.' + if (last === 0) return '/' + return path.slice(0, last) +} - throw new InternalError(`invalid format "${result.format}"`) +/*:: +type Node = { + type: string, + fullpath: string, + basename: string, + metadata: Object, // mode, oid + parent?: Node, + children: Array } +*/ + +function flatFileListToDirectoryStructure(files) { + const inodes = new Map(); + const mkdir = function(name) { + if (!inodes.has(name)) { + const dir = { + type: 'tree', + fullpath: name, + basename: basename(name), + metadata: {}, + children: [], + }; + inodes.set(name, dir); + // This recursively generates any missing parent folders. + // We do it after we've added the inode to the set so that + // we don't recurse infinitely trying to create the root '.' dirname. 
+ dir.parent = mkdir(dirname(name)); + if (dir.parent && dir.parent !== dir) dir.parent.children.push(dir); + } + return inodes.get(name) + }; + + const mkfile = function(name, metadata) { + if (!inodes.has(name)) { + const file = { + type: 'blob', + fullpath: name, + basename: basename(name), + metadata: metadata, + // This recursively generates any missing parent folders. + parent: mkdir(dirname(name)), + children: [], + }; + if (file.parent) file.parent.children.push(file); + inodes.set(name, file); + } + return inodes.get(name) + }; -class AlreadyExistsError extends BaseError { - /** - * @param {'note'|'remote'|'tag'|'branch'} noun - * @param {string} where - * @param {boolean} canForce - */ - constructor(noun, where, canForce = true) { - super( - `Failed to create ${noun} at ${where} because it already exists.${ - canForce - ? ` (Hint: use 'force: true' parameter to overwrite existing ${noun}.)` - : '' - }` - ); - this.code = this.name = AlreadyExistsError.code; - this.data = { noun, where, canForce }; + mkdir('.'); + for (const file of files) { + mkfile(file.path, file); } + return inodes } -/** @type {'AlreadyExistsError'} */ -AlreadyExistsError.code = 'AlreadyExistsError'; -class AmbiguousError extends BaseError { - /** - * @param {'oids'|'refs'} nouns - * @param {string} short - * @param {string[]} matches - */ - constructor(nouns, short, matches) { - super( - `Found multiple ${nouns} matching "${short}" (${matches.join( - ', ' - )}). Use a longer abbreviation length to disambiguate them.` - ); - this.code = this.name = AmbiguousError.code; - this.data = { nouns, short, matches }; +/** + * + * @param {number} mode + */ +function mode2type(mode) { + // prettier-ignore + switch (mode) { + case 0o040000: return 'tree' + case 0o100644: return 'blob' + case 0o100755: return 'blob' + case 0o120000: return 'blob' + case 0o160000: return 'commit' } + throw new InternalError(`Unexpected GitTree entry mode: ${mode.toString(8)}`) } -/** @type {'AmbiguousError'} */ -AmbiguousError.code = 'AmbiguousError'; -class CheckoutConflictError extends BaseError { - /** - * @param {string[]} filepaths - */ - constructor(filepaths) { - super( - `Your local changes to the following files would be overwritten by checkout: ${filepaths.join( - ', ' - )}` +class GitWalkerIndex { + constructor({ fs, gitdir, cache }) { + this.treePromise = GitIndexManager.acquire( + { fs, gitdir, cache }, + async function(index) { + return flatFileListToDirectoryStructure(index.entries) + } ); - this.code = this.name = CheckoutConflictError.code; - this.data = { filepaths }; + const walker = this; + this.ConstructEntry = class StageEntry { + constructor(fullpath) { + this._fullpath = fullpath; + this._type = false; + this._mode = false; + this._stat = false; + this._oid = false; + } + + async type() { + return walker.type(this) + } + + async mode() { + return walker.mode(this) + } + + async stat() { + return walker.stat(this) + } + + async content() { + return walker.content(this) + } + + async oid() { + return walker.oid(this) + } + }; } -} -/** @type {'CheckoutConflictError'} */ -CheckoutConflictError.code = 'CheckoutConflictError'; -class CommitNotFetchedError extends BaseError { - /** - * @param {string} ref - * @param {string} oid - */ - constructor(ref, oid) { - super( - `Failed to checkout "${ref}" because commit ${oid} is not available locally. 
Do a git fetch to make the branch available locally.` - ); - this.code = this.name = CommitNotFetchedError.code; - this.data = { ref, oid }; + async readdir(entry) { + const filepath = entry._fullpath; + const tree = await this.treePromise; + const inode = tree.get(filepath); + if (!inode) return null + if (inode.type === 'blob') return null + if (inode.type !== 'tree') { + throw new Error(`ENOTDIR: not a directory, scandir '${filepath}'`) + } + const names = inode.children.map(inode => inode.fullpath); + names.sort(compareStrings); + return names } -} -/** @type {'CommitNotFetchedError'} */ -CommitNotFetchedError.code = 'CommitNotFetchedError'; -class EmptyServerResponseError extends BaseError { - constructor() { - super(`Empty response from git server.`); - this.code = this.name = EmptyServerResponseError.code; - this.data = {}; + async type(entry) { + if (entry._type === false) { + await entry.stat(); + } + return entry._type } -} -/** @type {'EmptyServerResponseError'} */ -EmptyServerResponseError.code = 'EmptyServerResponseError'; -class FastForwardError extends BaseError { - constructor() { - super(`A simple fast-forward merge was not possible.`); - this.code = this.name = FastForwardError.code; - this.data = {}; + async mode(entry) { + if (entry._mode === false) { + await entry.stat(); + } + return entry._mode } -} -/** @type {'FastForwardError'} */ -FastForwardError.code = 'FastForwardError'; -class GitPushError extends BaseError { - /** - * @param {string} prettyDetails - * @param {PushResult} result - */ - constructor(prettyDetails, result) { - super(`One or more branches were not updated: ${prettyDetails}`); - this.code = this.name = GitPushError.code; - this.data = { prettyDetails, result }; + async stat(entry) { + if (entry._stat === false) { + const tree = await this.treePromise; + const inode = tree.get(entry._fullpath); + if (!inode) { + throw new Error( + `ENOENT: no such file or directory, lstat '${entry._fullpath}'` + ) + } + const stats = inode.type === 'tree' ? {} : normalizeStats(inode.metadata); + entry._type = inode.type === 'tree' ? 
'tree' : mode2type(stats.mode); + entry._mode = stats.mode; + if (inode.type === 'tree') { + entry._stat = undefined; + } else { + entry._stat = stats; + } + } + return entry._stat } -} -/** @type {'GitPushError'} */ -GitPushError.code = 'GitPushError'; -class HttpError extends BaseError { - /** - * @param {number} statusCode - * @param {string} statusMessage - * @param {string} response - */ - constructor(statusCode, statusMessage, response) { - super(`HTTP Error: ${statusCode} ${statusMessage}`); - this.code = this.name = HttpError.code; - this.data = { statusCode, statusMessage, response }; + async content(_entry) { + // Cannot get content for an index entry } -} -/** @type {'HttpError'} */ -HttpError.code = 'HttpError'; -class InvalidFilepathError extends BaseError { - /** - * @param {'leading-slash'|'trailing-slash'} [reason] - */ - constructor(reason) { - let message = 'invalid filepath'; - if (reason === 'leading-slash' || reason === 'trailing-slash') { - message = `"filepath" parameter should not include leading or trailing directory separators because these can cause problems on some platforms.`; + async oid(entry) { + if (entry._oid === false) { + const tree = await this.treePromise; + const inode = tree.get(entry._fullpath); + entry._oid = inode.metadata.oid; } - super(message); - this.code = this.name = InvalidFilepathError.code; - this.data = { reason }; + return entry._oid } } -/** @type {'InvalidFilepathError'} */ -InvalidFilepathError.code = 'InvalidFilepathError'; -class InvalidRefNameError extends BaseError { - /** - * @param {string} ref - * @param {string} suggestion - * @param {boolean} canForce - */ - constructor(ref, suggestion) { - super( - `"${ref}" would be an invalid git reference. (Hint: a valid alternative would be "${suggestion}".)` - ); - this.code = this.name = InvalidRefNameError.code; - this.data = { ref, suggestion }; - } +// This is part of an elaborate system to facilitate code-splitting / tree-shaking. +// commands/walk.js can depend on only this, and the actual Walker classes exported +// can be opaque - only having a single property (this symbol) that is not enumerable, +// and thus the constructor can be passed as an argument to walk while being "unusable" +// outside of it. 
+const GitWalkSymbol = Symbol('GitWalkSymbol'); + +// @ts-check + +/** + * @returns {Walker} + */ +function STAGE() { + const o = Object.create(null); + Object.defineProperty(o, GitWalkSymbol, { + value: function({ fs, gitdir, cache }) { + return new GitWalkerIndex({ fs, gitdir, cache }) + }, + }); + Object.freeze(o); + return o } -/** @type {'InvalidRefNameError'} */ -InvalidRefNameError.code = 'InvalidRefNameError'; -class MaxDepthError extends BaseError { +// @ts-check + +class NotFoundError extends BaseError { /** - * @param {number} depth + * @param {string} what */ - constructor(depth) { - super(`Maximum search depth of ${depth} exceeded.`); - this.code = this.name = MaxDepthError.code; - this.data = { depth }; - } -} -/** @type {'MaxDepthError'} */ -MaxDepthError.code = 'MaxDepthError'; - -class MergeNotSupportedError extends BaseError { - constructor() { - super(`Merges with conflicts are not supported yet.`); - this.code = this.name = MergeNotSupportedError.code; - this.data = {}; + constructor(what) { + super(`Could not find ${what}.`); + this.code = this.name = NotFoundError.code; + this.data = { what }; } } -/** @type {'MergeNotSupportedError'} */ -MergeNotSupportedError.code = 'MergeNotSupportedError'; +/** @type {'NotFoundError'} */ +NotFoundError.code = 'NotFoundError'; -class MissingNameError extends BaseError { +class ObjectTypeError extends BaseError { /** - * @param {'author'|'committer'|'tagger'} role + * @param {string} oid + * @param {'blob'|'commit'|'tag'|'tree'} actual + * @param {'blob'|'commit'|'tag'|'tree'} expected + * @param {string} [filepath] */ - constructor(role) { + constructor(oid, actual, expected, filepath) { super( - `No name was provided for ${role} in the argument or in the .git/config file.` + `Object ${oid} ${ + filepath ? `at ${filepath}` : '' + }was anticipated to be a ${expected} but it is a ${actual}.` ); - this.code = this.name = MissingNameError.code; - this.data = { role }; + this.code = this.name = ObjectTypeError.code; + this.data = { oid, actual, expected, filepath }; } } -/** @type {'MissingNameError'} */ -MissingNameError.code = 'MissingNameError'; +/** @type {'ObjectTypeError'} */ +ObjectTypeError.code = 'ObjectTypeError'; -class MissingParameterError extends BaseError { +class InvalidOidError extends BaseError { /** - * @param {string} parameter + * @param {string} value */ - constructor(parameter) { - super( - `The function requires a "${parameter}" parameter but none was provided.` - ); - this.code = this.name = MissingParameterError.code; - this.data = { parameter }; + constructor(value) { + super(`Expected a 40-char hex object id but saw "${value}".`); + this.code = this.name = InvalidOidError.code; + this.data = { value }; } } -/** @type {'MissingParameterError'} */ -MissingParameterError.code = 'MissingParameterError'; +/** @type {'InvalidOidError'} */ +InvalidOidError.code = 'InvalidOidError'; -class ParseError extends BaseError { +class NoRefspecError extends BaseError { /** - * @param {string} expected - * @param {string} actual + * @param {string} remote */ - constructor(expected, actual) { - super(`Expected "${expected}" but received "${actual}".`); - this.code = this.name = ParseError.code; - this.data = { expected, actual }; + constructor(remote) { + super(`Could not find a fetch refspec for remote "${remote}". 
Make sure the config file has an entry like the following: +[remote "${remote}"] +\tfetch = +refs/heads/*:refs/remotes/origin/* +`); + this.code = this.name = NoRefspecError.code; + this.data = { remote }; } } -/** @type {'ParseError'} */ -ParseError.code = 'ParseError'; +/** @type {'NoRefspecError'} */ +NoRefspecError.code = 'NoRefspecError'; -class PushRejectedError extends BaseError { - /** - * @param {'not-fast-forward'|'tag-exists'} reason - */ - constructor(reason) { - let message = ''; - if (reason === 'not-fast-forward') { - message = ' because it was not a simple fast-forward'; - } else if (reason === 'tag-exists') { - message = ' because tag already exists'; +class GitPackedRefs { + constructor(text) { + this.refs = new Map(); + this.parsedConfig = []; + if (text) { + let key = null; + this.parsedConfig = text + .trim() + .split('\n') + .map(line => { + if (/^\s*#/.test(line)) { + return { line, comment: true } + } + const i = line.indexOf(' '); + if (line.startsWith('^')) { + // This is a oid for the commit associated with the annotated tag immediately preceding this line. + // Trim off the '^' + const value = line.slice(1); + // The tagname^{} syntax is based on the output of `git show-ref --tags -d` + this.refs.set(key + '^{}', value); + return { line, ref: key, peeled: value } + } else { + // This is an oid followed by the ref name + const value = line.slice(0, i); + key = line.slice(i + 1); + this.refs.set(key, value); + return { line, ref: key, oid: value } + } + }); } - super(`Push rejected${message}. Use "force: true" to override.`); - this.code = this.name = PushRejectedError.code; - this.data = { reason }; + return this } -} -/** @type {'PushRejectedError'} */ -PushRejectedError.code = 'PushRejectedError'; -class RemoteCapabilityError extends BaseError { - /** - * @param {'shallow'|'deepen-since'|'deepen-not'|'deepen-relative'} capability - * @param {'depth'|'since'|'exclude'|'relative'} parameter - */ - constructor(capability, parameter) { - super( - `Remote does not support the "${capability}" so the "${parameter}" parameter cannot be used.` - ); - this.code = this.name = RemoteCapabilityError.code; - this.data = { capability, parameter }; + static from(text) { + return new GitPackedRefs(text) + } + + delete(ref) { + this.parsedConfig = this.parsedConfig.filter(entry => entry.ref !== ref); + this.refs.delete(ref); + } + + toString() { + return this.parsedConfig.map(({ line }) => line).join('\n') + '\n' } } -/** @type {'RemoteCapabilityError'} */ -RemoteCapabilityError.code = 'RemoteCapabilityError'; -class SmartHttpError extends BaseError { - /** - * @param {string} preview - * @param {string} response - */ - constructor(preview, response) { - super( - `Remote did not reply using the "smart" HTTP protocol. 
Expected "001e# service=git-upload-pack" but received: ${preview}` - ); - this.code = this.name = SmartHttpError.code; - this.data = { preview, response }; +class GitRefSpec { + constructor({ remotePath, localPath, force, matchPrefix }) { + Object.assign(this, { + remotePath, + localPath, + force, + matchPrefix, + }); + } + + static from(refspec) { + const [ + forceMatch, + remotePath, + remoteGlobMatch, + localPath, + localGlobMatch, + ] = refspec.match(/^(\+?)(.*?)(\*?):(.*?)(\*?)$/).slice(1); + const force = forceMatch === '+'; + const remoteIsGlob = remoteGlobMatch === '*'; + const localIsGlob = localGlobMatch === '*'; + // validate + // TODO: Make this check more nuanced, and depend on whether this is a fetch refspec or a push refspec + if (remoteIsGlob !== localIsGlob) { + throw new InternalError('Invalid refspec') + } + return new GitRefSpec({ + remotePath, + localPath, + force, + matchPrefix: remoteIsGlob, + }) + // TODO: We need to run resolveRef on both paths to expand them to their full name. + } + + translate(remoteBranch) { + if (this.matchPrefix) { + if (remoteBranch.startsWith(this.remotePath)) { + return this.localPath + remoteBranch.replace(this.remotePath, '') + } + } else { + if (remoteBranch === this.remotePath) return this.localPath + } + return null + } + + reverseTranslate(localBranch) { + if (this.matchPrefix) { + if (localBranch.startsWith(this.localPath)) { + return this.remotePath + localBranch.replace(this.localPath, '') + } + } else { + if (localBranch === this.localPath) return this.remotePath + } + return null } } -/** @type {'SmartHttpError'} */ -SmartHttpError.code = 'SmartHttpError'; -class UnknownTransportError extends BaseError { - /** - * @param {string} url - * @param {string} transport - * @param {string} [suggestion] - */ - constructor(url, transport, suggestion) { - super( - `Git remote "${url}" uses an unrecognized transport protocol: "${transport}"` - ); - this.code = this.name = UnknownTransportError.code; - this.data = { url, transport, suggestion }; +class GitRefSpecSet { + constructor(rules = []) { + this.rules = rules; + } + + static from(refspecs) { + const rules = []; + for (const refspec of refspecs) { + rules.push(GitRefSpec.from(refspec)); // might throw + } + return new GitRefSpecSet(rules) + } + + add(refspec) { + const rule = GitRefSpec.from(refspec); // might throw + this.rules.push(rule); + } + + translate(remoteRefs) { + const result = []; + for (const rule of this.rules) { + for (const remoteRef of remoteRefs) { + const localRef = rule.translate(remoteRef); + if (localRef) { + result.push([remoteRef, localRef]); + } + } + } + return result + } + + translateOne(remoteRef) { + let result = null; + for (const rule of this.rules) { + const localRef = rule.translate(remoteRef); + if (localRef) { + result = localRef; + } + } + return result } -} -/** @type {'UnknownTransportError'} */ -UnknownTransportError.code = 'UnknownTransportError'; -class UrlParseError extends BaseError { - /** - * @param {string} url - */ - constructor(url) { - super(`Cannot parse remote URL: "${url}"`); - this.code = this.name = UrlParseError.code; - this.data = { url }; + localNamespaces() { + return this.rules + .filter(rule => rule.matchPrefix) + .map(rule => rule.localPath.replace(/\/$/, '')) } } -/** @type {'UrlParseError'} */ -UrlParseError.code = 'UrlParseError'; -class UserCanceledError extends BaseError { - constructor() { - super(`The operation was canceled.`); - this.code = this.name = UserCanceledError.code; - this.data = {}; +function 
compareRefNames(a, b) { + // https://stackoverflow.com/a/40355107/2168416 + const _a = a.replace(/\^\{\}$/, ''); + const _b = b.replace(/\^\{\}$/, ''); + const tmp = -(_a < _b) || +(_a > _b); + if (tmp === 0) { + return a.endsWith('^{}') ? 1 : -1 } + return tmp } -/** @type {'UserCanceledError'} */ -UserCanceledError.code = 'UserCanceledError'; - - - -var Errors = /*#__PURE__*/Object.freeze({ - __proto__: null, - AlreadyExistsError: AlreadyExistsError, - AmbiguousError: AmbiguousError, - CheckoutConflictError: CheckoutConflictError, - CommitNotFetchedError: CommitNotFetchedError, - EmptyServerResponseError: EmptyServerResponseError, - FastForwardError: FastForwardError, - GitPushError: GitPushError, - HttpError: HttpError, - InternalError: InternalError, - InvalidFilepathError: InvalidFilepathError, - InvalidOidError: InvalidOidError, - InvalidRefNameError: InvalidRefNameError, - MaxDepthError: MaxDepthError, - MergeNotSupportedError: MergeNotSupportedError, - MissingNameError: MissingNameError, - MissingParameterError: MissingParameterError, - NoRefspecError: NoRefspecError, - NotFoundError: NotFoundError, - ObjectTypeError: ObjectTypeError, - ParseError: ParseError, - PushRejectedError: PushRejectedError, - RemoteCapabilityError: RemoteCapabilityError, - SmartHttpError: SmartHttpError, - UnknownTransportError: UnknownTransportError, - UnsafeFilepathError: UnsafeFilepathError, - UrlParseError: UrlParseError, - UserCanceledError: UserCanceledError -}); -function formatAuthor({ name, email, timestamp, timezoneOffset }) { - timezoneOffset = formatTimezoneOffset(timezoneOffset); - return `${name} <${email}> ${timestamp} ${timezoneOffset}` +function normalizePath(path) { + return path + .replace(/\/\.\//g, '/') // Replace '/./' with '/' + .replace(/\/{2,}/g, '/') // Replace consecutive '/' + .replace(/^\/\.$/, '/') // if path === '/.' return '/' + .replace(/^\.\/$/, '.') // if path === './' return '.' + .replace(/^\.\//, '') // Remove leading './' + .replace(/\/\.$/, '') // Remove trailing '/.' + .replace(/(.+)\/$/, '$1') // Remove trailing '/' + .replace(/^$/, '.') // if path === '' return '.' } -// The amount of effort that went into crafting these cases to handle -// -0 (just so we don't lose that information when parsing and reconstructing) -// but can also default to +0 was extraordinary. +// For some reason path.posix.join is undefined in webpack -function formatTimezoneOffset(minutes) { - const sign = simpleSign(negateExceptForZero(minutes)); - minutes = Math.abs(minutes); - const hours = Math.floor(minutes / 60); - minutes -= hours * 60; - let strHours = String(hours); - let strMinutes = String(minutes); - if (strHours.length < 2) strHours = '0' + strHours; - if (strMinutes.length < 2) strMinutes = '0' + strMinutes; - return (sign === -1 ? '-' : '+') + strHours + strMinutes +function join(...parts) { + return normalizePath(parts.map(normalizePath).join('/')) } -function simpleSign(n) { - return Math.sign(n) || (Object.is(n, -0) ? -1 : 1) -} +// This is straight from parse_unit_factor in config.c of canonical git +const num = val => { + val = val.toLowerCase(); + let n = parseInt(val); + if (val.endsWith('k')) n *= 1024; + if (val.endsWith('m')) n *= 1024 * 1024; + if (val.endsWith('g')) n *= 1024 * 1024 * 1024; + return n +}; -function negateExceptForZero(n) { - return n === 0 ? 
n : -n -} +// This is straight from git_parse_maybe_bool_text in config.c of canonical git +const bool = val => { + val = val.trim().toLowerCase(); + if (val === 'true' || val === 'yes' || val === 'on') return true + if (val === 'false' || val === 'no' || val === 'off') return false + throw Error( + `Expected 'true', 'false', 'yes', 'no', 'on', or 'off', but got ${val}` + ) +}; -function normalizeNewlines(str) { - // remove all - str = str.replace(/\r/g, ''); - // no extra newlines up front - str = str.replace(/^\n+/, ''); - // and a single newline at the end - str = str.replace(/\n+$/, '') + '\n'; - return str -} +const schema = { + core: { + filemode: bool, + bare: bool, + logallrefupdates: bool, + symlinks: bool, + ignorecase: bool, + bigFileThreshold: num, + }, +}; -function parseAuthor(author) { - const [, name, email, timestamp, offset] = author.match( - /^(.*) <(.*)> (.*) (.*)$/ - ); - return { - name: name, - email: email, - timestamp: Number(timestamp), - timezoneOffset: parseTimezoneOffset(offset), - } -} +// https://git-scm.com/docs/git-config#_syntax -// The amount of effort that went into crafting these cases to handle -// -0 (just so we don't lose that information when parsing and reconstructing) -// but can also default to +0 was extraordinary. +// section starts with [ and ends with ] +// section is alphanumeric (ASCII) with - and . +// section is case insensitive +// subsection is optionnal +// subsection is specified after section and one or more spaces +// subsection is specified between double quotes +const SECTION_LINE_REGEX = /^\[([A-Za-z0-9-.]+)(?: "(.*)")?\]$/; +const SECTION_REGEX = /^[A-Za-z0-9-.]+$/; -function parseTimezoneOffset(offset) { - let [, sign, hours, minutes] = offset.match(/(\+|-)(\d\d)(\d\d)/); - minutes = (sign === '+' ? 1 : -1) * (Number(hours) * 60 + Number(minutes)); - return negateExceptForZero$1(minutes) -} +// variable lines contain a name, and equal sign and then a value +// variable lines can also only contain a name (the implicit value is a boolean true) +// variable name is alphanumeric (ASCII) with - +// variable name starts with an alphabetic character +// variable name is case insensitive +const VARIABLE_LINE_REGEX = /^([A-Za-z][A-Za-z-]*)(?: *= *(.*))?$/; +const VARIABLE_NAME_REGEX = /^[A-Za-z][A-Za-z-]*$/; -function negateExceptForZero$1(n) { - return n === 0 ? n : -n -} +const VARIABLE_VALUE_COMMENT_REGEX = /^(.*?)( *[#;].*)$/; -class GitAnnotatedTag { - constructor(tag) { - if (typeof tag === 'string') { - this._tag = tag; - } else if (Buffer.isBuffer(tag)) { - this._tag = tag.toString('utf8'); - } else if (typeof tag === 'object') { - this._tag = GitAnnotatedTag.render(tag); - } else { - throw new InternalError( - 'invalid type passed to GitAnnotatedTag constructor' - ) - } +const extractSectionLine = line => { + const matches = SECTION_LINE_REGEX.exec(line); + if (matches != null) { + const [section, subsection] = matches.slice(1); + return [section, subsection] } + return null +}; - static from(tag) { - return new GitAnnotatedTag(tag) +const extractVariableLine = line => { + const matches = VARIABLE_LINE_REGEX.exec(line); + if (matches != null) { + const [name, rawValue = 'true'] = matches.slice(1); + const valueWithoutComments = removeComments(rawValue); + const valueWithoutQuotes = removeQuotes(valueWithoutComments); + return [name, valueWithoutQuotes] } + return null +}; - static render(obj) { - return `object ${obj.object} -type ${obj.type} -tag ${obj.tag} -tagger ${formatAuthor(obj.tagger)} - -${obj.message} -${obj.gpgsig ? 
obj.gpgsig : ''}` +const removeComments = rawValue => { + const commentMatches = VARIABLE_VALUE_COMMENT_REGEX.exec(rawValue); + if (commentMatches == null) { + return rawValue + } + const [valueWithoutComment, comment] = commentMatches.slice(1); + // if odd number of quotes before and after comment => comment is escaped + if ( + hasOddNumberOfQuotes(valueWithoutComment) && + hasOddNumberOfQuotes(comment) + ) { + return `${valueWithoutComment}${comment}` } + return valueWithoutComment +}; + +const hasOddNumberOfQuotes = text => { + const numberOfQuotes = (text.match(/(?:^|[^\\])"/g) || []).length; + return numberOfQuotes % 2 !== 0 +}; - justHeaders() { - return this._tag.slice(0, this._tag.indexOf('\n\n')) - } +const removeQuotes = text => { + return text.split('').reduce((newText, c, idx, text) => { + const isQuote = c === '"' && text[idx - 1] !== '\\'; + const isEscapeForQuote = c === '\\' && text[idx + 1] === '"'; + if (isQuote || isEscapeForQuote) { + return newText + } + return newText + c + }, '') +}; - message() { - const tag = this.withoutSignature(); - return tag.slice(tag.indexOf('\n\n') + 2) - } +const lower = text => { + return text != null ? text.toLowerCase() : null +}; - parse() { - return Object.assign(this.headers(), { - message: this.message(), - gpgsig: this.gpgsig(), - }) - } +const getPath = (section, subsection, name) => { + return [lower(section), subsection, lower(name)] + .filter(a => a != null) + .join('.') +}; - render() { - return this._tag - } +const findLastIndex = (array, callback) => { + return array.reduce((lastIndex, item, index) => { + return callback(item) ? index : lastIndex + }, -1) +}; - headers() { - const headers = this.justHeaders().split('\n'); - const hs = []; - for (const h of headers) { - if (h[0] === ' ') { - // combine with previous header (without space indent) - hs[hs.length - 1] += '\n' + h.slice(1); - } else { - hs.push(h); - } - } - const obj = {}; - for (const h of hs) { - const key = h.slice(0, h.indexOf(' ')); - const value = h.slice(h.indexOf(' ') + 1); - if (Array.isArray(obj[key])) { - obj[key].push(value); +// Note: there are a LOT of edge cases that aren't covered (e.g. keys in sections that also +// have subsections, [include] directives, etc. 
+class GitConfig { + constructor(text) { + let section = null; + let subsection = null; + this.parsedConfig = text.split('\n').map(line => { + let name = null; + let value = null; + + const trimmedLine = line.trim(); + const extractedSection = extractSectionLine(trimmedLine); + const isSection = extractedSection != null; + if (isSection) { + ;[section, subsection] = extractedSection; } else { - obj[key] = value; + const extractedVariable = extractVariableLine(trimmedLine); + const isVariable = extractedVariable != null; + if (isVariable) { + ;[name, value] = extractedVariable; + } } - } - if (obj.tagger) { - obj.tagger = parseAuthor(obj.tagger); - } - if (obj.committer) { - obj.committer = parseAuthor(obj.committer); - } - return obj - } - withoutSignature() { - const tag = normalizeNewlines(this._tag); - if (tag.indexOf('\n-----BEGIN PGP SIGNATURE-----') === -1) return tag - return tag.slice(0, tag.lastIndexOf('\n-----BEGIN PGP SIGNATURE-----')) + const path = getPath(section, subsection, name); + return { line, isSection, section, subsection, name, value, path } + }); } - gpgsig() { - if (this._tag.indexOf('\n-----BEGIN PGP SIGNATURE-----') === -1) return - const signature = this._tag.slice( - this._tag.indexOf('-----BEGIN PGP SIGNATURE-----'), - this._tag.indexOf('-----END PGP SIGNATURE-----') + - '-----END PGP SIGNATURE-----'.length - ); - return normalizeNewlines(signature) + static from(text) { + return new GitConfig(text) } - payload() { - return this.withoutSignature() + '\n' + async get(path, getall = false) { + const allValues = this.parsedConfig + .filter(config => config.path === path.toLowerCase()) + .map(({ section, name, value }) => { + const fn = schema[section] && schema[section][name]; + return fn ? fn(value) : value + }); + return getall ? 
allValues : allValues.pop() } - toObject() { - return Buffer.from(this._tag, 'utf8') + async getall(path) { + return this.get(path, true) } - static async sign(tag, sign, secretKey) { - const payload = tag.payload(); - let { signature } = await sign({ payload, secretKey }); - // renormalize the line endings to the one true line-ending - signature = normalizeNewlines(signature); - const signedTag = payload + signature; - // return a new tag object - return GitAnnotatedTag.from(signedTag) + async getSubsections(section) { + return this.parsedConfig + .filter(config => config.section === section && config.isSection) + .map(config => config.subsection) } -} - -function indent(str) { - return ( - str - .trim() - .split('\n') - .map(x => ' ' + x) - .join('\n') + '\n' - ) -} - -function outdent(str) { - return str - .split('\n') - .map(x => x.replace(/^ /, '')) - .join('\n') -} -class GitCommit { - constructor(commit) { - if (typeof commit === 'string') { - this._commit = commit; - } else if (Buffer.isBuffer(commit)) { - this._commit = commit.toString('utf8'); - } else if (typeof commit === 'object') { - this._commit = GitCommit.render(commit); - } else { - throw new InternalError('invalid type passed to GitCommit constructor') - } + async deleteSection(section, subsection) { + this.parsedConfig = this.parsedConfig.filter( + config => + !(config.section === section && config.subsection === subsection) + ); } - static fromPayloadSignature({ payload, signature }) { - const headers = GitCommit.justHeaders(payload); - const message = GitCommit.justMessage(payload); - const commit = normalizeNewlines( - headers + '\ngpgsig' + indent(signature) + '\n' + message - ); - return new GitCommit(commit) + async append(path, value) { + return this.set(path, value, true) } - static from(commit) { - return new GitCommit(commit) + async set(path, value, append = false) { + const configIndex = findLastIndex( + this.parsedConfig, + config => config.path === path.toLowerCase() + ); + if (value == null) { + if (configIndex !== -1) { + this.parsedConfig.splice(configIndex, 1); + } + } else { + if (configIndex !== -1) { + const config = this.parsedConfig[configIndex]; + const modifiedConfig = Object.assign({}, config, { + value, + modified: true, + }); + if (append) { + this.parsedConfig.splice(configIndex + 1, 0, modifiedConfig); + } else { + this.parsedConfig[configIndex] = modifiedConfig; + } + } else { + const pathSegments = path.split('.'); + const section = pathSegments.shift().toLowerCase(); + const name = pathSegments.pop(); + const subsection = pathSegments.length + ? pathSegments.join('.').toLowerCase() + : undefined; + const sectionPath = subsection ? section + '.' 
+ subsection : section; + const sectionIndex = this.parsedConfig.findIndex( + config => config.path === sectionPath + ); + const newConfig = { + section, + subsection, + name, + value, + modified: true, + path: getPath(section, subsection, name), + }; + if (SECTION_REGEX.test(section) && VARIABLE_NAME_REGEX.test(name)) { + if (sectionIndex >= 0) { + // Reuse existing section + this.parsedConfig.splice(sectionIndex + 1, 0, newConfig); + } else { + // Add a new section + const newSection = { + section, + subsection, + modified: true, + path: getPath(section, subsection, null), + }; + this.parsedConfig.push(newSection, newConfig); + } + } + } + } } - toObject() { - return Buffer.from(this._commit, 'utf8') + toString() { + return this.parsedConfig + .map(({ line, section, subsection, name, value, modified = false }) => { + if (!modified) { + return line + } + if (name != null && value != null) { + return `\t${name} = ${value}` + } + if (subsection != null) { + return `[${section} "${subsection}"]` + } + return `[${section}]` + }) + .join('\n') } +} - // Todo: allow setting the headers and message - headers() { - return this.parseHeaders() +class GitConfigManager { + static async get({ fs, gitdir }) { + // We can improve efficiency later if needed. + // TODO: read from full list of git config files + const text = await fs.read(`${gitdir}/config`, { encoding: 'utf8' }); + return GitConfig.from(text) } - // Todo: allow setting the headers and message - message() { - return GitCommit.justMessage(this._commit) + static async save({ fs, gitdir, config }) { + // We can improve efficiency later if needed. + // TODO: handle saving to the correct global/user/repo location + await fs.write(`${gitdir}/config`, config.toString(), { + encoding: 'utf8', + }); } +} - parse() { - return Object.assign({ message: this.message() }, this.headers()) - } +// This is a convenience wrapper for reading and writing files in the 'refs' directory. 
- static justMessage(commit) { - return normalizeNewlines(commit.slice(commit.indexOf('\n\n') + 2)) - } +// @see https://git-scm.com/docs/git-rev-parse.html#_specifying_revisions +const refpaths = ref => [ + `${ref}`, + `refs/${ref}`, + `refs/tags/${ref}`, + `refs/heads/${ref}`, + `refs/remotes/${ref}`, + `refs/remotes/${ref}/HEAD`, +]; - static justHeaders(commit) { - return commit.slice(0, commit.indexOf('\n\n')) - } +// @see https://git-scm.com/docs/gitrepository-layout +const GIT_FILES = ['config', 'description', 'index', 'shallow', 'commondir']; - parseHeaders() { - const headers = GitCommit.justHeaders(this._commit).split('\n'); - const hs = []; - for (const h of headers) { - if (h[0] === ' ') { - // combine with previous header (without space indent) - hs[hs.length - 1] += '\n' + h.slice(1); - } else { - hs.push(h); +class GitRefManager { + static async updateRemoteRefs({ + fs, + gitdir, + remote, + refs, + symrefs, + tags, + refspecs = undefined, + prune = false, + pruneTags = false, + }) { + // Validate input + for (const value of refs.values()) { + if (!value.match(/[0-9a-f]{40}/)) { + throw new InvalidOidError(value) } } - const obj = { - parent: [], - }; - for (const h of hs) { - const key = h.slice(0, h.indexOf(' ')); - const value = h.slice(h.indexOf(' ') + 1); - if (Array.isArray(obj[key])) { - obj[key].push(value); - } else { - obj[key] = value; + const config = await GitConfigManager.get({ fs, gitdir }); + if (!refspecs) { + refspecs = await config.getall(`remote.${remote}.fetch`); + if (refspecs.length === 0) { + throw new NoRefspecError(remote) } + // There's some interesting behavior with HEAD that doesn't follow the refspec. + refspecs.unshift(`+HEAD:refs/remotes/${remote}/HEAD`); } - if (obj.author) { - obj.author = parseAuthor(obj.author); + const refspec = GitRefSpecSet.from(refspecs); + const actualRefsToWrite = new Map(); + // Delete all current tags if the pruneTags argument is true. + if (pruneTags) { + const tags = await GitRefManager.listRefs({ + fs, + gitdir, + filepath: 'refs/tags', + }); + await GitRefManager.deleteRefs({ + fs, + gitdir, + refs: tags.map(tag => `refs/tags/${tag}`), + }); } - if (obj.committer) { - obj.committer = parseAuthor(obj.committer); + // Add all tags if the fetch tags argument is true. + if (tags) { + for (const serverRef of refs.keys()) { + if (serverRef.startsWith('refs/tags') && !serverRef.endsWith('^{}')) { + // Git's behavior is to only fetch tags that do not conflict with tags already present. + if (!(await GitRefManager.exists({ fs, gitdir, ref: serverRef }))) { + // Always use the object id of the tag itself, and not the peeled object id. 
+ const oid = refs.get(serverRef); + actualRefsToWrite.set(serverRef, oid); + } + } + } } - return obj - } - - static renderHeaders(obj) { - let headers = ''; - if (obj.tree) { - headers += `tree ${obj.tree}\n`; - } else { - headers += `tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\n`; // the null tree + // Combine refs and symrefs giving symrefs priority + const refTranslations = refspec.translate([...refs.keys()]); + for (const [serverRef, translatedRef] of refTranslations) { + const value = refs.get(serverRef); + actualRefsToWrite.set(translatedRef, value); } - if (obj.parent) { - if (obj.parent.length === undefined) { - throw new InternalError(`commit 'parent' property should be an array`) - } - for (const p of obj.parent) { - headers += `parent ${p}\n`; + const symrefTranslations = refspec.translate([...symrefs.keys()]); + for (const [serverRef, translatedRef] of symrefTranslations) { + const value = symrefs.get(serverRef); + const symtarget = refspec.translateOne(value); + if (symtarget) { + actualRefsToWrite.set(translatedRef, `ref: ${symtarget}`); } } - const author = obj.author; - headers += `author ${formatAuthor(author)}\n`; - const committer = obj.committer || obj.author; - headers += `committer ${formatAuthor(committer)}\n`; - if (obj.gpgsig) { - headers += 'gpgsig' + indent(obj.gpgsig); - } - return headers - } - - static render(obj) { - return GitCommit.renderHeaders(obj) + '\n' + normalizeNewlines(obj.message) - } - - render() { - return this._commit - } - - withoutSignature() { - const commit = normalizeNewlines(this._commit); - if (commit.indexOf('\ngpgsig') === -1) return commit - const headers = commit.slice(0, commit.indexOf('\ngpgsig')); - const message = commit.slice( - commit.indexOf('-----END PGP SIGNATURE-----\n') + - '-----END PGP SIGNATURE-----\n'.length - ); - return normalizeNewlines(headers + '\n' + message) - } - - isolateSignature() { - const signature = this._commit.slice( - this._commit.indexOf('-----BEGIN PGP SIGNATURE-----'), - this._commit.indexOf('-----END PGP SIGNATURE-----') + - '-----END PGP SIGNATURE-----'.length - ); - return outdent(signature) - } - - static async sign(commit, sign, secretKey) { - const payload = commit.withoutSignature(); - const message = GitCommit.justMessage(commit._commit); - let { signature } = await sign({ payload, secretKey }); - // renormalize the line endings to the one true line-ending - signature = normalizeNewlines(signature); - const headers = GitCommit.justHeaders(commit._commit); - const signedCommit = - headers + '\n' + 'gpgsig' + indent(signature) + '\n' + message; - // return a new commit object - return GitCommit.from(signedCommit) - } -} - -async function resolveTree({ fs, cache, gitdir, oid }) { - // Empty tree - bypass `readObject` - if (oid === '4b825dc642cb6eb9a060e54bf8d69288fbee4904') { - return { tree: GitTree.from([]), oid } - } - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - // Resolve annotated tag objects to whatever - if (type === 'tag') { - oid = GitAnnotatedTag.from(object).parse().object; - return resolveTree({ fs, cache, gitdir, oid }) - } - // Resolve commits to trees - if (type === 'commit') { - oid = GitCommit.from(object).parse().tree; - return resolveTree({ fs, cache, gitdir, oid }) - } - if (type !== 'tree') { - throw new ObjectTypeError(oid, type, 'tree') - } - return { tree: GitTree.from(object), oid } -} - -class GitWalkerRepo { - constructor({ fs, gitdir, ref, cache }) { - this.fs = fs; - this.cache = cache; - this.gitdir = gitdir; - this.mapPromise = 
(async () => { - const map = new Map(); - let oid; - try { - oid = await GitRefManager.resolve({ fs, gitdir, ref }); - } catch (e) { - if (e instanceof NotFoundError) { - // Handle fresh branches with no commits - oid = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'; + // If `prune` argument is true, clear out the existing local refspec roots + const pruned = []; + if (prune) { + for (const filepath of refspec.localNamespaces()) { + const refs = ( + await GitRefManager.listRefs({ + fs, + gitdir, + filepath, + }) + ).map(file => `${filepath}/${file}`); + for (const ref of refs) { + if (!actualRefsToWrite.has(ref)) { + pruned.push(ref); + } } } - const tree = await resolveTree({ fs, cache: this.cache, gitdir, oid }); - tree.type = 'tree'; - tree.mode = '40000'; - map.set('.', tree); - return map - })(); - const walker = this; - this.ConstructEntry = class TreeEntry { - constructor(fullpath) { - this._fullpath = fullpath; - this._type = false; - this._mode = false; - this._stat = false; - this._content = false; - this._oid = false; + if (pruned.length > 0) { + await GitRefManager.deleteRefs({ fs, gitdir, refs: pruned }); } + } + // Update files + // TODO: For large repos with a history of thousands of pull requests + // (i.e. gitlab-ce) it would be vastly more efficient to write them + // to .git/packed-refs. + // The trick is to make sure we a) don't write a packed ref that is + // already shadowed by a loose ref and b) don't loose any refs already + // in packed-refs. Doing this efficiently may be difficult. A + // solution that might work is + // a) load the current packed-refs file + // b) add actualRefsToWrite, overriding the existing values if present + // c) enumerate all the loose refs currently in .git/refs/remotes/${remote} + // d) overwrite their value with the new value. + // Examples of refs we need to avoid writing in loose format for efficieny's sake + // are .git/refs/remotes/origin/refs/remotes/remote_mirror_3059 + // and .git/refs/remotes/origin/refs/merge-requests + for (const [key, value] of actualRefsToWrite) { + await fs.write(join(gitdir, key), `${value.trim()}\n`, 'utf8'); + } + return { pruned } + } - async type() { - return walker.type(this) - } + // TODO: make this less crude? 
+ static async writeRef({ fs, gitdir, ref, value }) { + // Validate input + if (!value.match(/[0-9a-f]{40}/)) { + throw new InvalidOidError(value) + } + await fs.write(join(gitdir, ref), `${value.trim()}\n`, 'utf8'); + } - async mode() { - return walker.mode(this) - } + static async writeSymbolicRef({ fs, gitdir, ref, value }) { + await fs.write(join(gitdir, ref), 'ref: ' + `${value.trim()}\n`, 'utf8'); + } - async stat() { - return walker.stat(this) + static async deleteRef({ fs, gitdir, ref }) { + return GitRefManager.deleteRefs({ fs, gitdir, refs: [ref] }) + } + + static async deleteRefs({ fs, gitdir, refs }) { + // Delete regular ref + await Promise.all(refs.map(ref => fs.rm(join(gitdir, ref)))); + // Delete any packed ref + let text = await fs.read(`${gitdir}/packed-refs`, { encoding: 'utf8' }); + const packed = GitPackedRefs.from(text); + const beforeSize = packed.refs.size; + for (const ref of refs) { + if (packed.refs.has(ref)) { + packed.delete(ref); } + } + if (packed.refs.size < beforeSize) { + text = packed.toString(); + await fs.write(`${gitdir}/packed-refs`, text, { encoding: 'utf8' }); + } + } - async content() { - return walker.content(this) + /** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.ref + * @param {number} [args.depth] + * @returns {Promise} + */ + static async resolve({ fs, gitdir, ref, depth = undefined }) { + if (depth !== undefined) { + depth--; + if (depth === -1) { + return ref } + } + let sha; + // Is it a ref pointer? + if (ref.startsWith('ref: ')) { + ref = ref.slice('ref: '.length); + return GitRefManager.resolve({ fs, gitdir, ref, depth }) + } + // Is it a complete and valid SHA? + if (ref.length === 40 && /[0-9a-f]{40}/.test(ref)) { + return ref + } + // We need to alternate between the file system and the packed-refs + const packedMap = await GitRefManager.packedRefs({ fs, gitdir }); + // Look in all the proper paths, in this order + const allpaths = refpaths(ref).filter(p => !GIT_FILES.includes(p)); // exclude git system files (#709) - async oid() { - return walker.oid(this) + for (const ref of allpaths) { + sha = + (await fs.read(`${gitdir}/${ref}`, { encoding: 'utf8' })) || + packedMap.get(ref); + if (sha) { + return GitRefManager.resolve({ fs, gitdir, ref: sha.trim(), depth }) } - }; + } + // Do we give up? + throw new NotFoundError(ref) } - async readdir(entry) { - const filepath = entry._fullpath; - const { fs, cache, gitdir } = this; - const map = await this.mapPromise; - const obj = map.get(filepath); - if (!obj) throw new Error(`No obj for ${filepath}`) - const oid = obj.oid; - if (!oid) throw new Error(`No oid for obj ${JSON.stringify(obj)}`) - if (obj.type !== 'tree') { - // TODO: support submodules (type === 'commit') - return null + static async exists({ fs, gitdir, ref }) { + try { + await GitRefManager.expand({ fs, gitdir, ref }); + return true + } catch (err) { + return false } - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - if (type !== obj.type) { - throw new ObjectTypeError(oid, type, obj.type) + } + + static async expand({ fs, gitdir, ref }) { + // Is it a complete and valid SHA? 
+ if (ref.length === 40 && /[0-9a-f]{40}/.test(ref)) { + return ref } - const tree = GitTree.from(object); - // cache all entries - for (const entry of tree) { - map.set(join(filepath, entry.path), entry); + // We need to alternate between the file system and the packed-refs + const packedMap = await GitRefManager.packedRefs({ fs, gitdir }); + // Look in all the proper paths, in this order + const allpaths = refpaths(ref); + for (const ref of allpaths) { + if (await fs.exists(`${gitdir}/${ref}`)) return ref + if (packedMap.has(ref)) return ref } - return tree.entries().map(entry => join(filepath, entry.path)) + // Do we give up? + throw new NotFoundError(ref) } - async type(entry) { - if (entry._type === false) { - const map = await this.mapPromise; - const { type } = map.get(entry._fullpath); - entry._type = type; + static async expandAgainstMap({ ref, map }) { + // Look in all the proper paths, in this order + const allpaths = refpaths(ref); + for (const ref of allpaths) { + if (await map.has(ref)) return ref } - return entry._type + // Do we give up? + throw new NotFoundError(ref) } - async mode(entry) { - if (entry._mode === false) { - const map = await this.mapPromise; - const { mode } = map.get(entry._fullpath); - entry._mode = normalizeMode(parseInt(mode, 8)); + static resolveAgainstMap({ ref, fullref = ref, depth = undefined, map }) { + if (depth !== undefined) { + depth--; + if (depth === -1) { + return { fullref, oid: ref } + } } - return entry._mode + // Is it a ref pointer? + if (ref.startsWith('ref: ')) { + ref = ref.slice('ref: '.length); + return GitRefManager.resolveAgainstMap({ ref, fullref, depth, map }) + } + // Is it a complete and valid SHA? + if (ref.length === 40 && /[0-9a-f]{40}/.test(ref)) { + return { fullref, oid: ref } + } + // Look in all the proper paths, in this order + const allpaths = refpaths(ref); + for (const ref of allpaths) { + const sha = map.get(ref); + if (sha) { + return GitRefManager.resolveAgainstMap({ + ref: sha.trim(), + fullref: ref, + depth, + map, + }) + } + } + // Do we give up? 
+ throw new NotFoundError(ref) } - async stat(_entry) {} + static async packedRefs({ fs, gitdir }) { + const text = await fs.read(`${gitdir}/packed-refs`, { encoding: 'utf8' }); + const packed = GitPackedRefs.from(text); + return packed.refs + } - async content(entry) { - if (entry._content === false) { - const map = await this.mapPromise; - const { fs, cache, gitdir } = this; - const obj = map.get(entry._fullpath); - const oid = obj.oid; - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - if (type !== 'blob') { - entry._content = undefined; - } else { - entry._content = new Uint8Array(object); + // List all the refs that match the `filepath` prefix + static async listRefs({ fs, gitdir, filepath }) { + const packedMap = GitRefManager.packedRefs({ fs, gitdir }); + let files = null; + try { + files = await fs.readdirDeep(`${gitdir}/${filepath}`); + files = files.map(x => x.replace(`${gitdir}/${filepath}/`, '')); + } catch (err) { + files = []; + } + + for (let key of (await packedMap).keys()) { + // filter by prefix + if (key.startsWith(filepath)) { + // remove prefix + key = key.replace(filepath + '/', ''); + // Don't include duplicates; the loose files have precedence anyway + if (!files.includes(key)) { + files.push(key); + } } } - return entry._content + // since we just appended things onto an array, we need to sort them now + files.sort(compareRefNames); + return files } - async oid(entry) { - if (entry._oid === false) { - const map = await this.mapPromise; - const obj = map.get(entry._fullpath); - entry._oid = obj.oid; + static async listBranches({ fs, gitdir, remote }) { + if (remote) { + return GitRefManager.listRefs({ + fs, + gitdir, + filepath: `refs/remotes/${remote}`, + }) + } else { + return GitRefManager.listRefs({ fs, gitdir, filepath: `refs/heads` }) } - return entry._oid } + + static async listTags({ fs, gitdir }) { + const tags = await GitRefManager.listRefs({ + fs, + gitdir, + filepath: `refs/tags`, + }); + return tags.filter(x => !x.endsWith('^{}')) + } +} + +function compareTreeEntryPath(a, b) { + // Git sorts tree entries as if there is a trailing slash on directory names. + return compareStrings(appendSlashIfDir(a), appendSlashIfDir(b)) } -// @ts-check +function appendSlashIfDir(entry) { + return entry.mode === '040000' ? 
entry.path + '/' : entry.path +} /** - * @param {object} args - * @param {string} [args.ref='HEAD'] - * @returns {Walker} + * + * @typedef {Object} TreeEntry + * @property {string} mode - the 6 digit hexadecimal mode + * @property {string} path - the name of the file or directory + * @property {string} oid - the SHA-1 object id of the blob or tree + * @property {'commit'|'blob'|'tree'} type - the type of object */ -function TREE({ ref = 'HEAD' }) { - const o = Object.create(null); - Object.defineProperty(o, GitWalkSymbol, { - value: function({ fs, gitdir, cache }) { - return new GitWalkerRepo({ fs, gitdir, ref, cache }) - }, - }); - Object.freeze(o); - return o -} -// @ts-check +function mode2type$1(mode) { + // prettier-ignore + switch (mode) { + case '040000': return 'tree' + case '100644': return 'blob' + case '100755': return 'blob' + case '120000': return 'blob' + case '160000': return 'commit' + } + throw new InternalError(`Unexpected GitTree entry mode: ${mode}`) +} -class GitWalkerFs { - constructor({ fs, dir, gitdir, cache }) { - this.fs = fs; - this.cache = cache; - this.dir = dir; - this.gitdir = gitdir; - const walker = this; - this.ConstructEntry = class WorkdirEntry { - constructor(fullpath) { - this._fullpath = fullpath; - this._type = false; - this._mode = false; - this._stat = false; - this._content = false; - this._oid = false; - } +function parseBuffer(buffer) { + const _entries = []; + let cursor = 0; + while (cursor < buffer.length) { + const space = buffer.indexOf(32, cursor); + if (space === -1) { + throw new InternalError( + `GitTree: Error parsing buffer at byte location ${cursor}: Could not find the next space character.` + ) + } + const nullchar = buffer.indexOf(0, cursor); + if (nullchar === -1) { + throw new InternalError( + `GitTree: Error parsing buffer at byte location ${cursor}: Could not find the next null character.` + ) + } + let mode = buffer.slice(cursor, space).toString('utf8'); + if (mode === '40000') mode = '040000'; // makes it line up neater in printed output + const type = mode2type$1(mode); + const path = buffer.slice(space + 1, nullchar).toString('utf8'); - async type() { - return walker.type(this) - } + // Prevent malicious git repos from writing to "..\foo" on clone etc + if (path.includes('\\') || path.includes('/')) { + throw new UnsafeFilepathError(path) + } - async mode() { - return walker.mode(this) - } + const oid = buffer.slice(nullchar + 1, nullchar + 21).toString('hex'); + cursor = nullchar + 21; + _entries.push({ mode, path, oid, type }); + } + return _entries +} - async stat() { - return walker.stat(this) - } +function limitModeToAllowed(mode) { + if (typeof mode === 'number') { + mode = mode.toString(8); + } + // tree + if (mode.match(/^0?4.*/)) return '040000' // Directory + if (mode.match(/^1006.*/)) return '100644' // Regular non-executable file + if (mode.match(/^1007.*/)) return '100755' // Regular executable file + if (mode.match(/^120.*/)) return '120000' // Symbolic link + if (mode.match(/^160.*/)) return '160000' // Commit (git submodule reference) + throw new InternalError(`Could not understand file mode: ${mode}`) +} - async content() { - return walker.content(this) - } +function nudgeIntoShape(entry) { + if (!entry.oid && entry.sha) { + entry.oid = entry.sha; // Github + } + entry.mode = limitModeToAllowed(entry.mode); // index + if (!entry.type) { + entry.type = mode2type$1(entry.mode); // index + } + return entry +} - async oid() { - return walker.oid(this) - } - }; +class GitTree { + constructor(entries) { + if 
(Buffer.isBuffer(entries)) { + this._entries = parseBuffer(entries); + } else if (Array.isArray(entries)) { + this._entries = entries.map(nudgeIntoShape); + } else { + throw new InternalError('invalid type passed to GitTree constructor') + } + // Tree entries are not sorted alphabetically in the usual sense (see `compareTreeEntryPath`) + // but it is important later on that these be sorted in the same order as they would be returned from readdir. + this._entries.sort(comparePath); } - async readdir(entry) { - const filepath = entry._fullpath; - const { fs, dir } = this; - const names = await fs.readdir(join(dir, filepath)); - if (names === null) return null - return names.map(name => join(filepath, name)) + static from(tree) { + return new GitTree(tree) } - async type(entry) { - if (entry._type === false) { - await entry.stat(); - } - return entry._type + render() { + return this._entries + .map(entry => `${entry.mode} ${entry.type} ${entry.oid} ${entry.path}`) + .join('\n') } - async mode(entry) { - if (entry._mode === false) { - await entry.stat(); - } - return entry._mode + toObject() { + // Adjust the sort order to match git's + const entries = [...this._entries]; + entries.sort(compareTreeEntryPath); + return Buffer.concat( + entries.map(entry => { + const mode = Buffer.from(entry.mode.replace(/^0/, '')); + const space = Buffer.from(' '); + const path = Buffer.from(entry.path, 'utf8'); + const nullchar = Buffer.from([0]); + const oid = Buffer.from(entry.oid, 'hex'); + return Buffer.concat([mode, space, path, nullchar, oid]) + }) + ) } - async stat(entry) { - if (entry._stat === false) { - const { fs, dir } = this; - let stat = await fs.lstat(`${dir}/${entry._fullpath}`); - if (!stat) { - throw new Error( - `ENOENT: no such file or directory, lstat '${entry._fullpath}'` - ) - } - let type = stat.isDirectory() ? 'tree' : 'blob'; - if (type === 'blob' && !stat.isFile() && !stat.isSymbolicLink()) { - type = 'special'; - } - entry._type = type; - stat = normalizeStats(stat); - entry._mode = stat.mode; - // workaround for a BrowserFS edge case - if (stat.size === -1 && entry._actualSize) { - stat.size = entry._actualSize; - } - entry._stat = stat; - } - return entry._stat + /** + * @returns {TreeEntry[]} + */ + entries() { + return this._entries } - async content(entry) { - if (entry._content === false) { - const { fs, dir } = this; - if ((await entry.type()) === 'tree') { - entry._content = undefined; - } else { - const content = await fs.read(`${dir}/${entry._fullpath}`); - // workaround for a BrowserFS edge case - entry._actualSize = content.length; - if (entry._stat && entry._stat.size === -1) { - entry._stat.size = entry._actualSize; - } - entry._content = new Uint8Array(content); - } + *[Symbol.iterator]() { + for (const entry of this._entries) { + yield entry; } - return entry._content } - - async oid(entry) { - if (entry._oid === false) { - const { fs, gitdir, cache } = this; - let oid; - // See if we can use the SHA1 hash in the index. 
- await GitIndexManager.acquire({ fs, gitdir, cache }, async function( - index - ) { - const stage = index.entriesMap.get(entry._fullpath); - const stats = await entry.stat(); - if (!stage || compareStats(stats, stage)) { - const content = await entry.content(); - if (content === undefined) { - oid = undefined; - } else { - oid = await shasum( - GitObject.wrap({ type: 'blob', object: await entry.content() }) - ); - // Update the stats in the index so we will get a "cache hit" next time - // 1) if we can (because the oid and mode are the same) - // 2) and only if we need to (because other stats differ) - if ( - stage && - oid === stage.oid && - stats.mode === stage.mode && - compareStats(stats, stage) - ) { - index.insert({ - filepath: entry._fullpath, - stats, - oid: oid, - }); - } - } - } else { - // Use the index SHA1 rather than compute it - oid = stage.oid; - } - }); - entry._oid = oid; +} + +class GitObject { + static wrap({ type, object }) { + return Buffer.concat([ + Buffer.from(`${type} ${object.byteLength.toString()}\x00`), + Buffer.from(object), + ]) + } + + static unwrap(buffer) { + const s = buffer.indexOf(32); // first space + const i = buffer.indexOf(0); // first null value + const type = buffer.slice(0, s).toString('utf8'); // get type of object + const length = buffer.slice(s + 1, i).toString('utf8'); // get type of object + const actualLength = buffer.length - (i + 1); + // verify length + if (parseInt(length) !== actualLength) { + throw new InternalError( + `Length mismatch: expected ${length} bytes but got ${actualLength} instead.` + ) + } + return { + type, + object: Buffer.from(buffer.slice(i + 1)), } - return entry._oid } } -// @ts-check +async function readObjectLoose({ fs, gitdir, oid }) { + const source = `objects/${oid.slice(0, 2)}/${oid.slice(2)}`; + const file = await fs.read(`${gitdir}/${source}`); + if (!file) { + return null + } + return { object: file, format: 'deflated', source } +} /** - * @returns {Walker} + * @param {Buffer} delta + * @param {Buffer} source + * @returns {Buffer} */ -function WORKDIR() { - const o = Object.create(null); - Object.defineProperty(o, GitWalkSymbol, { - value: function({ fs, dir, gitdir, cache }) { - return new GitWalkerFs({ fs, dir, gitdir, cache }) - }, - }); - Object.freeze(o); - return o -} +function applyDelta(delta, source) { + const reader = new BufferCursor(delta); + const sourceSize = readVarIntLE(reader); -// @ts-check + if (sourceSize !== source.byteLength) { + throw new InternalError( + `applyDelta expected source buffer to be ${sourceSize} bytes but the provided buffer was ${source.length} bytes` + ) + } + const targetSize = readVarIntLE(reader); + let target; -// I'm putting this in a Manager because I reckon it could benefit -// from a LOT of cacheing. -class GitIgnoreManager { - static async isIgnored({ fs, dir, gitdir = join(dir, '.git'), filepath }) { - // ALWAYS ignore ".git" folders. - if (basename(filepath) === '.git') return true - // '.' is not a valid gitignore entry, so '.' 
is never ignored - if (filepath === '.') return false - // Check and load exclusion rules from project exclude file (.git/info/exclude) - let excludes = ''; - const excludesFile = join(gitdir, 'info', 'exclude'); - if (await fs.exists(excludesFile)) { - excludes = await fs.read(excludesFile, 'utf8'); + const firstOp = readOp(reader, source); + // Speed optimization - return raw buffer if it's just single simple copy + if (firstOp.byteLength === targetSize) { + target = firstOp; + } else { + // Otherwise, allocate a fresh buffer and slices + target = Buffer.alloc(targetSize); + const writer = new BufferCursor(target); + writer.copy(firstOp); + + while (!reader.eof()) { + writer.copy(readOp(reader, source)); } - // Find all the .gitignore files that could affect this file - const pairs = [ - { - gitignore: join(dir, '.gitignore'), - filepath, - }, - ]; - const pieces = filepath.split('/').filter(Boolean); - for (let i = 1; i < pieces.length; i++) { - const folder = pieces.slice(0, i).join('/'); - const file = pieces.slice(i).join('/'); - pairs.push({ - gitignore: join(dir, folder, '.gitignore'), - filepath: file, - }); + + const tell = writer.tell(); + if (targetSize !== tell) { + throw new InternalError( + `applyDelta expected target buffer to be ${targetSize} bytes but the resulting buffer was ${tell} bytes` + ) } - let ignoredStatus = false; - for (const p of pairs) { - let file; - try { - file = await fs.read(p.gitignore, 'utf8'); - } catch (err) { - if (err.code === 'NOENT') continue - } - const ign = ignore().add(excludes); - ign.add(file); - // If the parent directory is excluded, we are done. - // "It is not possible to re-include a file if a parent directory of that file is excluded. Git doesn’t list excluded directories for performance reasons, so any patterns on contained files have no effect, no matter where they are defined." - // source: https://git-scm.com/docs/gitignore - const parentdir = dirname(p.filepath); - if (parentdir !== '.' && ign.ignores(parentdir)) return true - // If the file is currently ignored, test for UNignoring. - if (ignoredStatus) { - ignoredStatus = !ign.test(p.filepath).unignored; - } else { - ignoredStatus = ign.test(p.filepath).ignored; - } + } + return target +} + +function readVarIntLE(reader) { + let result = 0; + let shift = 0; + let byte = null; + do { + byte = reader.readUInt8(); + result |= (byte & 0b01111111) << shift; + shift += 7; + } while (byte & 0b10000000) + return result +} + +function readCompactLE(reader, flags, size) { + let result = 0; + let shift = 0; + while (size--) { + if (flags & 0b00000001) { + result |= reader.readUInt8() << shift; } - return ignoredStatus + flags >>= 1; + shift += 8; } + return result } -/** - * Removes the directory at the specified filepath recursively. Used internally to replicate the behavior of - * fs.promises.rm({ recursive: true, force: true }) from Node.js 14 and above when not available. If the provided - * filepath resolves to a file, it will be removed. - * - * @param {import('../models/FileSystem.js').FileSystem} fs - * @param {string} filepath - The file or directory to remove. - */ -async function rmRecursive(fs, filepath) { - const entries = await fs.readdir(filepath); - if (entries == null) { - await fs.rm(filepath); - } else if (entries.length) { - await Promise.all( - entries.map(entry => { - const subpath = join(filepath, entry); - return fs.lstat(subpath).then(stat => { - if (!stat) return - return stat.isDirectory() ? 
rmRecursive(fs, subpath) : fs.rm(subpath) - }) - }) - ).then(() => fs.rmdir(filepath)); +function readOp(reader, source) { + /** @type {number} */ + const byte = reader.readUInt8(); + const COPY = 0b10000000; + const OFFS = 0b00001111; + const SIZE = 0b01110000; + if (byte & COPY) { + // copy consists of 4 byte offset, 3 byte size (in LE order) + const offset = readCompactLE(reader, byte & OFFS, 4); + let size = readCompactLE(reader, (byte & SIZE) >> 4, 3); + // Yup. They really did this optimization. + if (size === 0) size = 0x10000; + return source.slice(offset, offset + size) } else { - await fs.rmdir(filepath); + // insert + return reader.slice(byte) } } -/** - * This is just a collection of helper functions really. At least that's how it started. - */ -class FileSystem { - constructor(fs) { - if (typeof fs._original_unwrapped_fs !== 'undefined') return fs +// Convert a value to an Async Iterator +// This will be easier with async generator functions. +function fromValue(value) { + let queue = [value]; + return { + next() { + return Promise.resolve({ done: queue.length === 0, value: queue.pop() }) + }, + return() { + queue = []; + return {} + }, + [Symbol.asyncIterator]() { + return this + }, + } +} - const promises = Object.getOwnPropertyDescriptor(fs, 'promises'); - if (promises && promises.enumerable) { - this._readFile = fs.promises.readFile.bind(fs.promises); - this._writeFile = fs.promises.writeFile.bind(fs.promises); - this._mkdir = fs.promises.mkdir.bind(fs.promises); - if (fs.promises.rm) { - this._rm = fs.promises.rm.bind(fs.promises); - } else if (fs.promises.rmdir.length > 1) { - this._rm = fs.promises.rmdir.bind(fs.promises); - } else { - this._rm = rmRecursive.bind(null, this); - } - this._rmdir = fs.promises.rmdir.bind(fs.promises); - this._unlink = fs.promises.unlink.bind(fs.promises); - this._stat = fs.promises.stat.bind(fs.promises); - this._lstat = fs.promises.lstat.bind(fs.promises); - this._readdir = fs.promises.readdir.bind(fs.promises); - this._readlink = fs.promises.readlink.bind(fs.promises); - this._symlink = fs.promises.symlink.bind(fs.promises); - } else { - this._readFile = pify(fs.readFile.bind(fs)); - this._writeFile = pify(fs.writeFile.bind(fs)); - this._mkdir = pify(fs.mkdir.bind(fs)); - if (fs.rm) { - this._rm = pify(fs.rm.bind(fs)); - } else if (fs.rmdir.length > 2) { - this._rm = pify(fs.rmdir.bind(fs)); - } else { - this._rm = rmRecursive.bind(null, this); - } - this._rmdir = pify(fs.rmdir.bind(fs)); - this._unlink = pify(fs.unlink.bind(fs)); - this._stat = pify(fs.stat.bind(fs)); - this._lstat = pify(fs.lstat.bind(fs)); - this._readdir = pify(fs.readdir.bind(fs)); - this._readlink = pify(fs.readlink.bind(fs)); - this._symlink = pify(fs.symlink.bind(fs)); - } - this._original_unwrapped_fs = fs; +function getIterator(iterable) { + if (iterable[Symbol.asyncIterator]) { + return iterable[Symbol.asyncIterator]() + } + if (iterable[Symbol.iterator]) { + return iterable[Symbol.iterator]() + } + if (iterable.next) { + return iterable } + return fromValue(iterable) +} - /** - * Return true if a file exists, false if it doesn't exist. - * Rethrows errors that aren't related to file existance. - */ - async exists(filepath, options = {}) { - try { - await this._stat(filepath); - return true - } catch (err) { - if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { - return false - } else { - console.log('Unhandled error in "FileSystem.exists()" function', err); - throw err - } +// inspired by 'gartal' but lighter-weight and more battle-tested. 
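+// StreamReader buffers an async iterable of byte chunks (or a single value, via
+// getIterator/fromValue above) and exposes byte(), chunk(), read(n), skip(n) and a
+// one-step undo(); listpack() below relies on undo() to backtrack to the exact end
+// of each deflated object in the packfile stream.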
+class StreamReader { + constructor(stream) { + this.stream = getIterator(stream); + this.buffer = null; + this.cursor = 0; + this.undoCursor = 0; + this.started = false; + this._ended = false; + this._discardedBytes = 0; + } + + eof() { + return this._ended && this.cursor === this.buffer.length + } + + tell() { + return this._discardedBytes + this.cursor + } + + async byte() { + if (this.eof()) return + if (!this.started) await this._init(); + if (this.cursor === this.buffer.length) { + await this._loadnext(); + if (this._ended) return } + this._moveCursor(1); + return this.buffer[this.undoCursor] } - /** - * Return the contents of a file if it exists, otherwise returns null. - * - * @param {string} filepath - * @param {object} [options] - * - * @returns {Promise} - */ - async read(filepath, options = {}) { - try { - let buffer = await this._readFile(filepath, options); - // Convert plain ArrayBuffers to Buffers - if (typeof buffer !== 'string') { - buffer = Buffer.from(buffer); - } - return buffer - } catch (err) { - return null + async chunk() { + if (this.eof()) return + if (!this.started) await this._init(); + if (this.cursor === this.buffer.length) { + await this._loadnext(); + if (this._ended) return } + this._moveCursor(this.buffer.length); + return this.buffer.slice(this.undoCursor, this.cursor) } - /** - * Write a file (creating missing directories if need be) without throwing errors. - * - * @param {string} filepath - * @param {Buffer|Uint8Array|string} contents - * @param {object|string} [options] - */ - async write(filepath, contents, options = {}) { - try { - await this._writeFile(filepath, contents, options); - return - } catch (err) { - // Hmm. Let's try mkdirp and try again. - await this.mkdir(dirname(filepath)); - await this._writeFile(filepath, contents, options); + async read(n) { + if (this.eof()) return + if (!this.started) await this._init(); + if (this.cursor + n > this.buffer.length) { + this._trim(); + await this._accumulate(n); } + this._moveCursor(n); + return this.buffer.slice(this.undoCursor, this.cursor) } - /** - * Make a directory (or series of nested directories) without throwing an error if it already exists. - */ - async mkdir(filepath, _selfCall = false) { - try { - await this._mkdir(filepath); - return - } catch (err) { - // If err is null then operation succeeded! - if (err === null) return - // If the directory already exists, that's OK! - if (err.code === 'EEXIST') return - // Avoid infinite loops of failure - if (_selfCall) throw err - // If we got a "no such file or directory error" backup and try again. - if (err.code === 'ENOENT') { - const parent = dirname(filepath); - // Check to see if we've gone too far - if (parent === '.' || parent === '/' || parent === filepath) throw err - // Infinite recursion, what could go wrong? - await this.mkdir(parent); - await this.mkdir(filepath, true); - } + async skip(n) { + if (this.eof()) return + if (!this.started) await this._init(); + if (this.cursor + n > this.buffer.length) { + this._trim(); + await this._accumulate(n); } + this._moveCursor(n); } - /** - * Delete a file without throwing an error if it is already deleted. 
- */ - async rm(filepath) { - try { - await this._unlink(filepath); - } catch (err) { - if (err.code !== 'ENOENT') throw err + async undo() { + this.cursor = this.undoCursor; + } + + async _next() { + this.started = true; + let { done, value } = await this.stream.next(); + if (done) { + this._ended = true; + } + if (value) { + value = Buffer.from(value); } + return value } - /** - * Delete a directory without throwing an error if it is already deleted. - */ - async rmdir(filepath, opts) { - try { - if (opts && opts.recursive) { - await this._rm(filepath, opts); - } else { - await this._rmdir(filepath); - } - } catch (err) { - if (err.code !== 'ENOENT') throw err + _trim() { + // Throw away parts of the buffer we don't need anymore + // assert(this.cursor <= this.buffer.length) + this.buffer = this.buffer.slice(this.undoCursor); + this.cursor -= this.undoCursor; + this._discardedBytes += this.undoCursor; + this.undoCursor = 0; + } + + _moveCursor(n) { + this.undoCursor = this.cursor; + this.cursor += n; + if (this.cursor > this.buffer.length) { + this.cursor = this.buffer.length; } } - /** - * Read a directory without throwing an error is the directory doesn't exist - */ - async readdir(filepath) { - try { - const names = await this._readdir(filepath); - // Ordering is not guaranteed, and system specific (Windows vs Unix) - // so we must sort them ourselves. - names.sort(compareStrings); - return names - } catch (err) { - if (err.code === 'ENOTDIR') return null - return [] + async _accumulate(n) { + if (this._ended) return + // Expand the buffer until we have N bytes of data + // or we've reached the end of the stream + const buffers = [this.buffer]; + while (this.cursor + n > lengthBuffers(buffers)) { + const nextbuffer = await this._next(); + if (this._ended) break + buffers.push(nextbuffer); } + this.buffer = Buffer.concat(buffers); } - /** - * Return a flast list of all the files nested inside a directory - * - * Based on an elegant concurrent recursive solution from SO - * https://stackoverflow.com/a/45130990/2168416 - */ - async readdirDeep(dir) { - const subdirs = await this._readdir(dir); - const files = await Promise.all( - subdirs.map(async subdir => { - const res = dir + '/' + subdir; - return (await this._stat(res)).isDirectory() - ? this.readdirDeep(res) - : res - }) - ); - return files.reduce((a, f) => a.concat(f), []) + async _loadnext() { + this._discardedBytes += this.buffer.length; + this.undoCursor = 0; + this.cursor = 0; + this.buffer = await this._next(); } - /** - * Return the Stats of a file/symlink if it exists, otherwise returns null. - * Rethrows errors that aren't related to file existance. 
- */ - async lstat(filename) { - try { - const stats = await this._lstat(filename); - return stats - } catch (err) { - if (err.code === 'ENOENT') { - return null - } - throw err - } + async _init() { + this.buffer = await this._next(); + } +} + +// This helper function helps us postpone concatenating buffers, which +// would create intermediate buffer objects, +function lengthBuffers(buffers) { + return buffers.reduce((acc, buffer) => acc + buffer.length, 0) +} + +// My version of git-list-pack - roughly 15x faster than the original + +async function listpack(stream, onData) { + const reader = new StreamReader(stream); + let PACK = await reader.read(4); + PACK = PACK.toString('utf8'); + if (PACK !== 'PACK') { + throw new InternalError(`Invalid PACK header '${PACK}'`) + } + + let version = await reader.read(4); + version = version.readUInt32BE(0); + if (version !== 2) { + throw new InternalError(`Invalid packfile version: ${version}`) } - /** - * Reads the contents of a symlink if it exists, otherwise returns null. - * Rethrows errors that aren't related to file existance. - */ - async readlink(filename, opts = { encoding: 'buffer' }) { - // Note: FileSystem.readlink returns a buffer by default - // so we can dump it into GitObject.write just like any other file. - try { - const link = await this._readlink(filename, opts); - return Buffer.isBuffer(link) ? link : Buffer.from(link) - } catch (err) { - if (err.code === 'ENOENT') { - return null + let numObjects = await reader.read(4); + numObjects = numObjects.readUInt32BE(0); + // If (for some godforsaken reason) this is an empty packfile, abort now. + if (numObjects < 1) return + + while (!reader.eof() && numObjects--) { + const offset = reader.tell(); + const { type, length, ofs, reference } = await parseHeader(reader); + const inflator = new pako.Inflate(); + while (!inflator.result) { + const chunk = await reader.chunk(); + if (!chunk) break + inflator.push(chunk, false); + if (inflator.err) { + throw new InternalError(`Pako error: ${inflator.msg}`) } - throw err - } - } + if (inflator.result) { + if (inflator.result.length !== length) { + throw new InternalError( + `Inflated object size is different from that stated in packfile.` + ) + } - /** - * Write the contents of buffer to a symlink. - */ - async writelink(filename, buffer) { - return this._symlink(buffer.toString('utf8'), filename) + // Backtrack parser to where deflated data ends + await reader.undo(); + await reader.read(chunk.length - inflator.strm.avail_in); + const end = reader.tell(); + await onData({ + data: inflator.result, + type, + num: numObjects, + offset, + end, + reference, + ofs, + }); + } + } } } -async function writeObjectLoose({ fs, gitdir, object, format, oid }) { - if (format !== 'deflated') { - throw new InternalError( - 'GitObjectStoreLoose expects objects to write to be in deflated format' - ) +async function parseHeader(reader) { + // Object type is encoded in bits 654 + let byte = await reader.byte(); + const type = (byte >> 4) & 0b111; + // The length encoding get complicated. 
+ // Last four bits of length is encoded in bits 3210 + let length = byte & 0b1111; + // Whether the next byte is part of the variable-length encoded number + // is encoded in bit 7 + if (byte & 0b10000000) { + let shift = 4; + do { + byte = await reader.byte(); + length |= (byte & 0b01111111) << shift; + shift += 7; + } while (byte & 0b10000000) } - const source = `objects/${oid.slice(0, 2)}/${oid.slice(2)}`; - const filepath = `${gitdir}/${source}`; - // Don't overwrite existing git objects - this helps avoid EPERM errors. - // Although I don't know how we'd fix corrupted objects then. Perhaps delete them - // on read? - if (!(await fs.exists(filepath))) await fs.write(filepath, object); + // Handle deltified objects + let ofs; + let reference; + if (type === 6) { + let shift = 0; + ofs = 0; + const bytes = []; + do { + byte = await reader.byte(); + ofs |= (byte & 0b01111111) << shift; + shift += 7; + bytes.push(byte); + } while (byte & 0b10000000) + reference = Buffer.from(bytes); + } + if (type === 7) { + const buf = await reader.read(20); + reference = buf; + } + return { type, length, ofs, reference } } /* eslint-env node, browser */ -let supportsCompressionStream = null; +let supportsDecompressionStream = false; -async function deflate(buffer) { - if (supportsCompressionStream === null) { - supportsCompressionStream = testCompressionStream(); +async function inflate(buffer) { + if (supportsDecompressionStream === null) { + supportsDecompressionStream = testDecompressionStream(); } - return supportsCompressionStream - ? browserDeflate(buffer) - : pako.deflate(buffer) + return supportsDecompressionStream + ? browserInflate(buffer) + : pako.inflate(buffer) } -async function browserDeflate(buffer) { - const cs = new CompressionStream('deflate'); - const c = new Blob([buffer]).stream().pipeThrough(cs); - return new Uint8Array(await new Response(c).arrayBuffer()) +async function browserInflate(buffer) { + const ds = new DecompressionStream('deflate'); + const d = new Blob([buffer]).stream().pipeThrough(ds); + return new Uint8Array(await new Response(d).arrayBuffer()) } -function testCompressionStream() { +function testDecompressionStream() { try { - const cs = new CompressionStream('deflate'); - // Test if `Blob.stream` is present. React Native does not have the `stream` method - new Blob([]).stream(); - if (cs) return true + const ds = new DecompressionStream('deflate'); + if (ds) return true } catch (_) { // no bother } return false } -async function _writeObject({ - fs, - gitdir, - type, - object, - format = 'content', - oid = undefined, - dryRun = false, -}) { - if (format !== 'deflated') { - if (format !== 'wrapped') { - object = GitObject.wrap({ type, object }); - } - oid = await shasum(object); - object = Buffer.from(await deflate(object)); - } - if (!dryRun) { - await writeObjectLoose({ fs, gitdir, object, format: 'deflated', oid }); - } - return oid +function decodeVarInt(reader) { + const bytes = []; + let byte = 0; + let multibyte = 0; + do { + byte = reader.readUInt8(); + // We keep bits 6543210 + const lastSeven = byte & 0b01111111; + bytes.push(lastSeven); + // Whether the next byte is part of the variable-length encoded number + // is encoded in bit 7 + multibyte = byte & 0b10000000; + } while (multibyte) + // Now that all the bytes are in big-endian order, + // alternate shifting the bits left by 7 and OR-ing the next byte. + // And... do a weird increment-by-one thing that I don't quite understand. 
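+  // (The +1 is git's offset encoding for ofs-deltas: each continuation byte adds 1
+  // before shifting, so an N-byte encoding starts where the (N-1)-byte range ends.
+  // For example, two bytes cover 128..16511 instead of 0..16383, which gives every
+  // offset exactly one encoding.)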
+ return bytes.reduce((a, b) => ((a + 1) << 7) | b, -1) } -function assertParameter(name, value) { - if (value === undefined) { - throw new MissingParameterError(name) - } +// I'm pretty much copying this one from the git C source code, +// because it makes no sense. +function otherVarIntDecode(reader, startWith) { + let result = startWith; + let shift = 4; + let byte = null; + do { + byte = reader.readUInt8(); + result |= (byte & 0b01111111) << shift; + shift += 7; + } while (byte & 0b10000000) + return result } -function posixifyPathBuffer(buffer) { - let idx; - while (~(idx = buffer.indexOf(92))) buffer[idx] = 47; - return buffer -} +class GitPackIndex { + constructor(stuff) { + Object.assign(this, stuff); + this.offsetCache = {}; + } + + static async fromIdx({ idx, getExternalRefDelta }) { + const reader = new BufferCursor(idx); + const magic = reader.slice(4).toString('hex'); + // Check for IDX v2 magic number + if (magic !== 'ff744f63') { + return // undefined + } + const version = reader.readUInt32BE(); + if (version !== 2) { + throw new InternalError( + `Unable to read version ${version} packfile IDX. (Only version 2 supported)` + ) + } + if (idx.byteLength > 2048 * 1024 * 1024) { + throw new InternalError( + `To keep implementation simple, I haven't implemented the layer 5 feature needed to support packfiles > 2GB in size.` + ) + } + // Skip over fanout table + reader.seek(reader.tell() + 4 * 255); + // Get hashes + const size = reader.readUInt32BE(); + const hashes = []; + for (let i = 0; i < size; i++) { + const hash = reader.slice(20).toString('hex'); + hashes[i] = hash; + } + reader.seek(reader.tell() + 4 * size); + // Skip over CRCs + // Get offsets + const offsets = new Map(); + for (let i = 0; i < size; i++) { + offsets.set(hashes[i], reader.readUInt32BE()); + } + const packfileSha = reader.slice(20).toString('hex'); + return new GitPackIndex({ + hashes, + crcs: {}, + offsets, + packfileSha, + getExternalRefDelta, + }) + } + + static async fromPack({ pack, getExternalRefDelta, onProgress }) { + const listpackTypes = { + 1: 'commit', + 2: 'tree', + 3: 'blob', + 4: 'tag', + 6: 'ofs-delta', + 7: 'ref-delta', + }; + const offsetToObject = {}; + + // Older packfiles do NOT use the shasum of the pack itself, + // so it is recommended to just use whatever bytes are in the trailer. 
+ // Source: https://github.com/git/git/commit/1190a1acf800acdcfd7569f87ac1560e2d077414 + const packfileSha = pack.slice(-20).toString('hex'); + + const hashes = []; + const crcs = {}; + const offsets = new Map(); + let totalObjectCount = null; + let lastPercent = null; -// @ts-check + await listpack([pack], async ({ data, type, reference, offset, num }) => { + if (totalObjectCount === null) totalObjectCount = num; + const percent = Math.floor( + ((totalObjectCount - num) * 100) / totalObjectCount + ); + if (percent !== lastPercent) { + if (onProgress) { + await onProgress({ + phase: 'Receiving objects', + loaded: totalObjectCount - num, + total: totalObjectCount, + }); + } + } + lastPercent = percent; + // Change type from a number to a meaningful string + type = listpackTypes[type]; -/** - * Add a file to the git index (aka staging area) - * - * @param {object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.filepath - The path to the file to add to the index - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully once the git index has been updated - * - * @example - * await fs.promises.writeFile('/tutorial/README.md', `# TEST`) - * await git.add({ fs, dir: '/tutorial', filepath: 'README.md' }) - * console.log('done') - * - */ -async function add({ - fs: _fs, - dir, - gitdir = join(dir, '.git'), - filepath, - cache = {}, -}) { - try { - assertParameter('fs', _fs); - assertParameter('dir', dir); - assertParameter('gitdir', gitdir); - assertParameter('filepath', filepath); + if (['commit', 'tree', 'blob', 'tag'].includes(type)) { + offsetToObject[offset] = { + type, + offset, + }; + } else if (type === 'ofs-delta') { + offsetToObject[offset] = { + type, + offset, + }; + } else if (type === 'ref-delta') { + offsetToObject[offset] = { + type, + offset, + }; + } + }); - const fs = new FileSystem(_fs); - await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { - await addToIndex({ dir, gitdir, fs, filepath, index }); + // We need to know the lengths of the slices to compute the CRCs. + const offsetArray = Object.keys(offsetToObject).map(Number); + for (const [i, start] of offsetArray.entries()) { + const end = + i + 1 === offsetArray.length ? pack.byteLength - 20 : offsetArray[i + 1]; + const o = offsetToObject[start]; + const crc = crc32.buf(pack.slice(start, end)) >>> 0; + o.end = end; + o.crc = crc; + } + + // We don't have the hashes yet. But we can generate them using the .readSlice function! + const p = new GitPackIndex({ + pack: Promise.resolve(pack), + packfileSha, + crcs, + hashes, + offsets, + getExternalRefDelta, }); - } catch (err) { - err.caller = 'git.add'; - throw err - } -} -async function addToIndex({ dir, gitdir, fs, filepath, index }) { - // TODO: Should ignore UNLESS it's already in the index. 
- const ignored = await GitIgnoreManager.isIgnored({ - fs, - dir, - gitdir, - filepath, - }); - if (ignored) return - const stats = await fs.lstat(join(dir, filepath)); - if (!stats) throw new NotFoundError(filepath) - if (stats.isDirectory()) { - const children = await fs.readdir(join(dir, filepath)); - const promises = children.map(child => - addToIndex({ dir, gitdir, fs, filepath: join(filepath, child), index }) - ); - await Promise.all(promises); - } else { - const object = stats.isSymbolicLink() - ? await fs.readlink(join(dir, filepath)).then(posixifyPathBuffer) - : await fs.read(join(dir, filepath)); - if (object === null) throw new NotFoundError(filepath) - const oid = await _writeObject({ fs, gitdir, type: 'blob', object }); - index.insert({ filepath, stats, oid }); - } -} + // Resolve deltas and compute the oids + lastPercent = null; + let count = 0; + const objectsByDepth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + for (let offset in offsetToObject) { + offset = Number(offset); + const percent = Math.floor((count++ * 100) / totalObjectCount); + if (percent !== lastPercent) { + if (onProgress) { + await onProgress({ + phase: 'Resolving deltas', + loaded: count, + total: totalObjectCount, + }); + } + } + lastPercent = percent; -// @ts-check + const o = offsetToObject[offset]; + if (o.oid) continue + try { + p.readDepth = 0; + p.externalReadDepth = 0; + const { type, object } = await p.readSlice({ start: offset }); + objectsByDepth[p.readDepth] += 1; + const oid = await shasum(GitObject.wrap({ type, object })); + o.oid = oid; + hashes.push(oid); + offsets.set(oid, offset); + crcs[oid] = o.crc; + } catch (err) { + continue + } + } -/** - * - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {SignCallback} [args.onSign] - * @param {string} args.gitdir - * @param {string} args.message - * @param {Object} args.author - * @param {string} args.author.name - * @param {string} args.author.email - * @param {number} args.author.timestamp - * @param {number} args.author.timezoneOffset - * @param {Object} args.committer - * @param {string} args.committer.name - * @param {string} args.committer.email - * @param {number} args.committer.timestamp - * @param {number} args.committer.timezoneOffset - * @param {string} [args.signingKey] - * @param {boolean} [args.dryRun = false] - * @param {boolean} [args.noUpdateBranch = false] - * @param {string} [args.ref] - * @param {string[]} [args.parent] - * @param {string} [args.tree] - * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly created commit. 
- */ -async function _commit({ - fs, - cache, - onSign, - gitdir, - message, - author, - committer, - signingKey, - dryRun = false, - noUpdateBranch = false, - ref, - parent, - tree, -}) { - if (!ref) { - ref = await GitRefManager.resolve({ - fs, - gitdir, - ref: 'HEAD', - depth: 2, - }); + hashes.sort(); + return p + } + + async toBuffer() { + const buffers = []; + const write = (str, encoding) => { + buffers.push(Buffer.from(str, encoding)); + }; + // Write out IDX v2 magic number + write('ff744f63', 'hex'); + // Write out version number 2 + write('00000002', 'hex'); + // Write fanout table + const fanoutBuffer = new BufferCursor(Buffer.alloc(256 * 4)); + for (let i = 0; i < 256; i++) { + let count = 0; + for (const hash of this.hashes) { + if (parseInt(hash.slice(0, 2), 16) <= i) count++; + } + fanoutBuffer.writeUInt32BE(count); + } + buffers.push(fanoutBuffer.buffer); + // Write out hashes + for (const hash of this.hashes) { + write(hash, 'hex'); + } + // Write out crcs + const crcsBuffer = new BufferCursor(Buffer.alloc(this.hashes.length * 4)); + for (const hash of this.hashes) { + crcsBuffer.writeUInt32BE(this.crcs[hash]); + } + buffers.push(crcsBuffer.buffer); + // Write out offsets + const offsetsBuffer = new BufferCursor(Buffer.alloc(this.hashes.length * 4)); + for (const hash of this.hashes) { + offsetsBuffer.writeUInt32BE(this.offsets.get(hash)); + } + buffers.push(offsetsBuffer.buffer); + // Write out packfile checksum + write(this.packfileSha, 'hex'); + // Write out shasum + const totalBuffer = Buffer.concat(buffers); + const sha = await shasum(totalBuffer); + const shaBuffer = Buffer.alloc(20); + shaBuffer.write(sha, 'hex'); + return Buffer.concat([totalBuffer, shaBuffer]) } - return GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { - const inodes = flatFileListToDirectoryStructure(index.entries); - const inode = inodes.get('.'); - if (!tree) { - tree = await constructTree({ fs, gitdir, inode, dryRun }); - } - if (!parent) { - try { - parent = [ - await GitRefManager.resolve({ - fs, - gitdir, - ref, - }), - ]; - } catch (err) { - // Probably an initial commit - parent = []; + async load({ pack }) { + this.pack = pack; + } + + async unload() { + this.pack = null; + } + + async read({ oid }) { + if (!this.offsets.get(oid)) { + if (this.getExternalRefDelta) { + this.externalReadDepth++; + return this.getExternalRefDelta(oid) + } else { + throw new InternalError(`Could not read object ${oid} from packfile`) } } - let comm = GitCommit.from({ - tree, - parent, - author, - committer, - message, - }); - if (signingKey) { - comm = await GitCommit.sign(comm, onSign, signingKey); + const start = this.offsets.get(oid); + return this.readSlice({ start }) + } + + async readSlice({ start }) { + if (this.offsetCache[start]) { + return Object.assign({}, this.offsetCache[start]) } - const oid = await _writeObject({ - fs, - gitdir, - type: 'commit', - object: comm.toObject(), - dryRun, - }); - if (!noUpdateBranch && !dryRun) { - // Update branch pointer - await GitRefManager.writeRef({ - fs, - gitdir, - ref, - value: oid, - }); + this.readDepth++; + const types = { + 0b0010000: 'commit', + 0b0100000: 'tree', + 0b0110000: 'blob', + 0b1000000: 'tag', + 0b1100000: 'ofs_delta', + 0b1110000: 'ref_delta', + }; + if (!this.pack) { + throw new InternalError( + 'Tried to read from a GitPackIndex with no packfile loaded into memory' + ) } - return oid - }) -} - -async function constructTree({ fs, gitdir, inode, dryRun }) { - // use depth first traversal - const children = 
inode.children; - for (const inode of children) { - if (inode.type === 'tree') { - inode.metadata.mode = '040000'; - inode.metadata.oid = await constructTree({ fs, gitdir, inode, dryRun }); + const raw = (await this.pack).slice(start); + const reader = new BufferCursor(raw); + const byte = reader.readUInt8(); + // Object type is encoded in bits 654 + const btype = byte & 0b1110000; + let type = types[btype]; + if (type === undefined) { + throw new InternalError('Unrecognized type: 0b' + btype.toString(2)) + } + // The length encoding get complicated. + // Last four bits of length is encoded in bits 3210 + const lastFour = byte & 0b1111; + let length = lastFour; + // Whether the next byte is part of the variable-length encoded number + // is encoded in bit 7 + const multibyte = byte & 0b10000000; + if (multibyte) { + length = otherVarIntDecode(reader, lastFour); + } + let base = null; + let object = null; + // Handle deltified objects + if (type === 'ofs_delta') { + const offset = decodeVarInt(reader); + const baseOffset = start - offset + ;({ object: base, type } = await this.readSlice({ start: baseOffset })); + } + if (type === 'ref_delta') { + const oid = reader.slice(20).toString('hex') + ;({ object: base, type } = await this.read({ oid })); + } + // Handle undeltified objects + const buffer = raw.slice(reader.tell()); + object = Buffer.from(await inflate(buffer)); + // Assert that the object length is as expected. + if (object.byteLength !== length) { + throw new InternalError( + `Packfile told us object would have length ${length} but it had length ${object.byteLength}` + ) + } + if (base) { + object = Buffer.from(applyDelta(object, base)); + } + // Cache the result based on depth. + if (this.readDepth > 3) { + // hand tuned for speed / memory usage tradeoff + this.offsetCache[start] = { type, object }; } + return { type, format: 'content', object } } - const entries = children.map(inode => ({ - mode: inode.metadata.mode, - path: inode.basename, - oid: inode.metadata.oid, - type: inode.type, - })); - const tree = GitTree.from(entries); - const oid = await _writeObject({ - fs, - gitdir, - type: 'tree', - object: tree.toObject(), - dryRun, - }); - return oid } -// @ts-check +const PackfileCache = Symbol('PackfileCache'); -async function resolveFilepath({ fs, cache, gitdir, oid, filepath }) { - // Ensure there are no leading or trailing directory separators. - // I was going to do this automatically, but then found that the Git Terminal for Windows - // auto-expands --filepath=/src/utils to --filepath=C:/Users/Will/AppData/Local/Programs/Git/src/utils - // so I figured it would be wise to promote the behavior in the application layer not just the library layer. 
- if (filepath.startsWith('/')) { - throw new InvalidFilepathError('leading-slash') - } else if (filepath.endsWith('/')) { - throw new InvalidFilepathError('trailing-slash') - } - const _oid = oid; - const result = await resolveTree({ fs, cache, gitdir, oid }); - const tree = result.tree; - if (filepath === '') { - oid = result.oid; - } else { - const pathArray = filepath.split('/'); - oid = await _resolveFilepath({ +async function loadPackIndex({ + fs, + filename, + getExternalRefDelta, + emitter, + emitterPrefix, +}) { + const idx = await fs.read(filename); + return GitPackIndex.fromIdx({ idx, getExternalRefDelta }) +} + +function readPackIndex({ + fs, + cache, + filename, + getExternalRefDelta, + emitter, + emitterPrefix, +}) { + // Try to get the packfile index from the in-memory cache + if (!cache[PackfileCache]) cache[PackfileCache] = new Map(); + let p = cache[PackfileCache].get(filename); + if (!p) { + p = loadPackIndex({ fs, - cache, - gitdir, - tree, - pathArray, - oid: _oid, - filepath, + filename, + getExternalRefDelta, + emitter, + emitterPrefix, }); + cache[PackfileCache].set(filename, p); } - return oid + return p } -async function _resolveFilepath({ +async function readObjectPacked({ fs, cache, gitdir, - tree, - pathArray, oid, - filepath, + format = 'content', + getExternalRefDelta, }) { - const name = pathArray.shift(); - for (const entry of tree) { - if (entry.path === name) { - if (pathArray.length === 0) { - return entry.oid - } else { - const { type, object } = await _readObject({ - fs, - cache, - gitdir, - oid: entry.oid, - }); - if (type !== 'tree') { - throw new ObjectTypeError(oid, type, 'blob', filepath) - } - tree = GitTree.from(object); - return _resolveFilepath({ - fs, - cache, - gitdir, - tree, - pathArray, - oid, - filepath, - }) + // Check to see if it's in a packfile. + // Iterate through all the .idx files + let list = await fs.readdir(join(gitdir, 'objects/pack')); + list = list.filter(x => x.endsWith('.idx')); + for (const filename of list) { + const indexFile = `${gitdir}/objects/pack/${filename}`; + const p = await readPackIndex({ + fs, + cache, + filename: indexFile, + getExternalRefDelta, + }); + if (p.error) throw new InternalError(p.error) + // If the packfile DOES have the oid we're looking for... 
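+    // (Only the .idx has been parsed so far; the .pack itself is read lazily inside
+    // this branch the first time an oid actually resolves to this index.)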
+ if (p.offsets.has(oid)) { + // Get the resolved git object from the packfile + if (!p.pack) { + const packFile = indexFile.replace(/idx$/, 'pack'); + p.pack = fs.read(packFile); } + const result = await p.read({ oid, getExternalRefDelta }); + result.format = 'content'; + result.source = `objects/pack/${filename.replace(/idx$/, 'pack')}`; + return result } } - throw new NotFoundError(`file or directory found at "${oid}:${filepath}"`) + // Failed to find it + return null } -// @ts-check - -/** - * - * @typedef {Object} ReadTreeResult - The object returned has the following schema: - * @property {string} oid - SHA-1 object id of this tree - * @property {TreeObject} tree - the parsed tree object - */ - /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs * @param {any} args.cache * @param {string} args.gitdir * @param {string} args.oid - * @param {string} [args.filepath] - * - * @returns {Promise} + * @param {string} [args.format] */ -async function _readTree({ +async function _readObject({ fs, cache, gitdir, oid, - filepath = undefined, + format = 'content', }) { - if (filepath !== undefined) { - oid = await resolveFilepath({ fs, cache, gitdir, oid, filepath }); + // Curry the current read method so that the packfile un-deltification + // process can acquire external ref-deltas. + const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); + + let result; + // Empty tree - hard-coded so we can use it as a shorthand. + // Note: I think the canonical git implementation must do this too because + // `git cat-file -t 4b825dc642cb6eb9a060e54bf8d69288fbee4904` prints "tree" even in empty repos. + if (oid === '4b825dc642cb6eb9a060e54bf8d69288fbee4904') { + result = { format: 'wrapped', object: Buffer.from(`tree 0\x00`) }; + } + // Look for it in the loose object directory. + if (!result) { + result = await readObjectLoose({ fs, gitdir, oid }); + } + // Check to see if it's in a packfile. + if (!result) { + result = await readObjectPacked({ + fs, + cache, + gitdir, + oid, + getExternalRefDelta, + }); + } + // Finally + if (!result) { + throw new NotFoundError(oid) + } + + if (format === 'deflated') { + return result + } + + if (result.format === 'deflated') { + result.object = Buffer.from(await inflate(result.object)); + result.format = 'wrapped'; + } + + if (result.format === 'wrapped') { + if (format === 'wrapped' && result.format === 'wrapped') { + return result + } + const sha = await shasum(result.object); + if (sha !== oid) { + throw new InternalError( + `SHA check failed! Expected ${oid}, computed ${sha}` + ) + } + const { object, type } = GitObject.unwrap(result.object); + result.type = type; + result.object = object; + result.format = 'content'; + } + + if (result.format === 'content') { + if (format === 'content') return result + return + } + + throw new InternalError(`invalid format "${result.format}"`) +} + +class AlreadyExistsError extends BaseError { + /** + * @param {'note'|'remote'|'tag'|'branch'} noun + * @param {string} where + * @param {boolean} canForce + */ + constructor(noun, where, canForce = true) { + super( + `Failed to create ${noun} at ${where} because it already exists.${ + canForce + ? 
` (Hint: use 'force: true' parameter to overwrite existing ${noun}.)` + : '' + }` + ); + this.code = this.name = AlreadyExistsError.code; + this.data = { noun, where, canForce }; + } +} +/** @type {'AlreadyExistsError'} */ +AlreadyExistsError.code = 'AlreadyExistsError'; + +class AmbiguousError extends BaseError { + /** + * @param {'oids'|'refs'} nouns + * @param {string} short + * @param {string[]} matches + */ + constructor(nouns, short, matches) { + super( + `Found multiple ${nouns} matching "${short}" (${matches.join( + ', ' + )}). Use a longer abbreviation length to disambiguate them.` + ); + this.code = this.name = AmbiguousError.code; + this.data = { nouns, short, matches }; + } +} +/** @type {'AmbiguousError'} */ +AmbiguousError.code = 'AmbiguousError'; + +class CheckoutConflictError extends BaseError { + /** + * @param {string[]} filepaths + */ + constructor(filepaths) { + super( + `Your local changes to the following files would be overwritten by checkout: ${filepaths.join( + ', ' + )}` + ); + this.code = this.name = CheckoutConflictError.code; + this.data = { filepaths }; + } +} +/** @type {'CheckoutConflictError'} */ +CheckoutConflictError.code = 'CheckoutConflictError'; + +class CommitNotFetchedError extends BaseError { + /** + * @param {string} ref + * @param {string} oid + */ + constructor(ref, oid) { + super( + `Failed to checkout "${ref}" because commit ${oid} is not available locally. Do a git fetch to make the branch available locally.` + ); + this.code = this.name = CommitNotFetchedError.code; + this.data = { ref, oid }; + } +} +/** @type {'CommitNotFetchedError'} */ +CommitNotFetchedError.code = 'CommitNotFetchedError'; + +class EmptyServerResponseError extends BaseError { + constructor() { + super(`Empty response from git server.`); + this.code = this.name = EmptyServerResponseError.code; + this.data = {}; + } +} +/** @type {'EmptyServerResponseError'} */ +EmptyServerResponseError.code = 'EmptyServerResponseError'; + +class FastForwardError extends BaseError { + constructor() { + super(`A simple fast-forward merge was not possible.`); + this.code = this.name = FastForwardError.code; + this.data = {}; + } +} +/** @type {'FastForwardError'} */ +FastForwardError.code = 'FastForwardError'; + +class GitPushError extends BaseError { + /** + * @param {string} prettyDetails + * @param {PushResult} result + */ + constructor(prettyDetails, result) { + super(`One or more branches were not updated: ${prettyDetails}`); + this.code = this.name = GitPushError.code; + this.data = { prettyDetails, result }; + } +} +/** @type {'GitPushError'} */ +GitPushError.code = 'GitPushError'; + +class HttpError extends BaseError { + /** + * @param {number} statusCode + * @param {string} statusMessage + * @param {string} response + */ + constructor(statusCode, statusMessage, response) { + super(`HTTP Error: ${statusCode} ${statusMessage}`); + this.code = this.name = HttpError.code; + this.data = { statusCode, statusMessage, response }; + } +} +/** @type {'HttpError'} */ +HttpError.code = 'HttpError'; + +class InvalidFilepathError extends BaseError { + /** + * @param {'leading-slash'|'trailing-slash'} [reason] + */ + constructor(reason) { + let message = 'invalid filepath'; + if (reason === 'leading-slash' || reason === 'trailing-slash') { + message = `"filepath" parameter should not include leading or trailing directory separators because these can cause problems on some platforms.`; + } + super(message); + this.code = this.name = InvalidFilepathError.code; + this.data = { reason }; + } +} +/** 
@type {'InvalidFilepathError'} */ +InvalidFilepathError.code = 'InvalidFilepathError'; + +class InvalidRefNameError extends BaseError { + /** + * @param {string} ref + * @param {string} suggestion + * @param {boolean} canForce + */ + constructor(ref, suggestion) { + super( + `"${ref}" would be an invalid git reference. (Hint: a valid alternative would be "${suggestion}".)` + ); + this.code = this.name = InvalidRefNameError.code; + this.data = { ref, suggestion }; + } +} +/** @type {'InvalidRefNameError'} */ +InvalidRefNameError.code = 'InvalidRefNameError'; + +class MaxDepthError extends BaseError { + /** + * @param {number} depth + */ + constructor(depth) { + super(`Maximum search depth of ${depth} exceeded.`); + this.code = this.name = MaxDepthError.code; + this.data = { depth }; } - const { tree, oid: treeOid } = await resolveTree({ fs, cache, gitdir, oid }); - const result = { - oid: treeOid, - tree: tree.entries(), - }; - return result } +/** @type {'MaxDepthError'} */ +MaxDepthError.code = 'MaxDepthError'; -// @ts-check - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {TreeObject} args.tree - * - * @returns {Promise} - */ -async function _writeTree({ fs, gitdir, tree }) { - // Convert object to buffer - const object = GitTree.from(tree).toObject(); - const oid = await _writeObject({ - fs, - gitdir, - type: 'tree', - object, - format: 'content', - }); - return oid +class MergeNotSupportedError extends BaseError { + constructor() { + super(`Merges with conflicts are not supported yet.`); + this.code = this.name = MergeNotSupportedError.code; + this.data = {}; + } } +/** @type {'MergeNotSupportedError'} */ +MergeNotSupportedError.code = 'MergeNotSupportedError'; -// @ts-check - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {SignCallback} [args.onSign] - * @param {string} args.gitdir - * @param {string} args.ref - * @param {string} args.oid - * @param {string|Uint8Array} args.note - * @param {boolean} [args.force] - * @param {Object} args.author - * @param {string} args.author.name - * @param {string} args.author.email - * @param {number} args.author.timestamp - * @param {number} args.author.timezoneOffset - * @param {Object} args.committer - * @param {string} args.committer.name - * @param {string} args.committer.email - * @param {number} args.committer.timestamp - * @param {number} args.committer.timezoneOffset - * @param {string} [args.signingKey] - * - * @returns {Promise} - */ +class MissingNameError extends BaseError { + /** + * @param {'author'|'committer'|'tagger'} role + */ + constructor(role) { + super( + `No name was provided for ${role} in the argument or in the .git/config file.` + ); + this.code = this.name = MissingNameError.code; + this.data = { role }; + } +} +/** @type {'MissingNameError'} */ +MissingNameError.code = 'MissingNameError'; -async function _addNote({ - fs, - cache, - onSign, - gitdir, - ref, - oid, - note, - force, - author, - committer, - signingKey, -}) { - // Get the current note commit - let parent; - try { - parent = await GitRefManager.resolve({ gitdir, fs, ref }); - } catch (err) { - if (!(err instanceof NotFoundError)) { - throw err - } +class MissingParameterError extends BaseError { + /** + * @param {string} parameter + */ + constructor(parameter) { + super( + `The function requires a "${parameter}" parameter but none was provided.` + ); + this.code = this.name = 
MissingParameterError.code; + this.data = { parameter }; } +} +/** @type {'MissingParameterError'} */ +MissingParameterError.code = 'MissingParameterError'; - // I'm using the "empty tree" magic number here for brevity - const result = await _readTree({ - fs, - cache, - gitdir, - oid: parent || '4b825dc642cb6eb9a060e54bf8d69288fbee4904', - }); - let tree = result.tree; +class ParseError extends BaseError { + /** + * @param {string} expected + * @param {string} actual + */ + constructor(expected, actual) { + super(`Expected "${expected}" but received "${actual}".`); + this.code = this.name = ParseError.code; + this.data = { expected, actual }; + } +} +/** @type {'ParseError'} */ +ParseError.code = 'ParseError'; - // Handle the case where a note already exists - if (force) { - tree = tree.filter(entry => entry.path !== oid); - } else { - for (const entry of tree) { - if (entry.path === oid) { - throw new AlreadyExistsError('note', oid) - } +class PushRejectedError extends BaseError { + /** + * @param {'not-fast-forward'|'tag-exists'} reason + */ + constructor(reason) { + let message = ''; + if (reason === 'not-fast-forward') { + message = ' because it was not a simple fast-forward'; + } else if (reason === 'tag-exists') { + message = ' because tag already exists'; } + super(`Push rejected${message}. Use "force: true" to override.`); + this.code = this.name = PushRejectedError.code; + this.data = { reason }; } +} +/** @type {'PushRejectedError'} */ +PushRejectedError.code = 'PushRejectedError'; - // Create the note blob - if (typeof note === 'string') { - note = Buffer.from(note, 'utf8'); +class RemoteCapabilityError extends BaseError { + /** + * @param {'shallow'|'deepen-since'|'deepen-not'|'deepen-relative'} capability + * @param {'depth'|'since'|'exclude'|'relative'} parameter + */ + constructor(capability, parameter) { + super( + `Remote does not support the "${capability}" so the "${parameter}" parameter cannot be used.` + ); + this.code = this.name = RemoteCapabilityError.code; + this.data = { capability, parameter }; } - const noteOid = await _writeObject({ - fs, - gitdir, - type: 'blob', - object: note, - format: 'content', - }); - - // Create the new note tree - tree.push({ mode: '100644', path: oid, oid: noteOid, type: 'blob' }); - const treeOid = await _writeTree({ - fs, - gitdir, - tree, - }); - - // Create the new note commit - const commitOid = await _commit({ - fs, - cache, - onSign, - gitdir, - ref, - tree: treeOid, - parent: parent && [parent], - message: `Note added by 'isomorphic-git addNote'\n`, - author, - committer, - signingKey, - }); - - return commitOid } +/** @type {'RemoteCapabilityError'} */ +RemoteCapabilityError.code = 'RemoteCapabilityError'; -// @ts-check +class SmartHttpError extends BaseError { + /** + * @param {string} preview + * @param {string} response + */ + constructor(preview, response) { + super( + `Remote did not reply using the "smart" HTTP protocol. 
Expected "001e# service=git-upload-pack" but received: ${preview}` + ); + this.code = this.name = SmartHttpError.code; + this.data = { preview, response }; + } +} +/** @type {'SmartHttpError'} */ +SmartHttpError.code = 'SmartHttpError'; -/** - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.path - * - * @returns {Promise} Resolves with the config value - * - * @example - * // Read config value - * let value = await git.getConfig({ - * dir: '$input((/))', - * path: '$input((user.name))' - * }) - * console.log(value) - * - */ -async function _getConfig({ fs, gitdir, path }) { - const config = await GitConfigManager.get({ fs, gitdir }); - return config.get(path) +class UnknownTransportError extends BaseError { + /** + * @param {string} url + * @param {string} transport + * @param {string} [suggestion] + */ + constructor(url, transport, suggestion) { + super( + `Git remote "${url}" uses an unrecognized transport protocol: "${transport}"` + ); + this.code = this.name = UnknownTransportError.code; + this.data = { url, transport, suggestion }; + } } +/** @type {'UnknownTransportError'} */ +UnknownTransportError.code = 'UnknownTransportError'; -/** - * - * @returns {Promise} - */ -async function normalizeAuthorObject({ fs, gitdir, author = {} }) { - let { name, email, timestamp, timezoneOffset } = author; - name = name || (await _getConfig({ fs, gitdir, path: 'user.name' })); - email = email || (await _getConfig({ fs, gitdir, path: 'user.email' })) || ''; - - if (name === undefined) { - return undefined +class UrlParseError extends BaseError { + /** + * @param {string} url + */ + constructor(url) { + super(`Cannot parse remote URL: "${url}"`); + this.code = this.name = UrlParseError.code; + this.data = { url }; } - - timestamp = timestamp != null ? timestamp : Math.floor(Date.now() / 1000); - timezoneOffset = - timezoneOffset != null - ? timezoneOffset - : new Date(timestamp * 1000).getTimezoneOffset(); - - return { name, email, timestamp, timezoneOffset } } +/** @type {'UrlParseError'} */ +UrlParseError.code = 'UrlParseError'; -/** - * - * @returns {Promise} - */ -async function normalizeCommitterObject({ - fs, - gitdir, - author, - committer, -}) { - committer = Object.assign({}, committer || author); - // Match committer's date to author's one, if omitted - if (author) { - committer.timestamp = committer.timestamp || author.timestamp; - committer.timezoneOffset = committer.timezoneOffset || author.timezoneOffset; +class UserCanceledError extends BaseError { + constructor() { + super(`The operation was canceled.`); + this.code = this.name = UserCanceledError.code; + this.data = {}; } - committer = await normalizeAuthorObject({ fs, gitdir, author: committer }); - return committer } +/** @type {'UserCanceledError'} */ +UserCanceledError.code = 'UserCanceledError'; -// @ts-check - -/** - * Add or update an object note - * - * @param {object} args - * @param {FsClient} args.fs - a file system implementation - * @param {SignCallback} [args.onSign] - a PGP signing implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - The notes ref to look under - * @param {string} args.oid - The SHA-1 object id of the object to add the note to. 
- * @param {string|Uint8Array} args.note - The note to add - * @param {boolean} [args.force] - Over-write note if it already exists. - * @param {Object} [args.author] - The details about the author. - * @param {string} [args.author.name] - Default is `user.name` config. - * @param {string} [args.author.email] - Default is `user.email` config. - * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {Object} [args.committer = author] - The details about the note committer, in the same format as the author parameter. If not specified, the author details are used. - * @param {string} [args.committer.name] - Default is `user.name` config. - * @param {string} [args.committer.email] - Default is `user.email` config. - * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {string} [args.signingKey] - Sign the note commit using this private PGP key. - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the commit object for the added note. - */ - -async function addNote({ - fs: _fs, - onSign, - dir, - gitdir = join(dir, '.git'), - ref = 'refs/notes/commits', - oid, - note, - force, - author: _author, - committer: _committer, - signingKey, - cache = {}, -}) { - try { - assertParameter('fs', _fs); - assertParameter('gitdir', gitdir); - assertParameter('oid', oid); - assertParameter('note', note); - if (signingKey) { - assertParameter('onSign', onSign); - } - const fs = new FileSystem(_fs); - - const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); - if (!author) throw new MissingNameError('author') - - const committer = await normalizeCommitterObject({ - fs, - gitdir, - author, - committer: _committer, - }); - if (!committer) throw new MissingNameError('committer') - return await _addNote({ - fs: new FileSystem(fs), - cache, - onSign, - gitdir, - ref, - oid, - note, - force, - author, - committer, - signingKey, - }) - } catch (err) { - err.caller = 'git.addNote'; - throw err - } -} -// @ts-check +var Errors = /*#__PURE__*/Object.freeze({ + __proto__: null, + AlreadyExistsError: AlreadyExistsError, + AmbiguousError: AmbiguousError, + CheckoutConflictError: CheckoutConflictError, + CommitNotFetchedError: CommitNotFetchedError, + EmptyServerResponseError: EmptyServerResponseError, + FastForwardError: FastForwardError, + GitPushError: GitPushError, + HttpError: HttpError, + InternalError: InternalError, + InvalidFilepathError: InvalidFilepathError, + InvalidOidError: InvalidOidError, + InvalidRefNameError: InvalidRefNameError, + MaxDepthError: MaxDepthError, + MergeNotSupportedError: MergeNotSupportedError, + MissingNameError: MissingNameError, + MissingParameterError: MissingParameterError, + NoRefspecError: NoRefspecError, + NotFoundError: NotFoundError, + ObjectTypeError: ObjectTypeError, + 
ParseError: ParseError, + PushRejectedError: PushRejectedError, + RemoteCapabilityError: RemoteCapabilityError, + SmartHttpError: SmartHttpError, + UnknownTransportError: UnknownTransportError, + UnsafeFilepathError: UnsafeFilepathError, + UrlParseError: UrlParseError, + UserCanceledError: UserCanceledError +}); -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.remote - * @param {string} args.url - * @param {boolean} args.force - * - * @returns {Promise} - * - */ -async function _addRemote({ fs, gitdir, remote, url, force }) { - if (remote !== cleanGitRef.clean(remote)) { - throw new InvalidRefNameError(remote, cleanGitRef.clean(remote)) - } - const config = await GitConfigManager.get({ fs, gitdir }); - if (!force) { - // Check that setting it wouldn't overwrite. - const remoteNames = await config.getSubsections('remote'); - if (remoteNames.includes(remote)) { - // Throw an error if it would overwrite an existing remote, - // but not if it's simply setting the same value again. - if (url !== (await config.get(`remote.${remote}.url`))) { - throw new AlreadyExistsError('remote', remote) - } - } - } - await config.set(`remote.${remote}.url`, url); - await config.set( - `remote.${remote}.fetch`, - `+refs/heads/*:refs/remotes/${remote}/*` - ); - await GitConfigManager.save({ fs, gitdir, config }); +function formatAuthor({ name, email, timestamp, timezoneOffset }) { + timezoneOffset = formatTimezoneOffset(timezoneOffset); + return `${name} <${email}> ${timestamp} ${timezoneOffset}` } -// @ts-check +// The amount of effort that went into crafting these cases to handle +// -0 (just so we don't lose that information when parsing and reconstructing) +// but can also default to +0 was extraordinary. -/** - * Add or update a remote - * - * @param {object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.remote - The name of the remote - * @param {string} args.url - The URL of the remote - * @param {boolean} [args.force = false] - Instead of throwing an error if a remote named `remote` already exists, overwrite the existing remote. - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.addRemote({ - * fs, - * dir: '/tutorial', - * remote: 'upstream', - * url: 'https://github.com/isomorphic-git/isomorphic-git' - * }) - * console.log('done') - * - */ -async function addRemote({ - fs, - dir, - gitdir = join(dir, '.git'), - remote, - url, - force = false, -}) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('remote', remote); - assertParameter('url', url); - return await _addRemote({ - fs: new FileSystem(fs), - gitdir, - remote, - url, - force, - }) - } catch (err) { - err.caller = 'git.addRemote'; - throw err - } +function formatTimezoneOffset(minutes) { + const sign = simpleSign(negateExceptForZero(minutes)); + minutes = Math.abs(minutes); + const hours = Math.floor(minutes / 60); + minutes -= hours * 60; + let strHours = String(hours); + let strMinutes = String(minutes); + if (strHours.length < 2) strHours = '0' + strHours; + if (strMinutes.length < 2) strMinutes = '0' + strMinutes; + return (sign === -1 ? 
'-' : '+') + strHours + strMinutes } -// @ts-check - -/** - * Create an annotated tag. - * - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {SignCallback} [args.onSign] - * @param {string} args.gitdir - * @param {string} args.ref - * @param {string} [args.message = ref] - * @param {string} [args.object = 'HEAD'] - * @param {object} [args.tagger] - * @param {string} args.tagger.name - * @param {string} args.tagger.email - * @param {number} args.tagger.timestamp - * @param {number} args.tagger.timezoneOffset - * @param {string} [args.gpgsig] - * @param {string} [args.signingKey] - * @param {boolean} [args.force = false] - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.annotatedTag({ - * dir: '$input((/))', - * ref: '$input((test-tag))', - * message: '$input((This commit is awesome))', - * tagger: { - * name: '$input((Mr. Test))', - * email: '$input((mrtest@example.com))' - * } - * }) - * console.log('done') - * - */ -async function _annotatedTag({ - fs, - cache, - onSign, - gitdir, - ref, - tagger, - message = ref, - gpgsig, - object, - signingKey, - force = false, -}) { - ref = ref.startsWith('refs/tags/') ? ref : `refs/tags/${ref}`; +function simpleSign(n) { + return Math.sign(n) || (Object.is(n, -0) ? -1 : 1) +} - if (!force && (await GitRefManager.exists({ fs, gitdir, ref }))) { - throw new AlreadyExistsError('tag', ref) - } +function negateExceptForZero(n) { + return n === 0 ? n : -n +} - // Resolve passed value - const oid = await GitRefManager.resolve({ - fs, - gitdir, - ref: object || 'HEAD', - }); +function normalizeNewlines(str) { + // remove all + str = str.replace(/\r/g, ''); + // no extra newlines up front + str = str.replace(/^\n+/, ''); + // and a single newline at the end + str = str.replace(/\n+$/, '') + '\n'; + return str +} - const { type } = await _readObject({ fs, cache, gitdir, oid }); - let tagObject = GitAnnotatedTag.from({ - object: oid, - type, - tag: ref.replace('refs/tags/', ''), - tagger, - message, - gpgsig, - }); - if (signingKey) { - tagObject = await GitAnnotatedTag.sign(tagObject, onSign, signingKey); +function parseAuthor(author) { + const [, name, email, timestamp, offset] = author.match( + /^(.*) <(.*)> (.*) (.*)$/ + ); + return { + name: name, + email: email, + timestamp: Number(timestamp), + timezoneOffset: parseTimezoneOffset(offset), } - const value = await _writeObject({ - fs, - gitdir, - type: 'tag', - object: tagObject.toObject(), - }); +} - await GitRefManager.writeRef({ fs, gitdir, ref, value }); +// The amount of effort that went into crafting these cases to handle +// -0 (just so we don't lose that information when parsing and reconstructing) +// but can also default to +0 was extraordinary. + +function parseTimezoneOffset(offset) { + let [, sign, hours, minutes] = offset.match(/(\+|-)(\d\d)(\d\d)/); + minutes = (sign === '+' ? 1 : -1) * (Number(hours) * 60 + Number(minutes)); + return negateExceptForZero$1(minutes) } -// @ts-check +function negateExceptForZero$1(n) { + return n === 0 ? n : -n +} -/** - * Create an annotated tag. 
- * - * @param {object} args - * @param {FsClient} args.fs - a file system implementation - * @param {SignCallback} [args.onSign] - a PGP signing implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - What to name the tag - * @param {string} [args.message = ref] - The tag message to use. - * @param {string} [args.object = 'HEAD'] - The SHA-1 object id the tag points to. (Will resolve to a SHA-1 object id if value is a ref.) By default, the commit object which is referred by the current `HEAD` is used. - * @param {object} [args.tagger] - The details about the tagger. - * @param {string} [args.tagger.name] - Default is `user.name` config. - * @param {string} [args.tagger.email] - Default is `user.email` config. - * @param {number} [args.tagger.timestamp=Math.floor(Date.now()/1000)] - Set the tagger timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.tagger.timezoneOffset] - Set the tagger timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {string} [args.gpgsig] - The gpgsig attatched to the tag object. (Mutually exclusive with the `signingKey` option.) - * @param {string} [args.signingKey] - Sign the tag object using this private PGP key. (Mutually exclusive with the `gpgsig` option.) - * @param {boolean} [args.force = false] - Instead of throwing an error if a tag named `ref` already exists, overwrite the existing tag. Note that this option does not modify the original tag object itself. - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.annotatedTag({ - * fs, - * dir: '/tutorial', - * ref: 'test-tag', - * message: 'This commit is awesome', - * tagger: { - * name: 'Mr. Test', - * email: 'mrtest@example.com' - * } - * }) - * console.log('done') - * - */ -async function annotatedTag({ - fs: _fs, - onSign, - dir, - gitdir = join(dir, '.git'), - ref, - tagger: _tagger, - message = ref, - gpgsig, - object, - signingKey, - force = false, - cache = {}, -}) { - try { - assertParameter('fs', _fs); - assertParameter('gitdir', gitdir); - assertParameter('ref', ref); - if (signingKey) { - assertParameter('onSign', onSign); +class GitAnnotatedTag { + constructor(tag) { + if (typeof tag === 'string') { + this._tag = tag; + } else if (Buffer.isBuffer(tag)) { + this._tag = tag.toString('utf8'); + } else if (typeof tag === 'object') { + this._tag = GitAnnotatedTag.render(tag); + } else { + throw new InternalError( + 'invalid type passed to GitAnnotatedTag constructor' + ) } - const fs = new FileSystem(_fs); + } - // Fill in missing arguments with default values - const tagger = await normalizeAuthorObject({ fs, gitdir, author: _tagger }); - if (!tagger) throw new MissingNameError('tagger') + static from(tag) { + return new GitAnnotatedTag(tag) + } - return await _annotatedTag({ - fs, - cache, - onSign, - gitdir, - ref, - tagger, - message, - gpgsig, - object, - signingKey, - force, - }) - } catch (err) { - err.caller = 'git.annotatedTag'; - throw err + static render(obj) { + return `object ${obj.object} +type ${obj.type} +tag ${obj.tag} +tagger ${formatAuthor(obj.tagger)} + +${obj.message} +${obj.gpgsig ? 
obj.gpgsig : ''}` } -} -// @ts-check + justHeaders() { + return this._tag.slice(0, this._tag.indexOf('\n\n')) + } -/** - * Create a branch - * - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.ref - * @param {boolean} [args.checkout = false] - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.branch({ dir: '$input((/))', ref: '$input((develop))' }) - * console.log('done') - * - */ -async function _branch({ fs, gitdir, ref, checkout = false }) { - if (ref !== cleanGitRef.clean(ref)) { - throw new InvalidRefNameError(ref, cleanGitRef.clean(ref)) + message() { + const tag = this.withoutSignature(); + return tag.slice(tag.indexOf('\n\n') + 2) + } + + parse() { + return Object.assign(this.headers(), { + message: this.message(), + gpgsig: this.gpgsig(), + }) } - const fullref = `refs/heads/${ref}`; + render() { + return this._tag + } - const exist = await GitRefManager.exists({ fs, gitdir, ref: fullref }); - if (exist) { - throw new AlreadyExistsError('branch', ref, false) + headers() { + const headers = this.justHeaders().split('\n'); + const hs = []; + for (const h of headers) { + if (h[0] === ' ') { + // combine with previous header (without space indent) + hs[hs.length - 1] += '\n' + h.slice(1); + } else { + hs.push(h); + } + } + const obj = {}; + for (const h of hs) { + const key = h.slice(0, h.indexOf(' ')); + const value = h.slice(h.indexOf(' ') + 1); + if (Array.isArray(obj[key])) { + obj[key].push(value); + } else { + obj[key] = value; + } + } + if (obj.tagger) { + obj.tagger = parseAuthor(obj.tagger); + } + if (obj.committer) { + obj.committer = parseAuthor(obj.committer); + } + return obj } - // Get current HEAD tree oid - let oid; - try { - oid = await GitRefManager.resolve({ fs, gitdir, ref: 'HEAD' }); - } catch (e) { - // Probably an empty repo + withoutSignature() { + const tag = normalizeNewlines(this._tag); + if (tag.indexOf('\n-----BEGIN PGP SIGNATURE-----') === -1) return tag + return tag.slice(0, tag.lastIndexOf('\n-----BEGIN PGP SIGNATURE-----')) } - // Create a new ref that points at the current commit - if (oid) { - await GitRefManager.writeRef({ fs, gitdir, ref: fullref, value: oid }); + gpgsig() { + if (this._tag.indexOf('\n-----BEGIN PGP SIGNATURE-----') === -1) return + const signature = this._tag.slice( + this._tag.indexOf('-----BEGIN PGP SIGNATURE-----'), + this._tag.indexOf('-----END PGP SIGNATURE-----') + + '-----END PGP SIGNATURE-----'.length + ); + return normalizeNewlines(signature) } - if (checkout) { - // Update HEAD - await GitRefManager.writeSymbolicRef({ - fs, - gitdir, - ref: 'HEAD', - value: fullref, - }); + payload() { + return this.withoutSignature() + '\n' } -} -// @ts-check + toObject() { + return Buffer.from(this._tag, 'utf8') + } -/** - * Create a branch - * - * @param {object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - What to name the branch - * @param {boolean} [args.checkout = false] - Update `HEAD` to point at the newly created branch - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.branch({ fs, dir: '/tutorial', ref: 'develop' }) - * console.log('done') - * - */ -async 
function branch({ - fs, - dir, - gitdir = join(dir, '.git'), - ref, - checkout = false, -}) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('ref', ref); - return await _branch({ - fs: new FileSystem(fs), - gitdir, - ref, - checkout, - }) - } catch (err) { - err.caller = 'git.branch'; - throw err + static async sign(tag, sign, secretKey) { + const payload = tag.payload(); + let { signature } = await sign({ payload, secretKey }); + // renormalize the line endings to the one true line-ending + signature = normalizeNewlines(signature); + const signedTag = payload + signature; + // return a new tag object + return GitAnnotatedTag.from(signedTag) } } -// https://dev.to/namirsab/comment/2050 -function arrayRange(start, end) { - const length = end - start; - return Array.from({ length }, (_, i) => start + i) +function indent(str) { + return ( + str + .trim() + .split('\n') + .map(x => ' ' + x) + .join('\n') + '\n' + ) } -// TODO: Should I just polyfill Array.flat? -const flat = - typeof Array.prototype.flat === 'undefined' - ? entries => entries.reduce((acc, x) => acc.concat(x), []) - : entries => entries.flat(); +function outdent(str) { + return str + .split('\n') + .map(x => x.replace(/^ /, '')) + .join('\n') +} -// This is convenient for computing unions/joins of sorted lists. -class RunningMinimum { - constructor() { - // Using a getter for 'value' would just bloat the code. - // You know better than to set it directly right? - this.value = null; +class GitCommit { + constructor(commit) { + if (typeof commit === 'string') { + this._commit = commit; + } else if (Buffer.isBuffer(commit)) { + this._commit = commit.toString('utf8'); + } else if (typeof commit === 'object') { + this._commit = GitCommit.render(commit); + } else { + throw new InternalError('invalid type passed to GitCommit constructor') + } } - consider(value) { - if (value === null || value === undefined) return - if (this.value === null) { - this.value = value; - } else if (value < this.value) { - this.value = value; - } + static fromPayloadSignature({ payload, signature }) { + const headers = GitCommit.justHeaders(payload); + const message = GitCommit.justMessage(payload); + const commit = normalizeNewlines( + headers + '\ngpgsig' + indent(signature) + '\n' + message + ); + return new GitCommit(commit) } - reset() { - this.value = null; + static from(commit) { + return new GitCommit(commit) } -} -// Take an array of length N of -// iterators of length Q_n -// of strings -// and return an iterator of length max(Q_n) for all n -// of arrays of length N -// of string|null who all have the same string value -function* unionOfIterators(sets) { - /* NOTE: We can assume all arrays are sorted. - * Indexes are sorted because they are defined that way: - * - * > Index entries are sorted in ascending order on the name field, - * > interpreted as a string of unsigned bytes (i.e. memcmp() order, no - * > localization, no special casing of directory separator '/'). Entries - * > with the same name are sorted by their stage field. - * - * Trees should be sorted because they are created directly from indexes. - * They definitely should be sorted, or else they wouldn't have a unique SHA1. - * So that would be very naughty on the part of the tree-creator. - * - * Lastly, the working dir entries are sorted because I choose to sort them - * in my FileSystem.readdir() implementation. 
- */ + toObject() { + return Buffer.from(this._commit, 'utf8') + } - // Init - const min = new RunningMinimum(); - let minimum; - const heads = []; - const numsets = sets.length; - for (let i = 0; i < numsets; i++) { - // Abuse the fact that iterators continue to return 'undefined' for value - // once they are done - heads[i] = sets[i].next().value; - if (heads[i] !== undefined) { - min.consider(heads[i]); + // Todo: allow setting the headers and message + headers() { + return this.parseHeaders() + } + + // Todo: allow setting the headers and message + message() { + return GitCommit.justMessage(this._commit) + } + + parse() { + return Object.assign({ message: this.message() }, this.headers()) + } + + static justMessage(commit) { + return normalizeNewlines(commit.slice(commit.indexOf('\n\n') + 2)) + } + + static justHeaders(commit) { + return commit.slice(0, commit.indexOf('\n\n')) + } + + parseHeaders() { + const headers = GitCommit.justHeaders(this._commit).split('\n'); + const hs = []; + for (const h of headers) { + if (h[0] === ' ') { + // combine with previous header (without space indent) + hs[hs.length - 1] += '\n' + h.slice(1); + } else { + hs.push(h); + } + } + const obj = { + parent: [], + }; + for (const h of hs) { + const key = h.slice(0, h.indexOf(' ')); + const value = h.slice(h.indexOf(' ') + 1); + if (Array.isArray(obj[key])) { + obj[key].push(value); + } else { + obj[key] = value; + } + } + if (obj.author) { + obj.author = parseAuthor(obj.author); + } + if (obj.committer) { + obj.committer = parseAuthor(obj.committer); } + return obj } - if (min.value === null) return - // Iterate - while (true) { - const result = []; - minimum = min.value; - min.reset(); - for (let i = 0; i < numsets; i++) { - if (heads[i] !== undefined && heads[i] === minimum) { - result[i] = heads[i]; - heads[i] = sets[i].next().value; - } else { - // A little hacky, but eh - result[i] = null; + + static renderHeaders(obj) { + let headers = ''; + if (obj.tree) { + headers += `tree ${obj.tree}\n`; + } else { + headers += `tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\n`; // the null tree + } + if (obj.parent) { + if (obj.parent.length === undefined) { + throw new InternalError(`commit 'parent' property should be an array`) } - if (heads[i] !== undefined) { - min.consider(heads[i]); + for (const p of obj.parent) { + headers += `parent ${p}\n`; } } - yield result; - if (min.value === null) return + const author = obj.author; + headers += `author ${formatAuthor(author)}\n`; + const committer = obj.committer || obj.author; + headers += `committer ${formatAuthor(committer)}\n`; + if (obj.gpgsig) { + headers += 'gpgsig' + indent(obj.gpgsig); + } + return headers } -} -// @ts-check + static render(obj) { + return GitCommit.renderHeaders(obj) + '\n' + normalizeNewlines(obj.message) + } -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {string} [args.dir] - * @param {string} [args.gitdir=join(dir,'.git')] - * @param {Walker[]} args.trees - * @param {WalkerMap} [args.map] - * @param {WalkerReduce} [args.reduce] - * @param {WalkerIterate} [args.iterate] - * - * @returns {Promise} The finished tree-walking result - * - * @see {WalkerMap} - * - */ -async function _walk({ - fs, - cache, - dir, - gitdir, - trees, - // @ts-ignore - map = async (_, entry) => entry, - // The default reducer is a flatmap that filters out undefineds. 
- reduce = async (parent, children) => { - const flatten = flat(children); - if (parent !== undefined) flatten.unshift(parent); - return flatten - }, - // The default iterate function walks all children concurrently - iterate = (walk, children) => Promise.all([...children].map(walk)), -}) { - const walkers = trees.map(proxy => - proxy[GitWalkSymbol]({ fs, dir, gitdir, cache }) - ); + render() { + return this._commit + } - const root = new Array(walkers.length).fill('.'); - const range = arrayRange(0, walkers.length); - const unionWalkerFromReaddir = async entries => { - range.map(i => { - entries[i] = entries[i] && new walkers[i].ConstructEntry(entries[i]); - }); - const subdirs = await Promise.all( - range.map(i => (entries[i] ? walkers[i].readdir(entries[i]) : [])) + withoutSignature() { + const commit = normalizeNewlines(this._commit); + if (commit.indexOf('\ngpgsig') === -1) return commit + const headers = commit.slice(0, commit.indexOf('\ngpgsig')); + const message = commit.slice( + commit.indexOf('-----END PGP SIGNATURE-----\n') + + '-----END PGP SIGNATURE-----\n'.length ); - // Now process child directories - const iterators = subdirs - .map(array => (array === null ? [] : array)) - .map(array => array[Symbol.iterator]()); - return { - entries, - children: unionOfIterators(iterators), - } - }; + return normalizeNewlines(headers + '\n' + message) + } - const walk = async root => { - const { entries, children } = await unionWalkerFromReaddir(root); - const fullpath = entries.find(entry => entry && entry._fullpath)._fullpath; - const parent = await map(fullpath, entries); - if (parent !== null) { - let walkedChildren = await iterate(walk, children); - walkedChildren = walkedChildren.filter(x => x !== undefined); - return reduce(parent, walkedChildren) - } - }; - return walk(root) + isolateSignature() { + const signature = this._commit.slice( + this._commit.indexOf('-----BEGIN PGP SIGNATURE-----'), + this._commit.indexOf('-----END PGP SIGNATURE-----') + + '-----END PGP SIGNATURE-----'.length + ); + return outdent(signature) + } + + static async sign(commit, sign, secretKey) { + const payload = commit.withoutSignature(); + const message = GitCommit.justMessage(commit._commit); + let { signature } = await sign({ payload, secretKey }); + // renormalize the line endings to the one true line-ending + signature = normalizeNewlines(signature); + const headers = GitCommit.justHeaders(commit._commit); + const signedCommit = + headers + '\n' + 'gpgsig' + indent(signature) + '\n' + message; + // return a new commit object + return GitCommit.from(signedCommit) + } } -const worthWalking = (filepath, root) => { - if (filepath === '.' 
|| root == null || root.length === 0 || root === '.') { - return true +async function resolveTree({ fs, cache, gitdir, oid }) { + // Empty tree - bypass `readObject` + if (oid === '4b825dc642cb6eb9a060e54bf8d69288fbee4904') { + return { tree: GitTree.from([]), oid } } - if (root.length >= filepath.length) { - return root.startsWith(filepath) - } else { - return filepath.startsWith(root) + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + // Resolve annotated tag objects to whatever + if (type === 'tag') { + oid = GitAnnotatedTag.from(object).parse().object; + return resolveTree({ fs, cache, gitdir, oid }) } -}; - -// @ts-check - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {ProgressCallback} [args.onProgress] - * @param {string} args.dir - * @param {string} args.gitdir - * @param {string} args.ref - * @param {string[]} [args.filepaths] - * @param {string} args.remote - * @param {boolean} args.noCheckout - * @param {boolean} [args.noUpdateHead] - * @param {boolean} [args.dryRun] - * @param {boolean} [args.force] - * @param {boolean} [args.track] - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - */ -async function _checkout({ - fs, - cache, - onProgress, - dir, - gitdir, - remote, - ref, - filepaths, - noCheckout, - noUpdateHead, - dryRun, - force, - track = true, -}) { - // Get tree oid - let oid; - try { - oid = await GitRefManager.resolve({ fs, gitdir, ref }); - // TODO: Figure out what to do if both 'ref' and 'remote' are specified, ref already exists, - // and is configured to track a different remote. - } catch (err) { - if (ref === 'HEAD') throw err - // If `ref` doesn't exist, create a new remote tracking branch - // Figure out the commit to checkout - const remoteRef = `${remote}/${ref}`; - oid = await GitRefManager.resolve({ - fs, - gitdir, - ref: remoteRef, - }); - if (track) { - // Set up remote tracking branch - const config = await GitConfigManager.get({ fs, gitdir }); - await config.set(`branch.${ref}.remote`, remote); - await config.set(`branch.${ref}.merge`, `refs/heads/${ref}`); - await GitConfigManager.save({ fs, gitdir, config }); - } - // Create a new branch that points at that same commit - await GitRefManager.writeRef({ - fs, - gitdir, - ref: `refs/heads/${ref}`, - value: oid, - }); + // Resolve commits to trees + if (type === 'commit') { + oid = GitCommit.from(object).parse().tree; + return resolveTree({ fs, cache, gitdir, oid }) + } + if (type !== 'tree') { + throw new ObjectTypeError(oid, type, 'tree') } + return { tree: GitTree.from(object), oid } +} + +class GitWalkerRepo { + constructor({ fs, gitdir, ref, cache }) { + this.fs = fs; + this.cache = cache; + this.gitdir = gitdir; + this.mapPromise = (async () => { + const map = new Map(); + let oid; + try { + oid = await GitRefManager.resolve({ fs, gitdir, ref }); + } catch (e) { + if (e instanceof NotFoundError) { + // Handle fresh branches with no commits + oid = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'; + } + } + const tree = await resolveTree({ fs, cache: this.cache, gitdir, oid }); + tree.type = 'tree'; + tree.mode = '40000'; + map.set('.', tree); + return map + })(); + const walker = this; + this.ConstructEntry = class TreeEntry { + constructor(fullpath) { + this._fullpath = fullpath; + this._type = false; + this._mode = false; + this._stat = false; + this._content = false; + this._oid = false; + } - // Update working dir - if (!noCheckout) { - let 
ops; - // First pass - just analyze files (not directories) and figure out what needs to be done - try { - ops = await analyze({ - fs, - cache, - onProgress, - dir, - gitdir, - ref, - force, - filepaths, - }); - } catch (err) { - // Throw a more helpful error message for this common mistake. - if (err instanceof NotFoundError && err.data.what === oid) { - throw new CommitNotFetchedError(ref, oid) - } else { - throw err + async type() { + return walker.type(this) } - } - // Report conflicts - const conflicts = ops - .filter(([method]) => method === 'conflict') - .map(([method, fullpath]) => fullpath); - if (conflicts.length > 0) { - throw new CheckoutConflictError(conflicts) - } + async mode() { + return walker.mode(this) + } - // Collect errors - const errors = ops - .filter(([method]) => method === 'error') - .map(([method, fullpath]) => fullpath); - if (errors.length > 0) { - throw new InternalError(errors.join(', ')) - } + async stat() { + return walker.stat(this) + } - if (dryRun) { - // Since the format of 'ops' is in flux, I really would rather folk besides myself not start relying on it - // return ops - return - } + async content() { + return walker.content(this) + } - // Second pass - execute planned changes - // The cheapest semi-parallel solution without computing a full dependency graph will be - // to just do ops in 4 dumb phases: delete files, delete dirs, create dirs, write files + async oid() { + return walker.oid(this) + } + }; + } - let count = 0; - const total = ops.length; - await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { - await Promise.all( - ops - .filter( - ([method]) => method === 'delete' || method === 'delete-index' - ) - .map(async function([method, fullpath]) { - const filepath = `${dir}/${fullpath}`; - if (method === 'delete') { - await fs.rm(filepath); - } - index.delete({ filepath: fullpath }); - if (onProgress) { - await onProgress({ - phase: 'Updating workdir', - loaded: ++count, - total, - }); - } - }) - ); - }); + async readdir(entry) { + const filepath = entry._fullpath; + const { fs, cache, gitdir } = this; + const map = await this.mapPromise; + const obj = map.get(filepath); + if (!obj) throw new Error(`No obj for ${filepath}`) + const oid = obj.oid; + if (!oid) throw new Error(`No oid for obj ${JSON.stringify(obj)}`) + if (obj.type !== 'tree') { + // TODO: support submodules (type === 'commit') + return null + } + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + if (type !== obj.type) { + throw new ObjectTypeError(oid, type, obj.type) + } + const tree = GitTree.from(object); + // cache all entries + for (const entry of tree) { + map.set(join(filepath, entry.path), entry); + } + return tree.entries().map(entry => join(filepath, entry.path)) + } - // Note: this is cannot be done naively in parallel - await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { - for (const [method, fullpath] of ops) { - if (method === 'rmdir' || method === 'rmdir-index') { - const filepath = `${dir}/${fullpath}`; - try { - if (method === 'rmdir-index') { - index.delete({ filepath: fullpath }); - } - await fs.rmdir(filepath); - if (onProgress) { - await onProgress({ - phase: 'Updating workdir', - loaded: ++count, - total, - }); - } - } catch (e) { - if (e.code === 'ENOTEMPTY') { - console.log( - `Did not delete ${fullpath} because directory is not empty` - ); - } else { - throw e - } - } - } - } - }); + async type(entry) { + if (entry._type === false) { + const map = await this.mapPromise; + const { 
type } = map.get(entry._fullpath); + entry._type = type; + } + return entry._type + } - await Promise.all( - ops - .filter(([method]) => method === 'mkdir' || method === 'mkdir-index') - .map(async function([_, fullpath]) { - const filepath = `${dir}/${fullpath}`; - await fs.mkdir(filepath); - if (onProgress) { - await onProgress({ - phase: 'Updating workdir', - loaded: ++count, - total, - }); - } - }) - ); + async mode(entry) { + if (entry._mode === false) { + const map = await this.mapPromise; + const { mode } = map.get(entry._fullpath); + entry._mode = normalizeMode(parseInt(mode, 8)); + } + return entry._mode + } - await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { - await Promise.all( - ops - .filter( - ([method]) => - method === 'create' || - method === 'create-index' || - method === 'update' || - method === 'mkdir-index' - ) - .map(async function([method, fullpath, oid, mode, chmod]) { - const filepath = `${dir}/${fullpath}`; - try { - if (method !== 'create-index' && method !== 'mkdir-index') { - const { object } = await _readObject({ fs, cache, gitdir, oid }); - if (chmod) { - // Note: the mode option of fs.write only works when creating files, - // not updating them. Since the `fs` plugin doesn't expose `chmod` this - // is our only option. - await fs.rm(filepath); - } - if (mode === 0o100644) { - // regular file - await fs.write(filepath, object); - } else if (mode === 0o100755) { - // executable file - await fs.write(filepath, object, { mode: 0o777 }); - } else if (mode === 0o120000) { - // symlink - await fs.writelink(filepath, object); - } else { - throw new InternalError( - `Invalid mode 0o${mode.toString(8)} detected in blob ${oid}` - ) - } - } + async stat(_entry) {} - const stats = await fs.lstat(filepath); - // We can't trust the executable bit returned by lstat on Windows, - // so we need to preserve this value from the TREE. - // TODO: Figure out how git handles this internally. 
- if (mode === 0o100755) { - stats.mode = 0o755; - } - // Submodules are present in the git index but use a unique mode different from trees - if (method === 'mkdir-index') { - stats.mode = 0o160000; - } - index.insert({ - filepath: fullpath, - stats, - oid, - }); - if (onProgress) { - await onProgress({ - phase: 'Updating workdir', - loaded: ++count, - total, - }); - } - } catch (e) { - console.log(e); - } - }) - ); - }); + async content(entry) { + if (entry._content === false) { + const map = await this.mapPromise; + const { fs, cache, gitdir } = this; + const obj = map.get(entry._fullpath); + const oid = obj.oid; + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + if (type !== 'blob') { + entry._content = undefined; + } else { + entry._content = new Uint8Array(object); + } + } + return entry._content } - // Update HEAD - if (!noUpdateHead) { - const fullRef = await GitRefManager.expand({ fs, gitdir, ref }); - if (fullRef.startsWith('refs/heads')) { - await GitRefManager.writeSymbolicRef({ - fs, - gitdir, - ref: 'HEAD', - value: fullRef, - }); - } else { - // detached head - await GitRefManager.writeRef({ fs, gitdir, ref: 'HEAD', value: oid }); + async oid(entry) { + if (entry._oid === false) { + const map = await this.mapPromise; + const obj = map.get(entry._fullpath); + entry._oid = obj.oid; } + return entry._oid } } -async function analyze({ - fs, - cache, - onProgress, - dir, - gitdir, - ref, - force, - filepaths, -}) { - let count = 0; - return _walk({ - fs, - cache, - dir, - gitdir, - trees: [TREE({ ref }), WORKDIR(), STAGE()], - map: async function(fullpath, [commit, workdir, stage]) { - if (fullpath === '.') return - // match against base paths - if (filepaths && !filepaths.some(base => worthWalking(fullpath, base))) { - return null +// @ts-check + +/** + * @param {object} args + * @param {string} [args.ref='HEAD'] + * @returns {Walker} + */ +function TREE({ ref = 'HEAD' }) { + const o = Object.create(null); + Object.defineProperty(o, GitWalkSymbol, { + value: function({ fs, gitdir, cache }) { + return new GitWalkerRepo({ fs, gitdir, ref, cache }) + }, + }); + Object.freeze(o); + return o +} + +// @ts-check + +class GitWalkerFs { + constructor({ fs, dir, gitdir, cache }) { + this.fs = fs; + this.cache = cache; + this.dir = dir; + this.gitdir = gitdir; + const walker = this; + this.ConstructEntry = class WorkdirEntry { + constructor(fullpath) { + this._fullpath = fullpath; + this._type = false; + this._mode = false; + this._stat = false; + this._content = false; + this._oid = false; } - // Emit progress event - if (onProgress) { - await onProgress({ phase: 'Analyzing workdir', loaded: ++count }); + + async type() { + return walker.type(this) } - // This is a kind of silly pattern but it worked so well for me in the past - // and it makes intuitively demonstrating exhaustiveness so *easy*. - // This checks for the presense and/or absense of each of the 3 entries, - // converts that to a 3-bit binary representation, and then handles - // every possible combination (2^3 or 8 cases) with a lookup table. - const key = [!!stage, !!commit, !!workdir].map(Number).join(''); - switch (key) { - // Impossible case. - case '000': - return - // Ignore workdir files that are not tracked and not part of the new commit. - case '001': - // OK, make an exception for explicitly named files. 
- if (force && filepaths && filepaths.includes(fullpath)) { - return ['delete', fullpath] - } - return - // New entries - case '010': { - switch (await commit.type()) { - case 'tree': { - return ['mkdir', fullpath] - } - case 'blob': { - return [ - 'create', - fullpath, - await commit.oid(), - await commit.mode(), - ] - } - case 'commit': { - return [ - 'mkdir-index', - fullpath, - await commit.oid(), - await commit.mode(), - ] - } - default: { - return [ - 'error', - `new entry Unhandled type ${await commit.type()}`, - ] - } - } - } - // New entries but there is already something in the workdir there. - case '011': { - switch (`${await commit.type()}-${await workdir.type()}`) { - case 'tree-tree': { - return // noop - } - case 'tree-blob': - case 'blob-tree': { - return ['conflict', fullpath] - } - case 'blob-blob': { - // Is the incoming file different? - if ((await commit.oid()) !== (await workdir.oid())) { - if (force) { - return [ - 'update', - fullpath, - await commit.oid(), - await commit.mode(), - (await commit.mode()) !== (await workdir.mode()), - ] - } else { - return ['conflict', fullpath] - } - } else { - // Is the incoming file a different mode? - if ((await commit.mode()) !== (await workdir.mode())) { - if (force) { - return [ - 'update', - fullpath, - await commit.oid(), - await commit.mode(), - true, - ] - } else { - return ['conflict', fullpath] - } - } else { - return [ - 'create-index', - fullpath, - await commit.oid(), - await commit.mode(), - ] - } - } - } - case 'commit-tree': { - // TODO: submodule - // We'll ignore submodule directories for now. - // Users prefer we not throw an error for lack of submodule support. - // gitlinks - return - } - case 'commit-blob': { - // TODO: submodule - // But... we'll complain if there is a *file* where we would - // put a submodule if we had submodule support. - return ['conflict', fullpath] - } - default: { - return ['error', `new entry Unhandled type ${commit.type}`] - } - } - } - // Something in stage but not in the commit OR the workdir. - // Note: I verified this behavior against canonical git. - case '100': { - return ['delete-index', fullpath] - } - // Deleted entries - // TODO: How to handle if stage type and workdir type mismatch? 
- case '101': { - switch (await stage.type()) { - case 'tree': { - return ['rmdir', fullpath] - } - case 'blob': { - // Git checks that the workdir.oid === stage.oid before deleting file - if ((await stage.oid()) !== (await workdir.oid())) { - if (force) { - return ['delete', fullpath] - } else { - return ['conflict', fullpath] - } - } else { - return ['delete', fullpath] - } - } - case 'commit': { - return ['rmdir-index', fullpath] - } - default: { - return [ - 'error', - `delete entry Unhandled type ${await stage.type()}`, - ] - } - } + async mode() { + return walker.mode(this) + } + + async stat() { + return walker.stat(this) + } + + async content() { + return walker.content(this) + } + + async oid() { + return walker.oid(this) + } + }; + } + + async readdir(entry) { + const filepath = entry._fullpath; + const { fs, dir } = this; + const names = await fs.readdir(join(dir, filepath)); + if (names === null) return null + return names.map(name => join(filepath, name)) + } + + async type(entry) { + if (entry._type === false) { + await entry.stat(); + } + return entry._type + } + + async mode(entry) { + if (entry._mode === false) { + await entry.stat(); + } + return entry._mode + } + + async stat(entry) { + if (entry._stat === false) { + const { fs, dir } = this; + let stat = await fs.lstat(`${dir}/${entry._fullpath}`); + if (!stat) { + throw new Error( + `ENOENT: no such file or directory, lstat '${entry._fullpath}'` + ) + } + let type = stat.isDirectory() ? 'tree' : 'blob'; + if (type === 'blob' && !stat.isFile() && !stat.isSymbolicLink()) { + type = 'special'; + } + entry._type = type; + stat = normalizeStats(stat); + entry._mode = stat.mode; + // workaround for a BrowserFS edge case + if (stat.size === -1 && entry._actualSize) { + stat.size = entry._actualSize; + } + entry._stat = stat; + } + return entry._stat + } + + async content(entry) { + if (entry._content === false) { + const { fs, dir } = this; + if ((await entry.type()) === 'tree') { + entry._content = undefined; + } else { + const content = await fs.read(`${dir}/${entry._fullpath}`); + // workaround for a BrowserFS edge case + entry._actualSize = content.length; + if (entry._stat && entry._stat.size === -1) { + entry._stat.size = entry._actualSize; } - /* eslint-disable no-fallthrough */ - // File missing from workdir - case '110': - // Possibly modified entries - case '111': { - /* eslint-enable no-fallthrough */ - switch (`${await stage.type()}-${await commit.type()}`) { - case 'tree-tree': { - return - } - case 'blob-blob': { - // If the file hasn't changed, there is no need to do anything. - // Existing file modifications in the workdir can be be left as is. - if ( - (await stage.oid()) === (await commit.oid()) && - (await stage.mode()) === (await commit.mode()) && - !force - ) { - return - } + entry._content = new Uint8Array(content); + } + } + return entry._content + } - // Check for local changes that would be lost - if (workdir) { - // Note: canonical git only compares with the stage. But we're smart enough - // to compare to the stage AND the incoming commit. 
- if ( - (await workdir.oid()) !== (await stage.oid()) && - (await workdir.oid()) !== (await commit.oid()) - ) { - if (force) { - return [ - 'update', - fullpath, - await commit.oid(), - await commit.mode(), - (await commit.mode()) !== (await workdir.mode()), - ] - } else { - return ['conflict', fullpath] - } - } - } else if (force) { - return [ - 'update', - fullpath, - await commit.oid(), - await commit.mode(), - (await commit.mode()) !== (await stage.mode()), - ] - } - // Has file mode changed? - if ((await commit.mode()) !== (await stage.mode())) { - return [ - 'update', - fullpath, - await commit.oid(), - await commit.mode(), - true, - ] - } - // TODO: HANDLE SYMLINKS - // Has the file content changed? - if ((await commit.oid()) !== (await stage.oid())) { - return [ - 'update', - fullpath, - await commit.oid(), - await commit.mode(), - false, - ] - } else { - return - } - } - case 'tree-blob': { - return ['update-dir-to-blob', fullpath, await commit.oid()] - } - case 'blob-tree': { - return ['update-blob-to-tree', fullpath] - } - case 'commit-commit': { - return [ - 'mkdir-index', - fullpath, - await commit.oid(), - await commit.mode(), - ] - } - default: { - return [ - 'error', - `update entry Unhandled type ${await stage.type()}-${await commit.type()}`, - ] + async oid(entry) { + if (entry._oid === false) { + const { fs, gitdir, cache } = this; + let oid; + // See if we can use the SHA1 hash in the index. + await GitIndexManager.acquire({ fs, gitdir, cache }, async function( + index + ) { + const stage = index.entriesMap.get(entry._fullpath); + const stats = await entry.stat(); + if (!stage || compareStats(stats, stage)) { + const content = await entry.content(); + if (content === undefined) { + oid = undefined; + } else { + oid = await shasum( + GitObject.wrap({ type: 'blob', object: await entry.content() }) + ); + // Update the stats in the index so we will get a "cache hit" next time + // 1) if we can (because the oid and mode are the same) + // 2) and only if we need to (because other stats differ) + if ( + stage && + oid === stage.oid && + stats.mode === stage.mode && + compareStats(stats, stage) + ) { + index.insert({ + filepath: entry._fullpath, + stats, + oid: oid, + }); } } + } else { + // Use the index SHA1 rather than compute it + oid = stage.oid; } - } + }); + entry._oid = oid; + } + return entry._oid + } +} + +// @ts-check + +/** + * @returns {Walker} + */ +function WORKDIR() { + const o = Object.create(null); + Object.defineProperty(o, GitWalkSymbol, { + value: function({ fs, dir, gitdir, cache }) { + return new GitWalkerFs({ fs, dir, gitdir, cache }) }, - // Modify the default flat mapping - reduce: async function(parent, children) { - children = flat(children); - if (!parent) { - return children - } else if (parent && parent[0] === 'rmdir') { - children.push(parent); - return children + }); + Object.freeze(o); + return o +} + +// @ts-check + +// I'm putting this in a Manager because I reckon it could benefit +// from a LOT of cacheing. +class GitIgnoreManager { + static async isIgnored({ fs, dir, gitdir = join(dir, '.git'), filepath }) { + // ALWAYS ignore ".git" folders. + if (basename(filepath) === '.git') return true + // '.' is not a valid gitignore entry, so '.' 
is never ignored + if (filepath === '.') return false + // Check and load exclusion rules from project exclude file (.git/info/exclude) + let excludes = ''; + const excludesFile = join(gitdir, 'info', 'exclude'); + if (await fs.exists(excludesFile)) { + excludes = await fs.read(excludesFile, 'utf8'); + } + // Find all the .gitignore files that could affect this file + const pairs = [ + { + gitignore: join(dir, '.gitignore'), + filepath, + }, + ]; + const pieces = filepath.split('/').filter(Boolean); + for (let i = 1; i < pieces.length; i++) { + const folder = pieces.slice(0, i).join('/'); + const file = pieces.slice(i).join('/'); + pairs.push({ + gitignore: join(dir, folder, '.gitignore'), + filepath: file, + }); + } + let ignoredStatus = false; + for (const p of pairs) { + let file; + try { + file = await fs.read(p.gitignore, 'utf8'); + } catch (err) { + if (err.code === 'NOENT') continue + } + const ign = ignore().add(excludes); + ign.add(file); + // If the parent directory is excluded, we are done. + // "It is not possible to re-include a file if a parent directory of that file is excluded. Git doesn’t list excluded directories for performance reasons, so any patterns on contained files have no effect, no matter where they are defined." + // source: https://git-scm.com/docs/gitignore + const parentdir = dirname(p.filepath); + if (parentdir !== '.' && ign.ignores(parentdir)) return true + // If the file is currently ignored, test for UNignoring. + if (ignoredStatus) { + ignoredStatus = !ign.test(p.filepath).unignored; } else { - children.unshift(parent); - return children + ignoredStatus = ign.test(p.filepath).ignored; } - }, - }) + } + return ignoredStatus + } } -// @ts-check - /** - * Checkout a branch - * - * If the branch already exists it will check out that branch. Otherwise, it will create a new remote tracking branch set to track the remote branch of that name. - * - * @param {object} args - * @param {FsClient} args.fs - a file system implementation - * @param {ProgressCallback} [args.onProgress] - optional progress event callback - * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref = 'HEAD'] - Source to checkout files from - * @param {string[]} [args.filepaths] - Limit the checkout to the given files and directories - * @param {string} [args.remote = 'origin'] - Which remote repository to use - * @param {boolean} [args.noCheckout = false] - If true, will update HEAD but won't update the working directory - * @param {boolean} [args.noUpdateHead] - If true, will update the working directory but won't update HEAD. Defaults to `false` when `ref` is provided, and `true` if `ref` is not provided. - * @param {boolean} [args.dryRun = false] - If true, simulates a checkout so you can test whether it would succeed. - * @param {boolean} [args.force = false] - If true, conflicts will be ignored and files will be overwritten regardless of local changes. - * @param {boolean} [args.track = true] - If false, will not set the remote branch tracking information. Defaults to true. 
- * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * // switch to the main branch - * await git.checkout({ - * fs, - * dir: '/tutorial', - * ref: 'main' - * }) - * console.log('done') - * - * @example - * // restore the 'docs' and 'src/docs' folders to the way they were, overwriting any changes - * await git.checkout({ - * fs, - * dir: '/tutorial', - * force: true, - * filepaths: ['docs', 'src/docs'] - * }) - * console.log('done') + * Removes the directory at the specified filepath recursively. Used internally to replicate the behavior of + * fs.promises.rm({ recursive: true, force: true }) from Node.js 14 and above when not available. If the provided + * filepath resolves to a file, it will be removed. * - * @example - * // restore the 'docs' and 'src/docs' folders to the way they are in the 'develop' branch, overwriting any changes - * await git.checkout({ - * fs, - * dir: '/tutorial', - * ref: 'develop', - * noUpdateHead: true, - * force: true, - * filepaths: ['docs', 'src/docs'] - * }) - * console.log('done') + * @param {import('../models/FileSystem.js').FileSystem} fs + * @param {string} filepath - The file or directory to remove. */ -async function checkout({ - fs, - onProgress, - dir, - gitdir = join(dir, '.git'), - remote = 'origin', - ref: _ref, - filepaths, - noCheckout = false, - noUpdateHead = _ref === undefined, - dryRun = false, - force = false, - track = true, - cache = {}, -}) { - try { - assertParameter('fs', fs); - assertParameter('dir', dir); - assertParameter('gitdir', gitdir); +async function rmRecursive(fs, filepath) { + const entries = await fs.readdir(filepath); + if (entries == null) { + await fs.rm(filepath); + } else if (entries.length) { + await Promise.all( + entries.map(entry => { + const subpath = join(filepath, entry); + return fs.lstat(subpath).then(stat => { + if (!stat) return + return stat.isDirectory() ? rmRecursive(fs, subpath) : fs.rm(subpath) + }) + }) + ).then(() => fs.rmdir(filepath)); + } else { + await fs.rmdir(filepath); + } +} - const ref = _ref || 'HEAD'; - return await _checkout({ - fs: new FileSystem(fs), - cache, - onProgress, - dir, - gitdir, - remote, - ref, - filepaths, - noCheckout, - noUpdateHead, - dryRun, - force, - track, - }) - } catch (err) { - err.caller = 'git.checkout'; - throw err +/** + * This is just a collection of helper functions really. At least that's how it started. 
+ */ +class FileSystem { + constructor(fs) { + if (typeof fs._original_unwrapped_fs !== 'undefined') return fs + + const promises = Object.getOwnPropertyDescriptor(fs, 'promises'); + if (promises && promises.enumerable) { + this._readFile = fs.promises.readFile.bind(fs.promises); + this._writeFile = fs.promises.writeFile.bind(fs.promises); + this._mkdir = fs.promises.mkdir.bind(fs.promises); + if (fs.promises.rm) { + this._rm = fs.promises.rm.bind(fs.promises); + } else if (fs.promises.rmdir.length > 1) { + this._rm = fs.promises.rmdir.bind(fs.promises); + } else { + this._rm = rmRecursive.bind(null, this); + } + this._rmdir = fs.promises.rmdir.bind(fs.promises); + this._unlink = fs.promises.unlink.bind(fs.promises); + this._stat = fs.promises.stat.bind(fs.promises); + this._lstat = fs.promises.lstat.bind(fs.promises); + this._readdir = fs.promises.readdir.bind(fs.promises); + this._readlink = fs.promises.readlink.bind(fs.promises); + this._symlink = fs.promises.symlink.bind(fs.promises); + } else { + this._readFile = pify(fs.readFile.bind(fs)); + this._writeFile = pify(fs.writeFile.bind(fs)); + this._mkdir = pify(fs.mkdir.bind(fs)); + if (fs.rm) { + this._rm = pify(fs.rm.bind(fs)); + } else if (fs.rmdir.length > 2) { + this._rm = pify(fs.rmdir.bind(fs)); + } else { + this._rm = rmRecursive.bind(null, this); + } + this._rmdir = pify(fs.rmdir.bind(fs)); + this._unlink = pify(fs.unlink.bind(fs)); + this._stat = pify(fs.stat.bind(fs)); + this._lstat = pify(fs.lstat.bind(fs)); + this._readdir = pify(fs.readdir.bind(fs)); + this._readlink = pify(fs.readlink.bind(fs)); + this._symlink = pify(fs.symlink.bind(fs)); + } + this._original_unwrapped_fs = fs; + } + + /** + * Return true if a file exists, false if it doesn't exist. + * Rethrows errors that aren't related to file existance. + */ + async exists(filepath, options = {}) { + try { + await this._stat(filepath); + return true + } catch (err) { + if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { + return false + } else { + console.log('Unhandled error in "FileSystem.exists()" function', err); + throw err + } + } + } + + /** + * Return the contents of a file if it exists, otherwise returns null. + * + * @param {string} filepath + * @param {object} [options] + * + * @returns {Promise} + */ + async read(filepath, options = {}) { + try { + let buffer = await this._readFile(filepath, options); + // Convert plain ArrayBuffers to Buffers + if (typeof buffer !== 'string') { + buffer = Buffer.from(buffer); + } + return buffer + } catch (err) { + return null + } + } + + /** + * Write a file (creating missing directories if need be) without throwing errors. + * + * @param {string} filepath + * @param {Buffer|Uint8Array|string} contents + * @param {object|string} [options] + */ + async write(filepath, contents, options = {}) { + try { + await this._writeFile(filepath, contents, options); + return + } catch (err) { + // Hmm. Let's try mkdirp and try again. + await this.mkdir(dirname(filepath)); + await this._writeFile(filepath, contents, options); + } + } + + /** + * Make a directory (or series of nested directories) without throwing an error if it already exists. + */ + async mkdir(filepath, _selfCall = false) { + try { + await this._mkdir(filepath); + return + } catch (err) { + // If err is null then operation succeeded! + if (err === null) return + // If the directory already exists, that's OK! 
+ if (err.code === 'EEXIST') return + // Avoid infinite loops of failure + if (_selfCall) throw err + // If we got a "no such file or directory error" backup and try again. + if (err.code === 'ENOENT') { + const parent = dirname(filepath); + // Check to see if we've gone too far + if (parent === '.' || parent === '/' || parent === filepath) throw err + // Infinite recursion, what could go wrong? + await this.mkdir(parent); + await this.mkdir(filepath, true); + } + } } -} - -// @see https://git-scm.com/docs/git-rev-parse.html#_specifying_revisions -const abbreviateRx = new RegExp('^refs/(heads/|tags/|remotes/)?(.*)'); -function abbreviateRef(ref) { - const match = abbreviateRx.exec(ref); - if (match) { - if (match[1] === 'remotes/' && ref.endsWith('/HEAD')) { - return match[2].slice(0, -5) - } else { - return match[2] + /** + * Delete a file without throwing an error if it is already deleted. + */ + async rm(filepath) { + try { + await this._unlink(filepath); + } catch (err) { + if (err.code !== 'ENOENT') throw err } } - return ref -} - -// @ts-check -/** - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {boolean} [args.fullname = false] - Return the full path (e.g. "refs/heads/main") instead of the abbreviated form. - * @param {boolean} [args.test = false] - If the current branch doesn't actually exist (such as right after git init) then return `undefined`. - * - * @returns {Promise} The name of the current branch or undefined if the HEAD is detached. - * - */ -async function _currentBranch({ - fs, - gitdir, - fullname = false, - test = false, -}) { - const ref = await GitRefManager.resolve({ - fs, - gitdir, - ref: 'HEAD', - depth: 2, - }); - if (test) { + /** + * Delete a directory without throwing an error if it is already deleted. + */ + async rmdir(filepath, opts) { try { - await GitRefManager.resolve({ fs, gitdir, ref }); - } catch (_) { - return + if (opts && opts.recursive) { + await this._rm(filepath, opts); + } else { + await this._rmdir(filepath); + } + } catch (err) { + if (err.code !== 'ENOENT') throw err } } - // Return `undefined` for detached HEAD - if (!ref.startsWith('refs/')) return - return fullname ? ref : abbreviateRef(ref) -} -function translateSSHtoHTTP(url) { - // handle "shorter scp-like syntax" - url = url.replace(/^git@([^:]+):/, 'https://$1/'); - // handle proper SSH URLs - url = url.replace(/^ssh:\/\//, 'https://'); - return url -} + /** + * Read a directory without throwing an error is the directory doesn't exist + */ + async readdir(filepath) { + try { + const names = await this._readdir(filepath); + // Ordering is not guaranteed, and system specific (Windows vs Unix) + // so we must sort them ourselves. + names.sort(compareStrings); + return names + } catch (err) { + if (err.code === 'ENOTDIR') return null + return [] + } + } -function calculateBasicAuthHeader({ username = '', password = '' }) { - return `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}` -} + /** + * Return a flast list of all the files nested inside a directory + * + * Based on an elegant concurrent recursive solution from SO + * https://stackoverflow.com/a/45130990/2168416 + */ + async readdirDeep(dir) { + const subdirs = await this._readdir(dir); + const files = await Promise.all( + subdirs.map(async subdir => { + const res = dir + '/' + subdir; + return (await this._stat(res)).isDirectory() + ? 
this.readdirDeep(res) + : res + }) + ); + return files.reduce((a, f) => a.concat(f), []) + } -// Currently 'for await' upsets my linters. -async function forAwait(iterable, cb) { - const iter = getIterator(iterable); - while (true) { - const { value, done } = await iter.next(); - if (value) await cb(value); - if (done) break + /** + * Return the Stats of a file/symlink if it exists, otherwise returns null. + * Rethrows errors that aren't related to file existance. + */ + async lstat(filename) { + try { + const stats = await this._lstat(filename); + return stats + } catch (err) { + if (err.code === 'ENOENT') { + return null + } + throw err + } } - if (iter.return) iter.return(); -} -async function collect(iterable) { - let size = 0; - const buffers = []; - // This will be easier once `for await ... of` loops are available. - await forAwait(iterable, value => { - buffers.push(value); - size += value.byteLength; - }); - const result = new Uint8Array(size); - let nextIndex = 0; - for (const buffer of buffers) { - result.set(buffer, nextIndex); - nextIndex += buffer.byteLength; + /** + * Reads the contents of a symlink if it exists, otherwise returns null. + * Rethrows errors that aren't related to file existance. + */ + async readlink(filename, opts = { encoding: 'buffer' }) { + // Note: FileSystem.readlink returns a buffer by default + // so we can dump it into GitObject.write just like any other file. + try { + const link = await this._readlink(filename, opts); + return Buffer.isBuffer(link) ? link : Buffer.from(link) + } catch (err) { + if (err.code === 'ENOENT') { + return null + } + throw err + } } - return result -} -function extractAuthFromUrl(url) { - // For whatever reason, the `fetch` API does not convert credentials embedded in the URL - // into Basic Authentication headers automatically. Instead it throws an error! - // So we must manually parse the URL, rip out the user:password portion if it is present - // and compute the Authorization header. - // Note: I tried using new URL(url) but that throws a security exception in Edge. :rolleyes: - let userpass = url.match(/^https?:\/\/([^/]+)@/); - // No credentials, return the url unmodified and an empty auth object - if (userpass == null) return { url, auth: {} } - userpass = userpass[1]; - const [username, password] = userpass.split(':'); - // Remove credentials from URL - url = url.replace(`${userpass}@`, ''); - // Has credentials, return the fetch-safe URL and the parsed credentials - return { url, auth: { username, password } } + /** + * Write the contents of buffer to a symlink. + */ + async writelink(filename, buffer) { + return this._symlink(buffer.toString('utf8'), filename) + } } -function padHex(b, n) { - const s = n.toString(16); - return '0'.repeat(b - s.length) + s +async function writeObjectLoose({ fs, gitdir, object, format, oid }) { + if (format !== 'deflated') { + throw new InternalError( + 'GitObjectStoreLoose expects objects to write to be in deflated format' + ) + } + const source = `objects/${oid.slice(0, 2)}/${oid.slice(2)}`; + const filepath = `${gitdir}/${source}`; + // Don't overwrite existing git objects - this helps avoid EPERM errors. + // Although I don't know how we'd fix corrupted objects then. Perhaps delete them + // on read? + if (!(await fs.exists(filepath))) await fs.write(filepath, object); } -/** -pkt-line Format ---------------- - -Much (but not all) of the payload is described around pkt-lines. - -A pkt-line is a variable length binary string. 
The first four bytes -of the line, the pkt-len, indicates the total length of the line, -in hexadecimal. The pkt-len includes the 4 bytes used to contain -the length's hexadecimal representation. - -A pkt-line MAY contain binary data, so implementors MUST ensure -pkt-line parsing/formatting routines are 8-bit clean. - -A non-binary line SHOULD BE terminated by an LF, which if present -MUST be included in the total length. Receivers MUST treat pkt-lines -with non-binary data the same whether or not they contain the trailing -LF (stripping the LF if present, and not complaining when it is -missing). - -The maximum length of a pkt-line's data component is 65516 bytes. -Implementations MUST NOT send pkt-line whose length exceeds 65520 -(65516 bytes of payload + 4 bytes of length data). - -Implementations SHOULD NOT send an empty pkt-line ("0004"). - -A pkt-line with a length field of 0 ("0000"), called a flush-pkt, -is a special case and MUST be handled differently than an empty -pkt-line ("0004"). - ----- - pkt-line = data-pkt / flush-pkt - - data-pkt = pkt-len pkt-payload - pkt-len = 4*(HEXDIG) - pkt-payload = (pkt-len - 4)*(OCTET) +/* eslint-env node, browser */ - flush-pkt = "0000" ----- +let supportsCompressionStream = null; -Examples (as C-style strings): +async function deflate(buffer) { + if (supportsCompressionStream === null) { + supportsCompressionStream = testCompressionStream(); + } + return supportsCompressionStream + ? browserDeflate(buffer) + : pako.deflate(buffer) +} ----- - pkt-line actual value - --------------------------------- - "0006a\n" "a\n" - "0005a" "a" - "000bfoobar\n" "foobar\n" - "0004" "" ----- -*/ +async function browserDeflate(buffer) { + const cs = new CompressionStream('deflate'); + const c = new Blob([buffer]).stream().pipeThrough(cs); + return new Uint8Array(await new Response(c).arrayBuffer()) +} -// I'm really using this more as a namespace. -// There's not a lot of "state" in a pkt-line +function testCompressionStream() { + try { + const cs = new CompressionStream('deflate'); + // Test if `Blob.stream` is present. 
React Native does not have the `stream` method + new Blob([]).stream(); + if (cs) return true + } catch (_) { + // no bother + } + return false +} -class GitPktLine { - static flush() { - return Buffer.from('0000', 'utf8') +async function _writeObject({ + fs, + gitdir, + type, + object, + format = 'content', + oid = undefined, + dryRun = false, +}) { + if (format !== 'deflated') { + if (format !== 'wrapped') { + object = GitObject.wrap({ type, object }); + } + oid = await shasum(object); + object = Buffer.from(await deflate(object)); + } + if (!dryRun) { + await writeObjectLoose({ fs, gitdir, object, format: 'deflated', oid }); } + return oid +} - static delim() { - return Buffer.from('0001', 'utf8') +function assertParameter(name, value) { + if (value === undefined) { + throw new MissingParameterError(name) } +} - static encode(line) { - if (typeof line === 'string') { - line = Buffer.from(line); - } - const length = line.length + 4; - const hexlength = padHex(4, length); - return Buffer.concat([Buffer.from(hexlength, 'utf8'), line]) +function posixifyPathBuffer(buffer) { + let idx; + while (~(idx = buffer.indexOf(92))) buffer[idx] = 47; + return buffer +} + +// @ts-check + +/** + * Add a file to the git index (aka staging area) + * + * @param {object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.filepath - The path to the file to add to the index + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise} Resolves successfully once the git index has been updated + * + * @example + * await fs.promises.writeFile('/tutorial/README.md', `# TEST`) + * await git.add({ fs, dir: '/tutorial', filepath: 'README.md' }) + * console.log('done') + * + */ +async function add({ + fs: _fs, + dir, + gitdir = join(dir, '.git'), + filepath, + cache = {}, +}) { + try { + assertParameter('fs', _fs); + assertParameter('dir', dir); + assertParameter('gitdir', gitdir); + assertParameter('filepath', filepath); + + const fs = new FileSystem(_fs); + await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { + await addToIndex({ dir, gitdir, fs, filepath, index }); + }); + } catch (err) { + err.caller = 'git.add'; + throw err } +} - static streamReader(stream) { - const reader = new StreamReader(stream); - return async function read() { - try { - let length = await reader.read(4); - if (length == null) return true - length = parseInt(length.toString('utf8'), 16); - if (length === 0) return null - if (length === 1) return null // delim packets - const buffer = await reader.read(length - 4); - if (buffer == null) return true - return buffer - } catch (err) { - console.log('error', err); - return true - } - } +async function addToIndex({ dir, gitdir, fs, filepath, index }) { + // TODO: Should ignore UNLESS it's already in the index. + const ignored = await GitIgnoreManager.isIgnored({ + fs, + dir, + gitdir, + filepath, + }); + if (ignored) return + const stats = await fs.lstat(join(dir, filepath)); + if (!stats) throw new NotFoundError(filepath) + if (stats.isDirectory()) { + const children = await fs.readdir(join(dir, filepath)); + const promises = children.map(child => + addToIndex({ dir, gitdir, fs, filepath: join(filepath, child), index }) + ); + await Promise.all(promises); + } else { + const object = stats.isSymbolicLink() + ? 
await fs.readlink(join(dir, filepath)).then(posixifyPathBuffer) + : await fs.read(join(dir, filepath)); + if (object === null) throw new NotFoundError(filepath) + const oid = await _writeObject({ fs, gitdir, type: 'blob', object }); + index.insert({ filepath, stats, oid }); } } // @ts-check /** - * @param {function} read + * + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {object} args.cache + * @param {SignCallback} [args.onSign] + * @param {string} args.gitdir + * @param {string} args.message + * @param {Object} args.author + * @param {string} args.author.name + * @param {string} args.author.email + * @param {number} args.author.timestamp + * @param {number} args.author.timezoneOffset + * @param {Object} args.committer + * @param {string} args.committer.name + * @param {string} args.committer.email + * @param {number} args.committer.timestamp + * @param {number} args.committer.timezoneOffset + * @param {string} [args.signingKey] + * @param {boolean} [args.dryRun = false] + * @param {boolean} [args.noUpdateBranch = false] + * @param {string} [args.ref] + * @param {string[]} [args.parent] + * @param {string} [args.tree] + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly created commit. */ -async function parseCapabilitiesV2(read) { - /** @type {Object} */ - const capabilities2 = {}; +async function _commit({ + fs, + cache, + onSign, + gitdir, + message, + author, + committer, + signingKey, + dryRun = false, + noUpdateBranch = false, + ref, + parent, + tree, +}) { + if (!ref) { + ref = await GitRefManager.resolve({ + fs, + gitdir, + ref: 'HEAD', + depth: 2, + }); + } - let line; - while (true) { - line = await read(); - if (line === true) break - if (line === null) continue - line = line.toString('utf8').replace(/\n$/, ''); - const i = line.indexOf('='); - if (i > -1) { - const key = line.slice(0, i); - const value = line.slice(i + 1); - capabilities2[key] = value; - } else { - capabilities2[line] = true; + return GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { + const inodes = flatFileListToDirectoryStructure(index.entries); + const inode = inodes.get('.'); + if (!tree) { + tree = await constructTree({ fs, gitdir, inode, dryRun }); + } + if (!parent) { + try { + parent = [ + await GitRefManager.resolve({ + fs, + gitdir, + ref, + }), + ]; + } catch (err) { + // Probably an initial commit + parent = []; + } + } + let comm = GitCommit.from({ + tree, + parent, + author, + committer, + message, + }); + if (signingKey) { + comm = await GitCommit.sign(comm, onSign, signingKey); + } + const oid = await _writeObject({ + fs, + gitdir, + type: 'commit', + object: comm.toObject(), + dryRun, + }); + if (!noUpdateBranch && !dryRun) { + // Update branch pointer + await GitRefManager.writeRef({ + fs, + gitdir, + ref, + value: oid, + }); } - } - return { protocolVersion: 2, capabilities2 } + return oid + }) } -async function parseRefsAdResponse(stream, { service }) { - const capabilities = new Set(); - const refs = new Map(); - const symrefs = new Map(); - - // There is probably a better way to do this, but for now - // let's just throw the result parser inline here. 
- const read = GitPktLine.streamReader(stream); - let lineOne = await read(); - // skip past any flushes - while (lineOne === null) lineOne = await read(); - - if (lineOne === true) throw new EmptyServerResponseError() - - // Handle protocol v2 responses (Bitbucket Server doesn't include a `# service=` line) - if (lineOne.includes('version 2')) { - return parseCapabilitiesV2(read) - } - - // Clients MUST ignore an LF at the end of the line. - if (lineOne.toString('utf8').replace(/\n$/, '') !== `# service=${service}`) { - throw new ParseError(`# service=${service}\\n`, lineOne.toString('utf8')) +async function constructTree({ fs, gitdir, inode, dryRun }) { + // use depth first traversal + const children = inode.children; + for (const inode of children) { + if (inode.type === 'tree') { + inode.metadata.mode = '040000'; + inode.metadata.oid = await constructTree({ fs, gitdir, inode, dryRun }); + } } - let lineTwo = await read(); - // skip past any flushes - while (lineTwo === null) lineTwo = await read(); - // In the edge case of a brand new repo, zero refs (and zero capabilities) - // are returned. - if (lineTwo === true) return { capabilities, refs, symrefs } - lineTwo = lineTwo.toString('utf8'); + const entries = children.map(inode => ({ + mode: inode.metadata.mode, + path: inode.basename, + oid: inode.metadata.oid, + type: inode.type, + })); + const tree = GitTree.from(entries); + const oid = await _writeObject({ + fs, + gitdir, + type: 'tree', + object: tree.toObject(), + dryRun, + }); + return oid +} - // Handle protocol v2 responses - if (lineTwo.includes('version 2')) { - return parseCapabilitiesV2(read) - } +// @ts-check - const [firstRef, capabilitiesLine] = splitAndAssert(lineTwo, '\x00', '\\x00'); - capabilitiesLine.split(' ').map(x => capabilities.add(x)); - const [ref, name] = splitAndAssert(firstRef, ' ', ' '); - refs.set(name, ref); - while (true) { - const line = await read(); - if (line === true) break - if (line !== null) { - const [ref, name] = splitAndAssert(line.toString('utf8'), ' ', ' '); - refs.set(name, ref); - } +async function resolveFilepath({ fs, cache, gitdir, oid, filepath }) { + // Ensure there are no leading or trailing directory separators. + // I was going to do this automatically, but then found that the Git Terminal for Windows + // auto-expands --filepath=/src/utils to --filepath=C:/Users/Will/AppData/Local/Programs/Git/src/utils + // so I figured it would be wise to promote the behavior in the application layer not just the library layer. + if (filepath.startsWith('/')) { + throw new InvalidFilepathError('leading-slash') + } else if (filepath.endsWith('/')) { + throw new InvalidFilepathError('trailing-slash') } - // Symrefs are thrown into the "capabilities" unfortunately. 
- for (const cap of capabilities) { - if (cap.startsWith('symref=')) { - const m = cap.match(/symref=([^:]+):(.*)/); - if (m.length === 3) { - symrefs.set(m[1], m[2]); - } - } + const _oid = oid; + const result = await resolveTree({ fs, cache, gitdir, oid }); + const tree = result.tree; + if (filepath === '') { + oid = result.oid; + } else { + const pathArray = filepath.split('/'); + oid = await _resolveFilepath({ + fs, + cache, + gitdir, + tree, + pathArray, + oid: _oid, + filepath, + }); } - return { protocolVersion: 1, capabilities, refs, symrefs } + return oid } -function splitAndAssert(line, sep, expected) { - const split = line.trim().split(sep); - if (split.length !== 2) { - throw new ParseError( - `Two strings separated by '${expected}'`, - line.toString('utf8') - ) +async function _resolveFilepath({ + fs, + cache, + gitdir, + tree, + pathArray, + oid, + filepath, +}) { + const name = pathArray.shift(); + for (const entry of tree) { + if (entry.path === name) { + if (pathArray.length === 0) { + return entry.oid + } else { + const { type, object } = await _readObject({ + fs, + cache, + gitdir, + oid: entry.oid, + }); + if (type !== 'tree') { + throw new ObjectTypeError(oid, type, 'blob', filepath) + } + tree = GitTree.from(object); + return _resolveFilepath({ + fs, + cache, + gitdir, + tree, + pathArray, + oid, + filepath, + }) + } + } } - return split + throw new NotFoundError(`file or directory found at "${oid}:${filepath}"`) } -// Try to accomodate known CORS proxy implementations: -// - https://jcubic.pl/proxy.php? <-- uses query string -// - https://cors.isomorphic-git.org <-- uses path -const corsProxify = (corsProxy, url) => - corsProxy.endsWith('?') - ? `${corsProxy}${url}` - : `${corsProxy}/${url.replace(/^https?:\/\//, '')}`; - -const updateHeaders = (headers, auth) => { - // Update the basic auth header - if (auth.username || auth.password) { - headers.Authorization = calculateBasicAuthHeader(auth); - } - // but any manually provided headers take precedence - if (auth.headers) { - Object.assign(headers, auth.headers); - } -}; +// @ts-check /** - * @param {GitHttpResponse} res * - * @returns {{ preview: string, response: string, data: Buffer }} + * @typedef {Object} ReadTreeResult - The object returned has the following schema: + * @property {string} oid - SHA-1 object id of this tree + * @property {TreeObject} tree - the parsed tree object */ -const stringifyBody = async res => { - try { - // Some services provide a meaningful error message in the body of 403s like "token lacks the scopes necessary to perform this action" - const data = Buffer.from(await collect(res.body)); - const response = data.toString('utf8'); - const preview = - response.length < 256 ? 
response : response.slice(0, 256) + '...'; - return { preview, response, data } - } catch (e) { - return {} - } -}; -class GitRemoteHTTP { - static async capabilities() { - return ['discover', 'connect'] +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} args.gitdir + * @param {string} args.oid + * @param {string} [args.filepath] + * + * @returns {Promise} + */ +async function _readTree({ + fs, + cache, + gitdir, + oid, + filepath = undefined, +}) { + if (filepath !== undefined) { + oid = await resolveFilepath({ fs, cache, gitdir, oid, filepath }); } + const { tree, oid: treeOid } = await resolveTree({ fs, cache, gitdir, oid }); + const result = { + oid: treeOid, + tree: tree.entries(), + }; + return result +} - /** - * @param {Object} args - * @param {HttpClient} args.http - * @param {ProgressCallback} [args.onProgress] - * @param {AuthCallback} [args.onAuth] - * @param {AuthFailureCallback} [args.onAuthFailure] - * @param {AuthSuccessCallback} [args.onAuthSuccess] - * @param {string} [args.corsProxy] - * @param {string} args.service - * @param {string} args.url - * @param {Object} args.headers - * @param {1 | 2} args.protocolVersion - Git Protocol Version - */ - static async discover({ - http, - onProgress, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - service, - url: _origUrl, - headers, - protocolVersion, - }) { - let { url, auth } = extractAuthFromUrl(_origUrl); - const proxifiedURL = corsProxy ? corsProxify(corsProxy, url) : url; - if (auth.username || auth.password) { - headers.Authorization = calculateBasicAuthHeader(auth); - } - if (protocolVersion === 2) { - headers['Git-Protocol'] = 'version=2'; - } +// @ts-check - let res; - let tryAgain; - let providedAuthBefore = false; - do { - res = await http.request({ - onProgress, - method: 'GET', - url: `${proxifiedURL}/info/refs?service=${service}`, - headers, - }); +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {TreeObject} args.tree + * + * @returns {Promise} + */ +async function _writeTree({ fs, gitdir, tree }) { + // Convert object to buffer + const object = GitTree.from(tree).toObject(); + const oid = await _writeObject({ + fs, + gitdir, + type: 'tree', + object, + format: 'content', + }); + return oid +} - // the default loop behavior - tryAgain = false; +// @ts-check - // 401 is the "correct" response for access denied. 203 is Non-Authoritative Information and comes from Azure DevOps, which - // apparently doesn't realize this is a git request and is returning the HTML for the "Azure DevOps Services | Sign In" page. - if (res.statusCode === 401 || res.statusCode === 203) { - // On subsequent 401s, call `onAuthFailure` instead of `onAuth`. - // This is so that naive `onAuth` callbacks that return a fixed value don't create an infinite loop of retrying. - const getAuth = providedAuthBefore ? onAuthFailure : onAuth; - if (getAuth) { - // Acquire credentials and try again - // TODO: read `useHttpPath` value from git config and pass along? 
- auth = await getAuth(url, { - ...auth, - headers: { ...headers }, - }); - if (auth && auth.cancel) { - throw new UserCanceledError() - } else if (auth) { - updateHeaders(headers, auth); - providedAuthBefore = true; - tryAgain = true; - } - } - } else if ( - res.statusCode === 200 && - providedAuthBefore && - onAuthSuccess - ) { - await onAuthSuccess(url, auth); - } - } while (tryAgain) +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {object} args.cache + * @param {SignCallback} [args.onSign] + * @param {string} args.gitdir + * @param {string} args.ref + * @param {string} args.oid + * @param {string|Uint8Array} args.note + * @param {boolean} [args.force] + * @param {Object} args.author + * @param {string} args.author.name + * @param {string} args.author.email + * @param {number} args.author.timestamp + * @param {number} args.author.timezoneOffset + * @param {Object} args.committer + * @param {string} args.committer.name + * @param {string} args.committer.email + * @param {number} args.committer.timestamp + * @param {number} args.committer.timezoneOffset + * @param {string} [args.signingKey] + * + * @returns {Promise} + */ - if (res.statusCode !== 200) { - const { response } = await stringifyBody(res); - throw new HttpError(res.statusCode, res.statusMessage, response) +async function _addNote({ + fs, + cache, + onSign, + gitdir, + ref, + oid, + note, + force, + author, + committer, + signingKey, +}) { + // Get the current note commit + let parent; + try { + parent = await GitRefManager.resolve({ gitdir, fs, ref }); + } catch (err) { + if (!(err instanceof NotFoundError)) { + throw err } - // Git "smart" HTTP servers should respond with the correct Content-Type header. - if ( - res.headers['content-type'] === `application/x-${service}-advertisement` - ) { - const remoteHTTP = await parseRefsAdResponse(res.body, { service }); - remoteHTTP.auth = auth; - return remoteHTTP - } else { - // If they don't send the correct content-type header, that's a good indicator it is either a "dumb" HTTP - // server, or the user specified an incorrect remote URL and the response is actually an HTML page. - // In this case, we save the response as plain text so we can generate a better error message if needed. - const { preview, response, data } = await stringifyBody(res); - // For backwards compatibility, try to parse it anyway. - // TODO: maybe just throw instead of trying? 
- try { - const remoteHTTP = await parseRefsAdResponse([data], { service }); - remoteHTTP.auth = auth; - return remoteHTTP - } catch (e) { - throw new SmartHttpError(preview, response) + } + + // I'm using the "empty tree" magic number here for brevity + const result = await _readTree({ + fs, + cache, + gitdir, + oid: parent || '4b825dc642cb6eb9a060e54bf8d69288fbee4904', + }); + let tree = result.tree; + + // Handle the case where a note already exists + if (force) { + tree = tree.filter(entry => entry.path !== oid); + } else { + for (const entry of tree) { + if (entry.path === oid) { + throw new AlreadyExistsError('note', oid) } } } - /** - * @param {Object} args - * @param {HttpClient} args.http - * @param {ProgressCallback} [args.onProgress] - * @param {string} [args.corsProxy] - * @param {string} args.service - * @param {string} args.url - * @param {Object} [args.headers] - * @param {any} args.body - * @param {any} args.auth - */ - static async connect({ - http, - onProgress, - corsProxy, - service, - url, - auth, - body, - headers, - }) { - // We already have the "correct" auth value at this point, but - // we need to strip out the username/password from the URL yet again. - const urlAuth = extractAuthFromUrl(url); - if (urlAuth) url = urlAuth.url; + // Create the note blob + if (typeof note === 'string') { + note = Buffer.from(note, 'utf8'); + } + const noteOid = await _writeObject({ + fs, + gitdir, + type: 'blob', + object: note, + format: 'content', + }); - if (corsProxy) url = corsProxify(corsProxy, url); + // Create the new note tree + tree.push({ mode: '100644', path: oid, oid: noteOid, type: 'blob' }); + const treeOid = await _writeTree({ + fs, + gitdir, + tree, + }); - headers['content-type'] = `application/x-${service}-request`; - headers.accept = `application/x-${service}-result`; - updateHeaders(headers, auth); + // Create the new note commit + const commitOid = await _commit({ + fs, + cache, + onSign, + gitdir, + ref, + tree: treeOid, + parent: parent && [parent], + message: `Note added by 'isomorphic-git addNote'\n`, + author, + committer, + signingKey, + }); - const res = await http.request({ - onProgress, - method: 'POST', - url: `${url}/${service}`, - body, - headers, - }); - if (res.statusCode !== 200) { - const { response } = stringifyBody(res); - throw new HttpError(res.statusCode, res.statusMessage, response) - } - return res + return commitOid +} + +// @ts-check + +/** + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.path + * + * @returns {Promise} Resolves with the config value + * + * @example + * // Read config value + * let value = await git.getConfig({ + * dir: '$input((/))', + * path: '$input((user.name))' + * }) + * console.log(value) + * + */ +async function _getConfig({ fs, gitdir, path }) { + const config = await GitConfigManager.get({ fs, gitdir }); + return config.get(path) +} + +/** + * + * @returns {Promise} + */ +async function normalizeAuthorObject({ fs, gitdir, author = {} }) { + let { name, email, timestamp, timezoneOffset } = author; + name = name || (await _getConfig({ fs, gitdir, path: 'user.name' })); + email = email || (await _getConfig({ fs, gitdir, path: 'user.email' })) || ''; + + if (name === undefined) { + return undefined } + + timestamp = timestamp != null ? timestamp : Math.floor(Date.now() / 1000); + timezoneOffset = + timezoneOffset != null + ? 
timezoneOffset + : new Date(timestamp * 1000).getTimezoneOffset(); + + return { name, email, timestamp, timezoneOffset } } -function parseRemoteUrl({ url }) { - // the stupid "shorter scp-like syntax" - if (url.startsWith('git@')) { - return { - transport: 'ssh', - address: url, - } - } - const matches = url.match(/(\w+)(:\/\/|::)(.*)/); - if (matches === null) return - /* - * When git encounters a URL of the form ://
, where is - * a protocol that it cannot handle natively, it automatically invokes git remote- - * with the full URL as the second argument. - * - * @see https://git-scm.com/docs/git-remote-helpers - */ - if (matches[2] === '://') { - return { - transport: matches[1], - address: matches[0], - } - } - /* - * A URL of the form ::
explicitly instructs git to invoke - * git remote- with
as the second argument. - * - * @see https://git-scm.com/docs/git-remote-helpers - */ - if (matches[2] === '::') { - return { - transport: matches[1], - address: matches[3], - } +/** + * + * @returns {Promise} + */ +async function normalizeCommitterObject({ + fs, + gitdir, + author, + committer, +}) { + committer = Object.assign({}, committer || author); + // Match committer's date to author's one, if omitted + if (author) { + committer.timestamp = committer.timestamp || author.timestamp; + committer.timezoneOffset = committer.timezoneOffset || author.timezoneOffset; } + committer = await normalizeAuthorObject({ fs, gitdir, author: committer }); + return committer } -class GitRemoteManager { - static getRemoteHelperFor({ url }) { - // TODO: clean up the remoteHelper API and move into PluginCore - const remoteHelpers = new Map(); - remoteHelpers.set('http', GitRemoteHTTP); - remoteHelpers.set('https', GitRemoteHTTP); +// @ts-check - const parts = parseRemoteUrl({ url }); - if (!parts) { - throw new UrlParseError(url) - } - if (remoteHelpers.has(parts.transport)) { - return remoteHelpers.get(parts.transport) +/** + * Add or update an object note + * + * @param {object} args + * @param {FsClient} args.fs - a file system implementation + * @param {SignCallback} [args.onSign] - a PGP signing implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.ref] - The notes ref to look under + * @param {string} args.oid - The SHA-1 object id of the object to add the note to. + * @param {string|Uint8Array} args.note - The note to add + * @param {boolean} [args.force] - Over-write note if it already exists. + * @param {Object} [args.author] - The details about the author. + * @param {string} [args.author.name] - Default is `user.name` config. + * @param {string} [args.author.email] - Default is `user.email` config. + * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {Object} [args.committer = author] - The details about the note committer, in the same format as the author parameter. If not specified, the author details are used. + * @param {string} [args.committer.name] - Default is `user.name` config. + * @param {string} [args.committer.email] - Default is `user.email` config. + * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {string} [args.signingKey] - Sign the note commit using this private PGP key. + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the commit object for the added note. 
+ */ + +async function addNote({ + fs: _fs, + onSign, + dir, + gitdir = join(dir, '.git'), + ref = 'refs/notes/commits', + oid, + note, + force, + author: _author, + committer: _committer, + signingKey, + cache = {}, +}) { + try { + assertParameter('fs', _fs); + assertParameter('gitdir', gitdir); + assertParameter('oid', oid); + assertParameter('note', note); + if (signingKey) { + assertParameter('onSign', onSign); } - throw new UnknownTransportError( - url, - parts.transport, - parts.transport === 'ssh' ? translateSSHtoHTTP(url) : undefined - ) - } -} + const fs = new FileSystem(_fs); -let lock$1 = null; + const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); + if (!author) throw new MissingNameError('author') -class GitShallowManager { - static async read({ fs, gitdir }) { - if (lock$1 === null) lock$1 = new AsyncLock(); - const filepath = join(gitdir, 'shallow'); - const oids = new Set(); - await lock$1.acquire(filepath, async function() { - const text = await fs.read(filepath, { encoding: 'utf8' }); - if (text === null) return oids // no file - if (text.trim() === '') return oids // empty file - text - .trim() - .split('\n') - .map(oid => oids.add(oid)); + const committer = await normalizeCommitterObject({ + fs, + gitdir, + author, + committer: _committer, }); - return oids + if (!committer) throw new MissingNameError('committer') + + return await _addNote({ + fs: new FileSystem(fs), + cache, + onSign, + gitdir, + ref, + oid, + note, + force, + author, + committer, + signingKey, + }) + } catch (err) { + err.caller = 'git.addNote'; + throw err } +} - static async write({ fs, gitdir, oids }) { - if (lock$1 === null) lock$1 = new AsyncLock(); - const filepath = join(gitdir, 'shallow'); - if (oids.size > 0) { - const text = [...oids].join('\n') + '\n'; - await lock$1.acquire(filepath, async function() { - await fs.write(filepath, text, { - encoding: 'utf8', - }); - }); - } else { - // No shallows - await lock$1.acquire(filepath, async function() { - await fs.rm(filepath); - }); +// @ts-check + +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.remote + * @param {string} args.url + * @param {boolean} args.force + * + * @returns {Promise} + * + */ +async function _addRemote({ fs, gitdir, remote, url, force }) { + if (remote !== cleanGitRef.clean(remote)) { + throw new InvalidRefNameError(remote, cleanGitRef.clean(remote)) + } + const config = await GitConfigManager.get({ fs, gitdir }); + if (!force) { + // Check that setting it wouldn't overwrite. + const remoteNames = await config.getSubsections('remote'); + if (remoteNames.includes(remote)) { + // Throw an error if it would overwrite an existing remote, + // but not if it's simply setting the same value again. 
+ if (url !== (await config.get(`remote.${remote}.url`))) { + throw new AlreadyExistsError('remote', remote) + } } } + await config.set(`remote.${remote}.url`, url); + await config.set( + `remote.${remote}.fetch`, + `+refs/heads/*:refs/remotes/${remote}/*` + ); + await GitConfigManager.save({ fs, gitdir, config }); } -async function hasObjectLoose({ fs, gitdir, oid }) { - const source = `objects/${oid.slice(0, 2)}/${oid.slice(2)}`; - return fs.exists(`${gitdir}/${source}`) -} +// @ts-check -async function hasObjectPacked({ - fs, - cache, - gitdir, - oid, - getExternalRefDelta, +/** + * Add or update a remote + * + * @param {object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.remote - The name of the remote + * @param {string} args.url - The URL of the remote + * @param {boolean} [args.force = false] - Instead of throwing an error if a remote named `remote` already exists, overwrite the existing remote. + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.addRemote({ + * fs, + * dir: '/tutorial', + * remote: 'upstream', + * url: 'https://github.com/isomorphic-git/isomorphic-git' + * }) + * console.log('done') + * + */ +async function addRemote({ + fs, + dir, + gitdir = join(dir, '.git'), + remote, + url, + force = false, }) { - // Check to see if it's in a packfile. - // Iterate through all the .idx files - let list = await fs.readdir(join(gitdir, 'objects/pack')); - list = list.filter(x => x.endsWith('.idx')); - for (const filename of list) { - const indexFile = `${gitdir}/objects/pack/${filename}`; - const p = await readPackIndex({ - fs, - cache, - filename: indexFile, - getExternalRefDelta, - }); - if (p.error) throw new InternalError(p.error) - // If the packfile DOES have the oid we're looking for... - if (p.offsets.has(oid)) { - return true - } + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('remote', remote); + assertParameter('url', url); + return await _addRemote({ + fs: new FileSystem(fs), + gitdir, + remote, + url, + force, + }) + } catch (err) { + err.caller = 'git.addRemote'; + throw err } - // Failed to find it - return false } -async function hasObject({ +// @ts-check + +/** + * Create an annotated tag. + * + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {SignCallback} [args.onSign] + * @param {string} args.gitdir + * @param {string} args.ref + * @param {string} [args.message = ref] + * @param {string} [args.object = 'HEAD'] + * @param {object} [args.tagger] + * @param {string} args.tagger.name + * @param {string} args.tagger.email + * @param {number} args.tagger.timestamp + * @param {number} args.tagger.timezoneOffset + * @param {string} [args.gpgsig] + * @param {string} [args.signingKey] + * @param {boolean} [args.force = false] + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.annotatedTag({ + * dir: '$input((/))', + * ref: '$input((test-tag))', + * message: '$input((This commit is awesome))', + * tagger: { + * name: '$input((Mr. 
Test))', + * email: '$input((mrtest@example.com))' + * } + * }) + * console.log('done') + * + */ +async function _annotatedTag({ fs, cache, + onSign, gitdir, - oid, - format = 'content', + ref, + tagger, + message = ref, + gpgsig, + object, + signingKey, + force = false, }) { - // Curry the current read method so that the packfile un-deltification - // process can acquire external ref-deltas. - const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); - - // Look for it in the loose object directory. - let result = await hasObjectLoose({ fs, gitdir, oid }); - // Check to see if it's in a packfile. - if (!result) { - result = await hasObjectPacked({ - fs, - cache, - gitdir, - oid, - getExternalRefDelta, - }); - } - // Finally - return result -} - -// TODO: make a function that just returns obCount. then emptyPackfile = () => sizePack(pack) === 0 -function emptyPackfile(pack) { - const pheader = '5041434b'; - const version = '00000002'; - const obCount = '00000000'; - const header = pheader + version + obCount; - return pack.slice(0, 12).toString('hex') === header -} - -function filterCapabilities(server, client) { - const serverNames = server.map(cap => cap.split('=', 1)[0]); - return client.filter(cap => { - const name = cap.split('=', 1)[0]; - return serverNames.includes(name) - }) -} - -const pkg = { - name: 'isomorphic-git', - version: '1.11.2', - agent: 'git/isomorphic-git@1.11.2', -}; - -class FIFO { - constructor() { - this._queue = []; - } + ref = ref.startsWith('refs/tags/') ? ref : `refs/tags/${ref}`; - write(chunk) { - if (this._ended) { - throw Error('You cannot write to a FIFO that has already been ended!') - } - if (this._waiting) { - const resolve = this._waiting; - this._waiting = null; - resolve({ value: chunk }); - } else { - this._queue.push(chunk); - } + if (!force && (await GitRefManager.exists({ fs, gitdir, ref }))) { + throw new AlreadyExistsError('tag', ref) } - end() { - this._ended = true; - if (this._waiting) { - const resolve = this._waiting; - this._waiting = null; - resolve({ done: true }); - } - } + // Resolve passed value + const oid = await GitRefManager.resolve({ + fs, + gitdir, + ref: object || 'HEAD', + }); - destroy(err) { - this._ended = true; - this.error = err; + const { type } = await _readObject({ fs, cache, gitdir, oid }); + let tagObject = GitAnnotatedTag.from({ + object: oid, + type, + tag: ref.replace('refs/tags/', ''), + tagger, + message, + gpgsig, + }); + if (signingKey) { + tagObject = await GitAnnotatedTag.sign(tagObject, onSign, signingKey); } + const value = await _writeObject({ + fs, + gitdir, + type: 'tag', + object: tagObject.toObject(), + }); - async next() { - if (this._queue.length > 0) { - return { value: this._queue.shift() } - } - if (this._ended) { - return { done: true } - } - if (this._waiting) { - throw Error( - 'You cannot call read until the previous call to read has returned!' - ) - } - return new Promise(resolve => { - this._waiting = resolve; - }) - } + await GitRefManager.writeRef({ fs, gitdir, ref, value }); } -// Note: progress messages are designed to be written directly to the terminal, -// so they are often sent with just a carriage return to overwrite the last line of output. -// But there are also messages delimited with newlines. -// I also include CRLF just in case. 
-function findSplit(str) { - const r = str.indexOf('\r'); - const n = str.indexOf('\n'); - if (r === -1 && n === -1) return -1 - if (r === -1) return n + 1 // \n - if (n === -1) return r + 1 // \r - if (n === r + 1) return n + 1 // \r\n - return Math.min(r, n) + 1 // \r or \n -} +// @ts-check -function splitLines(input) { - const output = new FIFO(); - let tmp = '' - ;(async () => { - await forAwait(input, chunk => { - chunk = chunk.toString('utf8'); - tmp += chunk; - while (true) { - const i = findSplit(tmp); - if (i === -1) break - output.write(tmp.slice(0, i)); - tmp = tmp.slice(i); - } - }); - if (tmp.length > 0) { - output.write(tmp); +/** + * Create an annotated tag. + * + * @param {object} args + * @param {FsClient} args.fs - a file system implementation + * @param {SignCallback} [args.onSign] - a PGP signing implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.ref - What to name the tag + * @param {string} [args.message = ref] - The tag message to use. + * @param {string} [args.object = 'HEAD'] - The SHA-1 object id the tag points to. (Will resolve to a SHA-1 object id if value is a ref.) By default, the commit object which is referred by the current `HEAD` is used. + * @param {object} [args.tagger] - The details about the tagger. + * @param {string} [args.tagger.name] - Default is `user.name` config. + * @param {string} [args.tagger.email] - Default is `user.email` config. + * @param {number} [args.tagger.timestamp=Math.floor(Date.now()/1000)] - Set the tagger timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.tagger.timezoneOffset] - Set the tagger timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {string} [args.gpgsig] - The gpgsig attatched to the tag object. (Mutually exclusive with the `signingKey` option.) + * @param {string} [args.signingKey] - Sign the tag object using this private PGP key. (Mutually exclusive with the `gpgsig` option.) + * @param {boolean} [args.force = false] - Instead of throwing an error if a tag named `ref` already exists, overwrite the existing tag. Note that this option does not modify the original tag object itself. + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.annotatedTag({ + * fs, + * dir: '/tutorial', + * ref: 'test-tag', + * message: 'This commit is awesome', + * tagger: { + * name: 'Mr. Test', + * email: 'mrtest@example.com' + * } + * }) + * console.log('done') + * + */ +async function annotatedTag({ + fs: _fs, + onSign, + dir, + gitdir = join(dir, '.git'), + ref, + tagger: _tagger, + message = ref, + gpgsig, + object, + signingKey, + force = false, + cache = {}, +}) { + try { + assertParameter('fs', _fs); + assertParameter('gitdir', gitdir); + assertParameter('ref', ref); + if (signingKey) { + assertParameter('onSign', onSign); } - output.end(); - })(); - return output -} + const fs = new FileSystem(_fs); -/* -If 'side-band' or 'side-band-64k' capabilities have been specified by -the client, the server will send the packfile data multiplexed. 
+ // Fill in missing arguments with default values + const tagger = await normalizeAuthorObject({ fs, gitdir, author: _tagger }); + if (!tagger) throw new MissingNameError('tagger') -Each packet starting with the packet-line length of the amount of data -that follows, followed by a single byte specifying the sideband the -following data is coming in on. + return await _annotatedTag({ + fs, + cache, + onSign, + gitdir, + ref, + tagger, + message, + gpgsig, + object, + signingKey, + force, + }) + } catch (err) { + err.caller = 'git.annotatedTag'; + throw err + } +} -In 'side-band' mode, it will send up to 999 data bytes plus 1 control -code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' -mode it will send up to 65519 data bytes plus 1 control code, for a -total of up to 65520 bytes in a pkt-line. +// @ts-check -The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain -packfile data, sideband '2' will be used for progress information that the -client will generally print to stderr and sideband '3' is used for error -information. +/** + * Create a branch + * + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.ref + * @param {boolean} [args.checkout = false] + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.branch({ dir: '$input((/))', ref: '$input((develop))' }) + * console.log('done') + * + */ +async function _branch({ fs, gitdir, ref, checkout = false }) { + if (ref !== cleanGitRef.clean(ref)) { + throw new InvalidRefNameError(ref, cleanGitRef.clean(ref)) + } -If no 'side-band' capability was specified, the server will stream the -entire packfile without multiplexing. -*/ + const fullref = `refs/heads/${ref}`; -class GitSideBand { - static demux(input) { - const read = GitPktLine.streamReader(input); - // And now for the ridiculous side-band or side-band-64k protocol - const packetlines = new FIFO(); - const packfile = new FIFO(); - const progress = new FIFO(); - // TODO: Use a proper through stream? - const nextBit = async function() { - const line = await read(); - // Skip over flush packets - if (line === null) return nextBit() - // A made up convention to signal there's no more to read. - if (line === true) { - packetlines.end(); - progress.end(); - packfile.end(); - return - } - // Examine first byte to determine which output "stream" to use - switch (line[0]) { - case 1: { - // pack data - packfile.write(line.slice(1)); - break - } - case 2: { - // progress message - progress.write(line.slice(1)); - break - } - case 3: { - // fatal error message just before stream aborts - const error = line.slice(1); - progress.write(error); - packfile.destroy(new Error(error.toString('utf8'))); - return - } - default: { - // Not part of the side-band-64k protocol - packetlines.write(line.slice(0)); - } - } - // Careful not to blow up the stack. - // I think Promises in a tail-call position should be OK. - nextBit(); - }; - nextBit(); - return { - packetlines, - packfile, - progress, - } + const exist = await GitRefManager.exists({ fs, gitdir, ref: fullref }); + if (exist) { + throw new AlreadyExistsError('branch', ref, false) } - // static mux ({ - // protocol, // 'side-band' or 'side-band-64k' - // packetlines, - // packfile, - // progress, - // error - // }) { - // const MAX_PACKET_LENGTH = protocol === 'side-band-64k' ? 
999 : 65519 - // let output = new PassThrough() - // packetlines.on('data', data => { - // if (data === null) { - // output.write(GitPktLine.flush()) - // } else { - // output.write(GitPktLine.encode(data)) - // } - // }) - // let packfileWasEmpty = true - // let packfileEnded = false - // let progressEnded = false - // let errorEnded = false - // let goodbye = Buffer.concat([ - // GitPktLine.encode(Buffer.from('010A', 'hex')), - // GitPktLine.flush() - // ]) - // packfile - // .on('data', data => { - // packfileWasEmpty = false - // const buffers = splitBuffer(data, MAX_PACKET_LENGTH) - // for (const buffer of buffers) { - // output.write( - // GitPktLine.encode(Buffer.concat([Buffer.from('01', 'hex'), buffer])) - // ) - // } - // }) - // .on('end', () => { - // packfileEnded = true - // if (!packfileWasEmpty) output.write(goodbye) - // if (progressEnded && errorEnded) output.end() - // }) - // progress - // .on('data', data => { - // const buffers = splitBuffer(data, MAX_PACKET_LENGTH) - // for (const buffer of buffers) { - // output.write( - // GitPktLine.encode(Buffer.concat([Buffer.from('02', 'hex'), buffer])) - // ) - // } - // }) - // .on('end', () => { - // progressEnded = true - // if (packfileEnded && errorEnded) output.end() - // }) - // error - // .on('data', data => { - // const buffers = splitBuffer(data, MAX_PACKET_LENGTH) - // for (const buffer of buffers) { - // output.write( - // GitPktLine.encode(Buffer.concat([Buffer.from('03', 'hex'), buffer])) - // ) - // } - // }) - // .on('end', () => { - // errorEnded = true - // if (progressEnded && packfileEnded) output.end() - // }) - // return output - // } -} -async function parseUploadPackResponse(stream) { - const { packetlines, packfile, progress } = GitSideBand.demux(stream); - const shallows = []; - const unshallows = []; - const acks = []; - let nak = false; - let done = false; - return new Promise((resolve, reject) => { - // Parse the response - forAwait(packetlines, data => { - const line = data.toString('utf8').trim(); - if (line.startsWith('shallow')) { - const oid = line.slice(-41).trim(); - if (oid.length !== 40) { - reject(new InvalidOidError(oid)); - } - shallows.push(oid); - } else if (line.startsWith('unshallow')) { - const oid = line.slice(-41).trim(); - if (oid.length !== 40) { - reject(new InvalidOidError(oid)); - } - unshallows.push(oid); - } else if (line.startsWith('ACK')) { - const [, oid, status] = line.split(' '); - acks.push({ oid, status }); - if (!status) done = true; - } else if (line.startsWith('NAK')) { - nak = true; - done = true; - } - if (done) { - resolve({ shallows, unshallows, acks, nak, packfile, progress }); - } + // Get current HEAD tree oid + let oid; + try { + oid = await GitRefManager.resolve({ fs, gitdir, ref: 'HEAD' }); + } catch (e) { + // Probably an empty repo + } + + // Create a new ref that points at the current commit + if (oid) { + await GitRefManager.writeRef({ fs, gitdir, ref: fullref, value: oid }); + } + + if (checkout) { + // Update HEAD + await GitRefManager.writeSymbolicRef({ + fs, + gitdir, + ref: 'HEAD', + value: fullref, }); - }) + } } -function writeUploadPackRequest({ - capabilities = [], - wants = [], - haves = [], - shallows = [], - depth = null, - since = null, - exclude = [], +// @ts-check + +/** + * Create a branch + * + * @param {object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git 
directory](dir-vs-gitdir.md) path + * @param {string} args.ref - What to name the branch + * @param {boolean} [args.checkout = false] - Update `HEAD` to point at the newly created branch + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.branch({ fs, dir: '/tutorial', ref: 'develop' }) + * console.log('done') + * + */ +async function branch({ + fs, + dir, + gitdir = join(dir, '.git'), + ref, + checkout = false, }) { - const packstream = []; - wants = [...new Set(wants)]; // remove duplicates - let firstLineCapabilities = ` ${capabilities.join(' ')}`; - for (const oid of wants) { - packstream.push(GitPktLine.encode(`want ${oid}${firstLineCapabilities}\n`)); - firstLineCapabilities = ''; + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('ref', ref); + return await _branch({ + fs: new FileSystem(fs), + gitdir, + ref, + checkout, + }) + } catch (err) { + err.caller = 'git.branch'; + throw err } - for (const oid of shallows) { - packstream.push(GitPktLine.encode(`shallow ${oid}\n`)); +} + +// https://dev.to/namirsab/comment/2050 +function arrayRange(start, end) { + const length = end - start; + return Array.from({ length }, (_, i) => start + i) +} + +// TODO: Should I just polyfill Array.flat? +const flat = + typeof Array.prototype.flat === 'undefined' + ? entries => entries.reduce((acc, x) => acc.concat(x), []) + : entries => entries.flat(); + +// This is convenient for computing unions/joins of sorted lists. +class RunningMinimum { + constructor() { + // Using a getter for 'value' would just bloat the code. + // You know better than to set it directly right? + this.value = null; } - if (depth !== null) { - packstream.push(GitPktLine.encode(`deepen ${depth}\n`)); + + consider(value) { + if (value === null || value === undefined) return + if (this.value === null) { + this.value = value; + } else if (value < this.value) { + this.value = value; + } } - if (since !== null) { - packstream.push( - GitPktLine.encode(`deepen-since ${Math.floor(since.valueOf() / 1000)}\n`) - ); + + reset() { + this.value = null; } - for (const oid of exclude) { - packstream.push(GitPktLine.encode(`deepen-not ${oid}\n`)); +} + +// Take an array of length N of +// iterators of length Q_n +// of strings +// and return an iterator of length max(Q_n) for all n +// of arrays of length N +// of string|null who all have the same string value +function* unionOfIterators(sets) { + /* NOTE: We can assume all arrays are sorted. + * Indexes are sorted because they are defined that way: + * + * > Index entries are sorted in ascending order on the name field, + * > interpreted as a string of unsigned bytes (i.e. memcmp() order, no + * > localization, no special casing of directory separator '/'). Entries + * > with the same name are sorted by their stage field. + * + * Trees should be sorted because they are created directly from indexes. + * They definitely should be sorted, or else they wouldn't have a unique SHA1. + * So that would be very naughty on the part of the tree-creator. + * + * Lastly, the working dir entries are sorted because I choose to sort them + * in my FileSystem.readdir() implementation. 
+ */ + + // Init + const min = new RunningMinimum(); + let minimum; + const heads = []; + const numsets = sets.length; + for (let i = 0; i < numsets; i++) { + // Abuse the fact that iterators continue to return 'undefined' for value + // once they are done + heads[i] = sets[i].next().value; + if (heads[i] !== undefined) { + min.consider(heads[i]); + } } - packstream.push(GitPktLine.flush()); - for (const oid of haves) { - packstream.push(GitPktLine.encode(`have ${oid}\n`)); + if (min.value === null) return + // Iterate + while (true) { + const result = []; + minimum = min.value; + min.reset(); + for (let i = 0; i < numsets; i++) { + if (heads[i] !== undefined && heads[i] === minimum) { + result[i] = heads[i]; + heads[i] = sets[i].next().value; + } else { + // A little hacky, but eh + result[i] = null; + } + if (heads[i] !== undefined) { + min.consider(heads[i]); + } + } + yield result; + if (min.value === null) return } - packstream.push(GitPktLine.encode(`done\n`)); - return packstream } // @ts-check /** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {object} args.cache + * @param {string} [args.dir] + * @param {string} [args.gitdir=join(dir,'.git')] + * @param {Walker[]} args.trees + * @param {WalkerMap} [args.map] + * @param {WalkerReduce} [args.reduce] + * @param {WalkerIterate} [args.iterate] * - * @typedef {object} FetchResult - The object returned has the following schema: - * @property {string | null} defaultBranch - The branch that is cloned if no branch is specified - * @property {string | null} fetchHead - The SHA-1 object id of the fetched head commit - * @property {string | null} fetchHeadDescription - a textual description of the branch that was fetched - * @property {Object} [headers] - The HTTP response headers returned by the git server - * @property {string[]} [pruned] - A list of branches that were pruned, if you provided the `prune` parameter + * @returns {Promise} The finished tree-walking result + * + * @see {WalkerMap} * */ +async function _walk({ + fs, + cache, + dir, + gitdir, + trees, + // @ts-ignore + map = async (_, entry) => entry, + // The default reducer is a flatmap that filters out undefineds. + reduce = async (parent, children) => { + const flatten = flat(children); + if (parent !== undefined) flatten.unshift(parent); + return flatten + }, + // The default iterate function walks all children concurrently + iterate = (walk, children) => Promise.all([...children].map(walk)), +}) { + const walkers = trees.map(proxy => + proxy[GitWalkSymbol]({ fs, dir, gitdir, cache }) + ); + + const root = new Array(walkers.length).fill('.'); + const range = arrayRange(0, walkers.length); + const unionWalkerFromReaddir = async entries => { + range.map(i => { + entries[i] = entries[i] && new walkers[i].ConstructEntry(entries[i]); + }); + const subdirs = await Promise.all( + range.map(i => (entries[i] ? walkers[i].readdir(entries[i]) : [])) + ); + // Now process child directories + const iterators = subdirs + .map(array => (array === null ? 
[] : array)) + .map(array => array[Symbol.iterator]()); + return { + entries, + children: unionOfIterators(iterators), + } + }; + + const walk = async root => { + const { entries, children } = await unionWalkerFromReaddir(root); + const fullpath = entries.find(entry => entry && entry._fullpath)._fullpath; + const parent = await map(fullpath, entries); + if (parent !== null) { + let walkedChildren = await iterate(walk, children); + walkedChildren = walkedChildren.filter(x => x !== undefined); + return reduce(parent, walkedChildren) + } + }; + return walk(root) +} + +const worthWalking = (filepath, root) => { + if (filepath === '.' || root == null || root.length === 0 || root === '.') { + return true + } + if (root.length >= filepath.length) { + return root.startsWith(filepath) + } else { + return filepath.startsWith(root) + } +}; + +// @ts-check /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs * @param {any} args.cache - * @param {HttpClient} args.http * @param {ProgressCallback} [args.onProgress] - * @param {MessageCallback} [args.onMessage] - * @param {AuthCallback} [args.onAuth] - * @param {AuthFailureCallback} [args.onAuthFailure] - * @param {AuthSuccessCallback} [args.onAuthSuccess] + * @param {string} args.dir * @param {string} args.gitdir - * @param {string|void} [args.url] - * @param {string} [args.corsProxy] - * @param {string} [args.ref] - * @param {string} [args.remoteRef] - * @param {string} [args.remote] - * @param {boolean} [args.singleBranch = false] - * @param {boolean} [args.tags = false] - * @param {number} [args.depth] - * @param {Date} [args.since] - * @param {string[]} [args.exclude = []] - * @param {boolean} [args.relative = false] - * @param {Object} [args.headers] - * @param {boolean} [args.prune] - * @param {boolean} [args.pruneTags] + * @param {string} args.ref + * @param {string[]} [args.filepaths] + * @param {string} args.remote + * @param {boolean} args.noCheckout + * @param {boolean} [args.noUpdateHead] + * @param {boolean} [args.dryRun] + * @param {boolean} [args.force] + * @param {boolean} [args.track] + * + * @returns {Promise} Resolves successfully when filesystem operations are complete * - * @returns {Promise} - * @see FetchResult */ -async function _fetch({ +async function _checkout({ fs, cache, - http, onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, + dir, gitdir, - ref: _ref, - remoteRef: _remoteRef, - remote: _remote, - url: _url, - corsProxy, - depth = null, - since = null, - exclude = [], - relative = false, - tags = false, - singleBranch = false, - headers = {}, - prune = false, - pruneTags = false, + remote, + ref, + filepaths, + noCheckout, + noUpdateHead, + dryRun, + force, + track = true, }) { - const ref = _ref || (await _currentBranch({ fs, gitdir, test: true })); - const config = await GitConfigManager.get({ fs, gitdir }); - // Figure out what remote to use. - const remote = - _remote || (ref && (await config.get(`branch.${ref}.remote`))) || 'origin'; - // Lookup the URL for the given remote. - const url = _url || (await config.get(`remote.${remote}.url`)); - if (typeof url === 'undefined') { - throw new MissingParameterError('remote OR url') - } - // Figure out what remote ref to use. 
- const remoteRef = - _remoteRef || - (ref && (await config.get(`branch.${ref}.merge`))) || - _ref || - 'HEAD'; - - if (corsProxy === undefined) { - corsProxy = await config.get('http.corsProxy'); - } - - const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); - const remoteHTTP = await GitRemoteHTTP.discover({ - http, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - service: 'git-upload-pack', - url, - headers, - protocolVersion: 1, - }); - const auth = remoteHTTP.auth; // hack to get new credentials from CredentialManager API - const remoteRefs = remoteHTTP.refs; - // For the special case of an empty repository with no refs, return null. - if (remoteRefs.size === 0) { - return { - defaultBranch: null, - fetchHead: null, - fetchHeadDescription: null, - } - } - // Check that the remote supports the requested features - if (depth !== null && !remoteHTTP.capabilities.has('shallow')) { - throw new RemoteCapabilityError('shallow', 'depth') - } - if (since !== null && !remoteHTTP.capabilities.has('deepen-since')) { - throw new RemoteCapabilityError('deepen-since', 'since') - } - if (exclude.length > 0 && !remoteHTTP.capabilities.has('deepen-not')) { - throw new RemoteCapabilityError('deepen-not', 'exclude') - } - if (relative === true && !remoteHTTP.capabilities.has('deepen-relative')) { - throw new RemoteCapabilityError('deepen-relative', 'relative') - } - // Figure out the SHA for the requested ref - const { oid, fullref } = GitRefManager.resolveAgainstMap({ - ref: remoteRef, - map: remoteRefs, - }); - // Filter out refs we want to ignore: only keep ref we're cloning, HEAD, branches, and tags (if we're keeping them) - for (const remoteRef of remoteRefs.keys()) { - if ( - remoteRef === fullref || - remoteRef === 'HEAD' || - remoteRef.startsWith('refs/heads/') || - (tags && remoteRef.startsWith('refs/tags/')) - ) { - continue - } - remoteRefs.delete(remoteRef); - } - // Assemble the application/x-git-upload-pack-request - const capabilities = filterCapabilities( - [...remoteHTTP.capabilities], - [ - 'multi_ack_detailed', - 'no-done', - 'side-band-64k', - // Note: I removed 'thin-pack' option since our code doesn't "fatten" packfiles, - // which is necessary for compatibility with git. It was the cause of mysterious - // 'fatal: pack has [x] unresolved deltas' errors that plagued us for some time. - // isomorphic-git is perfectly happy with thin packfiles in .git/objects/pack but - // canonical git it turns out is NOT. - 'ofs-delta', - `agent=${pkg.agent}`, - ] - ); - if (relative) capabilities.push('deepen-relative'); - // Start figuring out which oids from the remote we want to request - const wants = singleBranch ? [oid] : remoteRefs.values(); - // Come up with a reasonable list of oids to tell the remote we already have - // (preferably oids that are close ancestors of the branch heads we're fetching) - const haveRefs = singleBranch - ? [ref] - : await GitRefManager.listRefs({ - fs, - gitdir, - filepath: `refs`, - }); - let haves = []; - for (let ref of haveRefs) { - try { - ref = await GitRefManager.expand({ fs, gitdir, ref }); - const oid = await GitRefManager.resolve({ fs, gitdir, ref }); - if (await hasObject({ fs, cache, gitdir, oid })) { - haves.push(oid); - } - } catch (err) {} - } - haves = [...new Set(haves)]; - const oids = await GitShallowManager.read({ fs, gitdir }); - const shallows = remoteHTTP.capabilities.has('shallow') ? 
[...oids] : []; - const packstream = writeUploadPackRequest({ - capabilities, - wants, - haves, - shallows, - depth, - since, - exclude, - }); - // CodeCommit will hang up if we don't send a Content-Length header - // so we can't stream the body. - const packbuffer = Buffer.from(await collect(packstream)); - const raw = await GitRemoteHTTP.connect({ - http, - onProgress, - corsProxy, - service: 'git-upload-pack', - url, - auth, - body: [packbuffer], - headers, - }); - const response = await parseUploadPackResponse(raw.body); - if (raw.headers) { - response.headers = raw.headers; - } - // Apply all the 'shallow' and 'unshallow' commands - for (const oid of response.shallows) { - if (!oids.has(oid)) { - // this is in a try/catch mostly because my old test fixtures are missing objects - try { - // server says it's shallow, but do we have the parents? - const { object } = await _readObject({ fs, cache, gitdir, oid }); - const commit = new GitCommit(object); - const hasParents = await Promise.all( - commit - .headers() - .parent.map(oid => hasObject({ fs, cache, gitdir, oid })) - ); - const haveAllParents = - hasParents.length === 0 || hasParents.every(has => has); - if (!haveAllParents) { - oids.add(oid); - } - } catch (err) { - oids.add(oid); - } - } - } - for (const oid of response.unshallows) { - oids.delete(oid); - } - await GitShallowManager.write({ fs, gitdir, oids }); - // Update local remote refs - if (singleBranch) { - const refs = new Map([[fullref, oid]]); - // But wait, maybe it was a symref, like 'HEAD'! - // We need to save all the refs in the symref chain (sigh). - const symrefs = new Map(); - let bail = 10; - let key = fullref; - while (bail--) { - const value = remoteHTTP.symrefs.get(key); - if (value === undefined) break - symrefs.set(key, value); - key = value; - } - // final value must not be a symref but a real ref - const realRef = remoteRefs.get(key); - // There may be no ref at all if we've fetched a specific commit hash - if (realRef) { - refs.set(key, realRef); - } - const { pruned } = await GitRefManager.updateRemoteRefs({ + // Get tree oid + let oid; + try { + oid = await GitRefManager.resolve({ fs, gitdir, ref }); + // TODO: Figure out what to do if both 'ref' and 'remote' are specified, ref already exists, + // and is configured to track a different remote. + } catch (err) { + if (ref === 'HEAD') throw err + // If `ref` doesn't exist, create a new remote tracking branch + // Figure out the commit to checkout + const remoteRef = `${remote}/${ref}`; + oid = await GitRefManager.resolve({ fs, gitdir, - remote, - refs, - symrefs, - tags, - prune, + ref: remoteRef, }); - if (prune) { - response.pruned = pruned; + if (track) { + // Set up remote tracking branch + const config = await GitConfigManager.get({ fs, gitdir }); + await config.set(`branch.${ref}.remote`, remote); + await config.set(`branch.${ref}.merge`, `refs/heads/${ref}`); + await GitConfigManager.save({ fs, gitdir, config }); } - } else { - const { pruned } = await GitRefManager.updateRemoteRefs({ + // Create a new branch that points at that same commit + await GitRefManager.writeRef({ fs, gitdir, - remote, - refs: remoteRefs, - symrefs: remoteHTTP.symrefs, - tags, - prune, - pruneTags, + ref: `refs/heads/${ref}`, + value: oid, }); - if (prune) { - response.pruned = pruned; - } } - // We need this value later for the `clone` command. 
- response.HEAD = remoteHTTP.symrefs.get('HEAD'); - // AWS CodeCommit doesn't list HEAD as a symref, but we can reverse engineer it - // Find the SHA of the branch called HEAD - if (response.HEAD === undefined) { - const { oid } = GitRefManager.resolveAgainstMap({ - ref: 'HEAD', - map: remoteRefs, - }); - // Use the name of the first branch that's not called HEAD that has - // the same SHA as the branch called HEAD. - for (const [key, value] of remoteRefs.entries()) { - if (key !== 'HEAD' && value === oid) { - response.HEAD = key; - break + + // Update working dir + if (!noCheckout) { + let ops; + // First pass - just analyze files (not directories) and figure out what needs to be done + try { + ops = await analyze({ + fs, + cache, + onProgress, + dir, + gitdir, + ref, + force, + filepaths, + }); + } catch (err) { + // Throw a more helpful error message for this common mistake. + if (err instanceof NotFoundError && err.data.what === oid) { + throw new CommitNotFetchedError(ref, oid) + } else { + throw err } } + + // Report conflicts + const conflicts = ops + .filter(([method]) => method === 'conflict') + .map(([method, fullpath]) => fullpath); + if (conflicts.length > 0) { + throw new CheckoutConflictError(conflicts) + } + + // Collect errors + const errors = ops + .filter(([method]) => method === 'error') + .map(([method, fullpath]) => fullpath); + if (errors.length > 0) { + throw new InternalError(errors.join(', ')) + } + + if (dryRun) { + // Since the format of 'ops' is in flux, I really would rather folk besides myself not start relying on it + // return ops + return + } + + // Second pass - execute planned changes + // The cheapest semi-parallel solution without computing a full dependency graph will be + // to just do ops in 4 dumb phases: delete files, delete dirs, create dirs, write files + + let count = 0; + const total = ops.length; + await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { + await Promise.all( + ops + .filter( + ([method]) => method === 'delete' || method === 'delete-index' + ) + .map(async function([method, fullpath]) { + const filepath = `${dir}/${fullpath}`; + if (method === 'delete') { + await fs.rm(filepath); + } + index.delete({ filepath: fullpath }); + if (onProgress) { + await onProgress({ + phase: 'Updating workdir', + loaded: ++count, + total, + }); + } + }) + ); + }); + + // Note: this is cannot be done naively in parallel + await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { + for (const [method, fullpath] of ops) { + if (method === 'rmdir' || method === 'rmdir-index') { + const filepath = `${dir}/${fullpath}`; + try { + if (method === 'rmdir-index') { + index.delete({ filepath: fullpath }); + } + await fs.rmdir(filepath); + if (onProgress) { + await onProgress({ + phase: 'Updating workdir', + loaded: ++count, + total, + }); + } + } catch (e) { + if (e.code === 'ENOTEMPTY') { + console.log( + `Did not delete ${fullpath} because directory is not empty` + ); + } else { + throw e + } + } + } + } + }); + + await Promise.all( + ops + .filter(([method]) => method === 'mkdir' || method === 'mkdir-index') + .map(async function([_, fullpath]) { + const filepath = `${dir}/${fullpath}`; + await fs.mkdir(filepath); + if (onProgress) { + await onProgress({ + phase: 'Updating workdir', + loaded: ++count, + total, + }); + } + }) + ); + + await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { + await Promise.all( + ops + .filter( + ([method]) => + method === 'create' || + method === 'create-index' || 
+ method === 'update' || + method === 'mkdir-index' + ) + .map(async function([method, fullpath, oid, mode, chmod]) { + const filepath = `${dir}/${fullpath}`; + try { + if (method !== 'create-index' && method !== 'mkdir-index') { + const { object } = await _readObject({ fs, cache, gitdir, oid }); + if (chmod) { + // Note: the mode option of fs.write only works when creating files, + // not updating them. Since the `fs` plugin doesn't expose `chmod` this + // is our only option. + await fs.rm(filepath); + } + if (mode === 0o100644) { + // regular file + await fs.write(filepath, object); + } else if (mode === 0o100755) { + // executable file + await fs.write(filepath, object, { mode: 0o777 }); + } else if (mode === 0o120000) { + // symlink + await fs.writelink(filepath, object); + } else { + throw new InternalError( + `Invalid mode 0o${mode.toString(8)} detected in blob ${oid}` + ) + } + } + + const stats = await fs.lstat(filepath); + // We can't trust the executable bit returned by lstat on Windows, + // so we need to preserve this value from the TREE. + // TODO: Figure out how git handles this internally. + if (mode === 0o100755) { + stats.mode = 0o755; + } + // Submodules are present in the git index but use a unique mode different from trees + if (method === 'mkdir-index') { + stats.mode = 0o160000; + } + index.insert({ + filepath: fullpath, + stats, + oid, + }); + if (onProgress) { + await onProgress({ + phase: 'Updating workdir', + loaded: ++count, + total, + }); + } + } catch (e) { + console.log(e); + } + }) + ); + }); } - const noun = fullref.startsWith('refs/tags') ? 'tag' : 'branch'; - response.FETCH_HEAD = { - oid, - description: `${noun} '${abbreviateRef(fullref)}' of ${url}`, - }; - if (onProgress || onMessage) { - const lines = splitLines(response.progress); - forAwait(lines, async line => { - if (onMessage) await onMessage(line); + // Update HEAD + if (!noUpdateHead) { + const fullRef = await GitRefManager.expand({ fs, gitdir, ref }); + if (fullRef.startsWith('refs/heads')) { + await GitRefManager.writeSymbolicRef({ + fs, + gitdir, + ref: 'HEAD', + value: fullRef, + }); + } else { + // detached head + await GitRefManager.writeRef({ fs, gitdir, ref: 'HEAD', value: oid }); + } + } +} + +async function analyze({ + fs, + cache, + onProgress, + dir, + gitdir, + ref, + force, + filepaths, +}) { + let count = 0; + return _walk({ + fs, + cache, + dir, + gitdir, + trees: [TREE({ ref }), WORKDIR(), STAGE()], + map: async function(fullpath, [commit, workdir, stage]) { + if (fullpath === '.') return + // match against base paths + if (filepaths && !filepaths.some(base => worthWalking(fullpath, base))) { + return null + } + // Emit progress event if (onProgress) { - const matches = line.match(/([^:]*).*\((\d+?)\/(\d+?)\)/); - if (matches) { - await onProgress({ - phase: matches[1].trim(), - loaded: parseInt(matches[2], 10), - total: parseInt(matches[3], 10), - }); + await onProgress({ phase: 'Analyzing workdir', loaded: ++count }); + } + + // This is a kind of silly pattern but it worked so well for me in the past + // and it makes intuitively demonstrating exhaustiveness so *easy*. + // This checks for the presense and/or absense of each of the 3 entries, + // converts that to a 3-bit binary representation, and then handles + // every possible combination (2^3 or 8 cases) with a lookup table. + const key = [!!stage, !!commit, !!workdir].map(Number).join(''); + switch (key) { + // Impossible case. 
+ case '000': + return + // Ignore workdir files that are not tracked and not part of the new commit. + case '001': + // OK, make an exception for explicitly named files. + if (force && filepaths && filepaths.includes(fullpath)) { + return ['delete', fullpath] + } + return + // New entries + case '010': { + switch (await commit.type()) { + case 'tree': { + return ['mkdir', fullpath] + } + case 'blob': { + return [ + 'create', + fullpath, + await commit.oid(), + await commit.mode(), + ] + } + case 'commit': { + return [ + 'mkdir-index', + fullpath, + await commit.oid(), + await commit.mode(), + ] + } + default: { + return [ + 'error', + `new entry Unhandled type ${await commit.type()}`, + ] + } + } + } + // New entries but there is already something in the workdir there. + case '011': { + switch (`${await commit.type()}-${await workdir.type()}`) { + case 'tree-tree': { + return // noop + } + case 'tree-blob': + case 'blob-tree': { + return ['conflict', fullpath] + } + case 'blob-blob': { + // Is the incoming file different? + if ((await commit.oid()) !== (await workdir.oid())) { + if (force) { + return [ + 'update', + fullpath, + await commit.oid(), + await commit.mode(), + (await commit.mode()) !== (await workdir.mode()), + ] + } else { + return ['conflict', fullpath] + } + } else { + // Is the incoming file a different mode? + if ((await commit.mode()) !== (await workdir.mode())) { + if (force) { + return [ + 'update', + fullpath, + await commit.oid(), + await commit.mode(), + true, + ] + } else { + return ['conflict', fullpath] + } + } else { + return [ + 'create-index', + fullpath, + await commit.oid(), + await commit.mode(), + ] + } + } + } + case 'commit-tree': { + // TODO: submodule + // We'll ignore submodule directories for now. + // Users prefer we not throw an error for lack of submodule support. + // gitlinks + return + } + case 'commit-blob': { + // TODO: submodule + // But... we'll complain if there is a *file* where we would + // put a submodule if we had submodule support. + return ['conflict', fullpath] + } + default: { + return ['error', `new entry Unhandled type ${commit.type}`] + } + } + } + // Something in stage but not in the commit OR the workdir. + // Note: I verified this behavior against canonical git. + case '100': { + return ['delete-index', fullpath] + } + // Deleted entries + // TODO: How to handle if stage type and workdir type mismatch? + case '101': { + switch (await stage.type()) { + case 'tree': { + return ['rmdir', fullpath] + } + case 'blob': { + // Git checks that the workdir.oid === stage.oid before deleting file + if ((await stage.oid()) !== (await workdir.oid())) { + if (force) { + return ['delete', fullpath] + } else { + return ['conflict', fullpath] + } + } else { + return ['delete', fullpath] + } + } + case 'commit': { + return ['rmdir-index', fullpath] + } + default: { + return [ + 'error', + `delete entry Unhandled type ${await stage.type()}`, + ] + } + } + } + /* eslint-disable no-fallthrough */ + // File missing from workdir + case '110': + // Possibly modified entries + case '111': { + /* eslint-enable no-fallthrough */ + switch (`${await stage.type()}-${await commit.type()}`) { + case 'tree-tree': { + return + } + case 'blob-blob': { + // If the file hasn't changed, there is no need to do anything. + // Existing file modifications in the workdir can be be left as is. 
+ if ( + (await stage.oid()) === (await commit.oid()) && + (await stage.mode()) === (await commit.mode()) && + !force + ) { + return + } + + // Check for local changes that would be lost + if (workdir) { + // Note: canonical git only compares with the stage. But we're smart enough + // to compare to the stage AND the incoming commit. + if ( + (await workdir.oid()) !== (await stage.oid()) && + (await workdir.oid()) !== (await commit.oid()) + ) { + if (force) { + return [ + 'update', + fullpath, + await commit.oid(), + await commit.mode(), + (await commit.mode()) !== (await workdir.mode()), + ] + } else { + return ['conflict', fullpath] + } + } + } else if (force) { + return [ + 'update', + fullpath, + await commit.oid(), + await commit.mode(), + (await commit.mode()) !== (await stage.mode()), + ] + } + // Has file mode changed? + if ((await commit.mode()) !== (await stage.mode())) { + return [ + 'update', + fullpath, + await commit.oid(), + await commit.mode(), + true, + ] + } + // TODO: HANDLE SYMLINKS + // Has the file content changed? + if ((await commit.oid()) !== (await stage.oid())) { + return [ + 'update', + fullpath, + await commit.oid(), + await commit.mode(), + false, + ] + } else { + return + } + } + case 'tree-blob': { + return ['update-dir-to-blob', fullpath, await commit.oid()] + } + case 'blob-tree': { + return ['update-blob-to-tree', fullpath] + } + case 'commit-commit': { + return [ + 'mkdir-index', + fullpath, + await commit.oid(), + await commit.mode(), + ] + } + default: { + return [ + 'error', + `update entry Unhandled type ${await stage.type()}-${await commit.type()}`, + ] + } + } } } - }); - } - const packfile = Buffer.from(await collect(response.packfile)); - const packfileSha = packfile.slice(-20).toString('hex'); - const res = { - defaultBranch: response.HEAD, - fetchHead: response.FETCH_HEAD.oid, - fetchHeadDescription: response.FETCH_HEAD.description, - }; - if (response.headers) { - res.headers = response.headers; - } - if (prune) { - res.pruned = response.pruned; - } - // This is a quick fix for the empty .git/objects/pack/pack-.pack file error, - // which due to the way `git-list-pack` works causes the program to hang when it tries to read it. - // TODO: Longer term, we should actually: - // a) NOT concatenate the entire packfile into memory (line 78), - // b) compute the SHA of the stream except for the last 20 bytes, using the same library used in push.js, and - // c) compare the computed SHA with the last 20 bytes of the stream before saving to disk, and throwing a "packfile got corrupted during download" error if the SHA doesn't match. - if (packfileSha !== '' && !emptyPackfile(packfile)) { - res.packfile = `objects/pack/pack-${packfileSha}.pack`; - const fullpath = join(gitdir, res.packfile); - await fs.write(fullpath, packfile); - const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); - const idx = await GitPackIndex.fromPack({ - pack: packfile, - getExternalRefDelta, - onProgress, - }); - await fs.write(fullpath.replace(/\.pack$/, '.idx'), await idx.toBuffer()); - } - return res -} - -// @ts-check - -/** - * Initialize a new repository - * - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} [args.dir] - * @param {string} [args.gitdir] - * @param {boolean} [args.bare = false] - * @param {string} [args.defaultBranch = 'master'] - * @returns {Promise} - */ -async function _init({ - fs, - bare = false, - dir, - gitdir = bare ? 
dir : join(dir, '.git'), - defaultBranch = 'master', -}) { - // Don't overwrite an existing config - if (await fs.exists(gitdir + '/config')) return - - let folders = [ - 'hooks', - 'info', - 'objects/info', - 'objects/pack', - 'refs/heads', - 'refs/tags', - ]; - folders = folders.map(dir => gitdir + '/' + dir); - for (const folder of folders) { - await fs.mkdir(folder); - } - - await fs.write( - gitdir + '/config', - '[core]\n' + - '\trepositoryformatversion = 0\n' + - '\tfilemode = false\n' + - `\tbare = ${bare}\n` + - (bare ? '' : '\tlogallrefupdates = true\n') + - '\tsymlinks = false\n' + - '\tignorecase = true\n' - ); - await fs.write(gitdir + '/HEAD', `ref: refs/heads/${defaultBranch}\n`); + }, + // Modify the default flat mapping + reduce: async function(parent, children) { + children = flat(children); + if (!parent) { + return children + } else if (parent && parent[0] === 'rmdir') { + children.push(parent); + return children + } else { + children.unshift(parent); + return children + } + }, + }) } // @ts-check /** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {HttpClient} args.http - * @param {ProgressCallback} [args.onProgress] - * @param {MessageCallback} [args.onMessage] - * @param {AuthCallback} [args.onAuth] - * @param {AuthFailureCallback} [args.onAuthFailure] - * @param {AuthSuccessCallback} [args.onAuthSuccess] - * @param {string} [args.dir] - * @param {string} args.gitdir - * @param {string} args.url - * @param {string} args.corsProxy - * @param {string} args.ref - * @param {boolean} args.singleBranch - * @param {boolean} args.noCheckout - * @param {boolean} args.noTags - * @param {string} args.remote - * @param {number} args.depth - * @param {Date} args.since - * @param {string[]} args.exclude - * @param {boolean} args.relative - * @param {Object} args.headers - * - * @returns {Promise} Resolves successfully when clone completes + * Checkout a branch * - */ -async function _clone({ - fs, - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, - dir, - gitdir, - url, - corsProxy, - ref, - remote, - depth, - since, - exclude, - relative, - singleBranch, - noCheckout, - noTags, - headers, -}) { - try { - await _init({ fs, gitdir }); - await _addRemote({ fs, gitdir, remote, url, force: false }); - if (corsProxy) { - const config = await GitConfigManager.get({ fs, gitdir }); - await config.set(`http.corsProxy`, corsProxy); - await GitConfigManager.save({ fs, gitdir, config }); - } - const { defaultBranch, fetchHead } = await _fetch({ - fs, - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, - gitdir, - ref, - remote, - corsProxy, - depth, - since, - exclude, - relative, - singleBranch, - headers, - tags: !noTags, - }); - if (fetchHead === null) return - ref = ref || defaultBranch; - ref = ref.replace('refs/heads/', ''); - // Checkout that branch - await _checkout({ - fs, - cache, - onProgress, - dir, - gitdir, - ref, - remote, - noCheckout, - }); - } catch (err) { - // Remove partial local repository, see #1283 - // Ignore any error as we are already failing. - // The catch is necessary so the original error is not masked. - await fs - .rmdir(gitdir, { recursive: true, maxRetries: 10 }) - .catch(() => undefined); - throw err - } -} - -// @ts-check - -/** - * Clone a repository + * If the branch already exists it will check out that branch. 
Otherwise, it will create a new remote tracking branch set to track the remote branch of that name. * * @param {object} args * @param {FsClient} args.fs - a file system implementation - * @param {HttpClient} args.http - an HTTP client * @param {ProgressCallback} [args.onProgress] - optional progress event callback - * @param {MessageCallback} [args.onMessage] - optional message event callback - * @param {AuthCallback} [args.onAuth] - optional auth fill callback - * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback - * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.url - The URL of the remote repository - * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Value is stored in the git config file for that repo. - * @param {string} [args.ref] - Which branch to checkout. By default this is the designated "main branch" of the repository. - * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. - * @param {boolean} [args.noCheckout = false] - If true, clone will only fetch the repo, not check out a branch. Skipping checkout can save a lot of time normally spent writing files to disk. - * @param {boolean} [args.noTags = false] - By default clone will fetch all tags. `noTags` disables that behavior. - * @param {string} [args.remote = 'origin'] - What to name the remote that is created. - * @param {number} [args.depth] - Integer. Determines how much of the git repository's history to retrieve - * @param {Date} [args.since] - Only fetch commits created after the given date. Mutually exclusive with `depth`. - * @param {string[]} [args.exclude = []] - A list of branches or tags. Instructs the remote server not to send us any commits reachable from these refs. - * @param {boolean} [args.relative = false] - Changes the meaning of `depth` to be measured from the current shallow depth rather than from the branch tip. - * @param {Object} [args.headers = {}] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config + * @param {string} [args.ref = 'HEAD'] - Source to checkout files from + * @param {string[]} [args.filepaths] - Limit the checkout to the given files and directories + * @param {string} [args.remote = 'origin'] - Which remote repository to use + * @param {boolean} [args.noCheckout = false] - If true, will update HEAD but won't update the working directory + * @param {boolean} [args.noUpdateHead] - If true, will update the working directory but won't update HEAD. Defaults to `false` when `ref` is provided, and `true` if `ref` is not provided. + * @param {boolean} [args.dryRun = false] - If true, simulates a checkout so you can test whether it would succeed. + * @param {boolean} [args.force = false] - If true, conflicts will be ignored and files will be overwritten regardless of local changes. + * @param {boolean} [args.track = true] - If false, will not set the remote branch tracking information. Defaults to true. 
* @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully when clone completes + * @returns {Promise} Resolves successfully when filesystem operations are complete * * @example - * await git.clone({ + * // switch to the main branch + * await git.checkout({ * fs, - * http, * dir: '/tutorial', - * corsProxy: 'https://cors.isomorphic-git.org', - * url: 'https://github.com/isomorphic-git/isomorphic-git', - * singleBranch: true, - * depth: 1 + * ref: 'main' + * }) + * console.log('done') + * + * @example + * // restore the 'docs' and 'src/docs' folders to the way they were, overwriting any changes + * await git.checkout({ + * fs, + * dir: '/tutorial', + * force: true, + * filepaths: ['docs', 'src/docs'] * }) * console.log('done') * + * @example + * // restore the 'docs' and 'src/docs' folders to the way they are in the 'develop' branch, overwriting any changes + * await git.checkout({ + * fs, + * dir: '/tutorial', + * ref: 'develop', + * noUpdateHead: true, + * force: true, + * filepaths: ['docs', 'src/docs'] + * }) + * console.log('done') */ -async function clone({ +async function checkout({ fs, - http, onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, dir, gitdir = join(dir, '.git'), - url, - corsProxy = undefined, - ref = undefined, remote = 'origin', - depth = undefined, - since = undefined, - exclude = [], - relative = false, - singleBranch = false, + ref: _ref, + filepaths, noCheckout = false, - noTags = false, - headers = {}, + noUpdateHead = _ref === undefined, + dryRun = false, + force = false, + track = true, cache = {}, }) { try { assertParameter('fs', fs); - assertParameter('http', http); + assertParameter('dir', dir); assertParameter('gitdir', gitdir); - if (!noCheckout) { - assertParameter('dir', dir); + + const ref = _ref || 'HEAD'; + return await _checkout({ + fs: new FileSystem(fs), + cache, + onProgress, + dir, + gitdir, + remote, + ref, + filepaths, + noCheckout, + noUpdateHead, + dryRun, + force, + track, + }) + } catch (err) { + err.caller = 'git.checkout'; + throw err + } +} + +// @see https://git-scm.com/docs/git-rev-parse.html#_specifying_revisions +const abbreviateRx = new RegExp('^refs/(heads/|tags/|remotes/)?(.*)'); + +function abbreviateRef(ref) { + const match = abbreviateRx.exec(ref); + if (match) { + if (match[1] === 'remotes/' && ref.endsWith('/HEAD')) { + return match[2].slice(0, -5) + } else { + return match[2] + } + } + return ref +} + +// @ts-check + +/** + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {boolean} [args.fullname = false] - Return the full path (e.g. "refs/heads/main") instead of the abbreviated form. + * @param {boolean} [args.test = false] - If the current branch doesn't actually exist (such as right after git init) then return `undefined`. + * + * @returns {Promise} The name of the current branch or undefined if the HEAD is detached. + * + */ +async function _currentBranch({ + fs, + gitdir, + fullname = false, + test = false, +}) { + const ref = await GitRefManager.resolve({ + fs, + gitdir, + ref: 'HEAD', + depth: 2, + }); + if (test) { + try { + await GitRefManager.resolve({ fs, gitdir, ref }); + } catch (_) { + return + } + } + // Return `undefined` for detached HEAD + if (!ref.startsWith('refs/')) return + return fullname ? 
ref : abbreviateRef(ref) +} + +function translateSSHtoHTTP(url) { + // handle "shorter scp-like syntax" + url = url.replace(/^git@([^:]+):/, 'https://$1/'); + // handle proper SSH URLs + url = url.replace(/^ssh:\/\//, 'https://'); + return url +} + +function calculateBasicAuthHeader({ username = '', password = '' }) { + return `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}` +} + +// Currently 'for await' upsets my linters. +async function forAwait(iterable, cb) { + const iter = getIterator(iterable); + while (true) { + const { value, done } = await iter.next(); + if (value) await cb(value); + if (done) break + } + if (iter.return) iter.return(); +} + +async function collect(iterable) { + let size = 0; + const buffers = []; + // This will be easier once `for await ... of` loops are available. + await forAwait(iterable, value => { + buffers.push(value); + size += value.byteLength; + }); + const result = new Uint8Array(size); + let nextIndex = 0; + for (const buffer of buffers) { + result.set(buffer, nextIndex); + nextIndex += buffer.byteLength; + } + return result +} + +function extractAuthFromUrl(url) { + // For whatever reason, the `fetch` API does not convert credentials embedded in the URL + // into Basic Authentication headers automatically. Instead it throws an error! + // So we must manually parse the URL, rip out the user:password portion if it is present + // and compute the Authorization header. + // Note: I tried using new URL(url) but that throws a security exception in Edge. :rolleyes: + let userpass = url.match(/^https?:\/\/([^/]+)@/); + // No credentials, return the url unmodified and an empty auth object + if (userpass == null) return { url, auth: {} } + userpass = userpass[1]; + const [username, password] = userpass.split(':'); + // Remove credentials from URL + url = url.replace(`${userpass}@`, ''); + // Has credentials, return the fetch-safe URL and the parsed credentials + return { url, auth: { username, password } } +} + +function padHex(b, n) { + const s = n.toString(16); + return '0'.repeat(b - s.length) + s +} + +/** +pkt-line Format +--------------- + +Much (but not all) of the payload is described around pkt-lines. + +A pkt-line is a variable length binary string. The first four bytes +of the line, the pkt-len, indicates the total length of the line, +in hexadecimal. The pkt-len includes the 4 bytes used to contain +the length's hexadecimal representation. + +A pkt-line MAY contain binary data, so implementors MUST ensure +pkt-line parsing/formatting routines are 8-bit clean. + +A non-binary line SHOULD BE terminated by an LF, which if present +MUST be included in the total length. Receivers MUST treat pkt-lines +with non-binary data the same whether or not they contain the trailing +LF (stripping the LF if present, and not complaining when it is +missing). + +The maximum length of a pkt-line's data component is 65516 bytes. +Implementations MUST NOT send pkt-line whose length exceeds 65520 +(65516 bytes of payload + 4 bytes of length data). + +Implementations SHOULD NOT send an empty pkt-line ("0004"). + +A pkt-line with a length field of 0 ("0000"), called a flush-pkt, +is a special case and MUST be handled differently than an empty +pkt-line ("0004"). 
+ +---- + pkt-line = data-pkt / flush-pkt + + data-pkt = pkt-len pkt-payload + pkt-len = 4*(HEXDIG) + pkt-payload = (pkt-len - 4)*(OCTET) + + flush-pkt = "0000" +---- + +Examples (as C-style strings): + +---- + pkt-line actual value + --------------------------------- + "0006a\n" "a\n" + "0005a" "a" + "000bfoobar\n" "foobar\n" + "0004" "" +---- +*/ + +// I'm really using this more as a namespace. +// There's not a lot of "state" in a pkt-line + +class GitPktLine { + static flush() { + return Buffer.from('0000', 'utf8') + } + + static delim() { + return Buffer.from('0001', 'utf8') + } + + static encode(line) { + if (typeof line === 'string') { + line = Buffer.from(line); } - assertParameter('url', url); + const length = line.length + 4; + const hexlength = padHex(4, length); + return Buffer.concat([Buffer.from(hexlength, 'utf8'), line]) + } - return await _clone({ - fs: new FileSystem(fs), - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, - dir, - gitdir, - url, - corsProxy, - ref, - remote, - depth, - since, - exclude, - relative, - singleBranch, - noCheckout, - noTags, - headers, - }) - } catch (err) { - err.caller = 'git.clone'; - throw err + static streamReader(stream) { + const reader = new StreamReader(stream); + return async function read() { + try { + let length = await reader.read(4); + if (length == null) return true + length = parseInt(length.toString('utf8'), 16); + if (length === 0) return null + if (length === 1) return null // delim packets + const buffer = await reader.read(length - 4); + if (buffer == null) return true + return buffer + } catch (err) { + console.log('error', err); + return true + } + } } } // @ts-check /** - * Create a new commit - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {SignCallback} [args.onSign] - a PGP signing implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.message - The commit message to use. - * @param {Object} [args.author] - The details about the author. - * @param {string} [args.author.name] - Default is `user.name` config. - * @param {string} [args.author.email] - Default is `user.email` config. - * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {Object} [args.committer = author] - The details about the commit committer, in the same format as the author parameter. If not specified, the author details are used. - * @param {string} [args.committer.name] - Default is `user.name` config. - * @param {string} [args.committer.email] - Default is `user.email` config. - * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. 
- * @param {string} [args.signingKey] - Sign the tag object using this private PGP key. - * @param {boolean} [args.dryRun = false] - If true, simulates making a commit so you can test whether it would succeed. Implies `noUpdateBranch`. - * @param {boolean} [args.noUpdateBranch = false] - If true, does not update the branch pointer after creating the commit. - * @param {string} [args.ref] - The fully expanded name of the branch to commit to. Default is the current branch pointed to by HEAD. (TODO: fix it so it can expand branch names without throwing if the branch doesn't exist yet.) - * @param {string[]} [args.parent] - The SHA-1 object ids of the commits to use as parents. If not specified, the commit pointed to by `ref` is used. - * @param {string} [args.tree] - The SHA-1 object id of the tree to use. If not specified, a new tree object is created from the current git index. - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly created commit. - * - * @example - * let sha = await git.commit({ - * fs, - * dir: '/tutorial', - * author: { - * name: 'Mr. Test', - * email: 'mrtest@example.com', - * }, - * message: 'Added the a.txt file' - * }) - * console.log(sha) - * + * @param {function} read */ -async function commit({ - fs: _fs, - onSign, - dir, - gitdir = join(dir, '.git'), - message, - author: _author, - committer: _committer, - signingKey, - dryRun = false, - noUpdateBranch = false, - ref, - parent, - tree, - cache = {}, -}) { - try { - assertParameter('fs', _fs); - assertParameter('message', message); - if (signingKey) { - assertParameter('onSign', onSign); +async function parseCapabilitiesV2(read) { + /** @type {Object} */ + const capabilities2 = {}; + + let line; + while (true) { + line = await read(); + if (line === true) break + if (line === null) continue + line = line.toString('utf8').replace(/\n$/, ''); + const i = line.indexOf('='); + if (i > -1) { + const key = line.slice(0, i); + const value = line.slice(i + 1); + capabilities2[key] = value; + } else { + capabilities2[line] = true; } - const fs = new FileSystem(_fs); + } + return { protocolVersion: 2, capabilities2 } +} - const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); - if (!author) throw new MissingNameError('author') +async function parseRefsAdResponse(stream, { service }) { + const capabilities = new Set(); + const refs = new Map(); + const symrefs = new Map(); - const committer = await normalizeCommitterObject({ - fs, - gitdir, - author, - committer: _committer, - }); - if (!committer) throw new MissingNameError('committer') + // There is probably a better way to do this, but for now + // let's just throw the result parser inline here. + const read = GitPktLine.streamReader(stream); + let lineOne = await read(); + // skip past any flushes + while (lineOne === null) lineOne = await read(); - return await _commit({ - fs, - cache, - onSign, - gitdir, - message, - author, - committer, - signingKey, - dryRun, - noUpdateBranch, - ref, - parent, - tree, - }) - } catch (err) { - err.caller = 'git.commit'; - throw err + if (lineOne === true) throw new EmptyServerResponseError() + + // Handle protocol v2 responses (Bitbucket Server doesn't include a `# service=` line) + if (lineOne.includes('version 2')) { + return parseCapabilitiesV2(read) } -} -// @ts-check + // Clients MUST ignore an LF at the end of the line. 
+ if (lineOne.toString('utf8').replace(/\n$/, '') !== `# service=${service}`) { + throw new ParseError(`# service=${service}\\n`, lineOne.toString('utf8')) + } + let lineTwo = await read(); + // skip past any flushes + while (lineTwo === null) lineTwo = await read(); + // In the edge case of a brand new repo, zero refs (and zero capabilities) + // are returned. + if (lineTwo === true) return { capabilities, refs, symrefs } + lineTwo = lineTwo.toString('utf8'); -/** - * Get the name of the branch currently pointed to by .git/HEAD - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {boolean} [args.fullname = false] - Return the full path (e.g. "refs/heads/main") instead of the abbreviated form. - * @param {boolean} [args.test = false] - If the current branch doesn't actually exist (such as right after git init) then return `undefined`. - * - * @returns {Promise} The name of the current branch or undefined if the HEAD is detached. - * - * @example - * // Get the current branch name - * let branch = await git.currentBranch({ - * fs, - * dir: '/tutorial', - * fullname: false - * }) - * console.log(branch) - * - */ -async function currentBranch({ - fs, - dir, - gitdir = join(dir, '.git'), - fullname = false, - test = false, -}) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - return await _currentBranch({ - fs: new FileSystem(fs), - gitdir, - fullname, - test, - }) - } catch (err) { - err.caller = 'git.currentBranch'; - throw err + // Handle protocol v2 responses + if (lineTwo.includes('version 2')) { + return parseCapabilitiesV2(read) + } + + const [firstRef, capabilitiesLine] = splitAndAssert(lineTwo, '\x00', '\\x00'); + capabilitiesLine.split(' ').map(x => capabilities.add(x)); + const [ref, name] = splitAndAssert(firstRef, ' ', ' '); + refs.set(name, ref); + while (true) { + const line = await read(); + if (line === true) break + if (line !== null) { + const [ref, name] = splitAndAssert(line.toString('utf8'), ' ', ' '); + refs.set(name, ref); + } + } + // Symrefs are thrown into the "capabilities" unfortunately. + for (const cap of capabilities) { + if (cap.startsWith('symref=')) { + const m = cap.match(/symref=([^:]+):(.*)/); + if (m.length === 3) { + symrefs.set(m[1], m[2]); + } + } } + return { protocolVersion: 1, capabilities, refs, symrefs } } -// @ts-check +function splitAndAssert(line, sep, expected) { + const split = line.trim().split(sep); + if (split.length !== 2) { + throw new ParseError( + `Two strings separated by '${expected}'`, + line.toString('utf8') + ) + } + return split +} + +// Try to accomodate known CORS proxy implementations: +// - https://jcubic.pl/proxy.php? <-- uses query string +// - https://cors.isomorphic-git.org <-- uses path +const corsProxify = (corsProxy, url) => + corsProxy.endsWith('?') + ? 
`${corsProxy}${url}` + : `${corsProxy}/${url.replace(/^https?:\/\//, '')}`; + +const updateHeaders = (headers, auth) => { + // Update the basic auth header + if (auth.username || auth.password) { + headers.Authorization = calculateBasicAuthHeader(auth); + } + // but any manually provided headers take precedence + if (auth.headers) { + Object.assign(headers, auth.headers); + } +}; /** - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.ref + * @param {GitHttpResponse} res * - * @returns {Promise} + * @returns {{ preview: string, response: string, data: Buffer }} */ -async function _deleteBranch({ fs, gitdir, ref }) { - const exist = await GitRefManager.exists({ fs, gitdir, ref }); - if (!exist) { - throw new NotFoundError(ref) +const stringifyBody = async res => { + try { + // Some services provide a meaningful error message in the body of 403s like "token lacks the scopes necessary to perform this action" + const data = Buffer.from(await collect(res.body)); + const response = data.toString('utf8'); + const preview = + response.length < 256 ? response : response.slice(0, 256) + '...'; + return { preview, response, data } + } catch (e) { + return {} } +}; - const fullRef = await GitRefManager.expand({ fs, gitdir, ref }); - const currentRef = await _currentBranch({ fs, gitdir, fullname: true }); - if (fullRef === currentRef) { - // detach HEAD - const value = await GitRefManager.resolve({ fs, gitdir, ref: fullRef }); - await GitRefManager.writeRef({ fs, gitdir, ref: 'HEAD', value }); +class GitRemoteHTTP { + static async capabilities() { + return ['discover', 'connect'] } - // Delete a specified branch - await GitRefManager.deleteRef({ fs, gitdir, ref: fullRef }); -} + /** + * @param {Object} args + * @param {HttpClient} args.http + * @param {ProgressCallback} [args.onProgress] + * @param {AuthCallback} [args.onAuth] + * @param {AuthFailureCallback} [args.onAuthFailure] + * @param {AuthSuccessCallback} [args.onAuthSuccess] + * @param {string} [args.corsProxy] + * @param {string} args.service + * @param {string} args.url + * @param {Object} args.headers + * @param {1 | 2} args.protocolVersion - Git Protocol Version + */ + static async discover({ + http, + onProgress, + onAuth, + onAuthSuccess, + onAuthFailure, + corsProxy, + service, + url: _origUrl, + headers, + protocolVersion, + }) { + let { url, auth } = extractAuthFromUrl(_origUrl); + const proxifiedURL = corsProxy ? corsProxify(corsProxy, url) : url; + if (auth.username || auth.password) { + headers.Authorization = calculateBasicAuthHeader(auth); + } + if (protocolVersion === 2) { + headers['Git-Protocol'] = 'version=2'; + } -// @ts-check + let res; + let tryAgain; + let providedAuthBefore = false; + do { + res = await http.request({ + onProgress, + method: 'GET', + url: `${proxifiedURL}/info/refs?service=${service}`, + headers, + }); -/** - * Delete a local branch - * - * > Note: This only deletes loose branches - it should be fixed in the future to delete packed branches as well. 
- * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - The branch to delete - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.deleteBranch({ fs, dir: '/tutorial', ref: 'local-branch' }) - * console.log('done') - * - */ -async function deleteBranch({ - fs, - dir, - gitdir = join(dir, '.git'), - ref, -}) { - try { - assertParameter('fs', fs); - assertParameter('ref', ref); - return await _deleteBranch({ - fs: new FileSystem(fs), - gitdir, - ref, - }) - } catch (err) { - err.caller = 'git.deleteBranch'; - throw err - } -} + // the default loop behavior + tryAgain = false; -// @ts-check + // 401 is the "correct" response for access denied. 203 is Non-Authoritative Information and comes from Azure DevOps, which + // apparently doesn't realize this is a git request and is returning the HTML for the "Azure DevOps Services | Sign In" page. + if (res.statusCode === 401 || res.statusCode === 203) { + // On subsequent 401s, call `onAuthFailure` instead of `onAuth`. + // This is so that naive `onAuth` callbacks that return a fixed value don't create an infinite loop of retrying. + const getAuth = providedAuthBefore ? onAuthFailure : onAuth; + if (getAuth) { + // Acquire credentials and try again + // TODO: read `useHttpPath` value from git config and pass along? + auth = await getAuth(url, { + ...auth, + headers: { ...headers }, + }); + if (auth && auth.cancel) { + throw new UserCanceledError() + } else if (auth) { + updateHeaders(headers, auth); + providedAuthBefore = true; + tryAgain = true; + } + } + } else if ( + res.statusCode === 200 && + providedAuthBefore && + onAuthSuccess + ) { + await onAuthSuccess(url, auth); + } + } while (tryAgain) -/** - * Delete a local ref - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - The ref to delete - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.deleteRef({ fs, dir: '/tutorial', ref: 'refs/tags/test-tag' }) - * console.log('done') - * - */ -async function deleteRef({ fs, dir, gitdir = join(dir, '.git'), ref }) { - try { - assertParameter('fs', fs); - assertParameter('ref', ref); - await GitRefManager.deleteRef({ fs: new FileSystem(fs), gitdir, ref }); - } catch (err) { - err.caller = 'git.deleteRef'; - throw err + if (res.statusCode !== 200) { + const { response } = await stringifyBody(res); + throw new HttpError(res.statusCode, res.statusMessage, response) + } + // Git "smart" HTTP servers should respond with the correct Content-Type header. + if ( + res.headers['content-type'] === `application/x-${service}-advertisement` + ) { + const remoteHTTP = await parseRefsAdResponse(res.body, { service }); + remoteHTTP.auth = auth; + return remoteHTTP + } else { + // If they don't send the correct content-type header, that's a good indicator it is either a "dumb" HTTP + // server, or the user specified an incorrect remote URL and the response is actually an HTML page. 
+ // In this case, we save the response as plain text so we can generate a better error message if needed. + const { preview, response, data } = await stringifyBody(res); + // For backwards compatibility, try to parse it anyway. + // TODO: maybe just throw instead of trying? + try { + const remoteHTTP = await parseRefsAdResponse([data], { service }); + remoteHTTP.auth = auth; + return remoteHTTP + } catch (e) { + throw new SmartHttpError(preview, response) + } + } } -} -// @ts-check + /** + * @param {Object} args + * @param {HttpClient} args.http + * @param {ProgressCallback} [args.onProgress] + * @param {string} [args.corsProxy] + * @param {string} args.service + * @param {string} args.url + * @param {Object} [args.headers] + * @param {any} args.body + * @param {any} args.auth + */ + static async connect({ + http, + onProgress, + corsProxy, + service, + url, + auth, + body, + headers, + }) { + // We already have the "correct" auth value at this point, but + // we need to strip out the username/password from the URL yet again. + const urlAuth = extractAuthFromUrl(url); + if (urlAuth) url = urlAuth.url; + + if (corsProxy) url = corsProxify(corsProxy, url); -/** - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.remote - * - * @returns {Promise} - */ -async function _deleteRemote({ fs, gitdir, remote }) { - const config = await GitConfigManager.get({ fs, gitdir }); - await config.deleteSection('remote', remote); - await GitConfigManager.save({ fs, gitdir, config }); -} + headers['content-type'] = `application/x-${service}-request`; + headers.accept = `application/x-${service}-result`; + updateHeaders(headers, auth); -// @ts-check + const res = await http.request({ + onProgress, + method: 'POST', + url: `${url}/${service}`, + body, + headers, + }); + if (res.statusCode !== 200) { + const { response } = stringifyBody(res); + throw new HttpError(res.statusCode, res.statusMessage, response) + } + return res + } +} -/** - * Removes the local config entry for a given remote - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.remote - The name of the remote to delete - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.deleteRemote({ fs, dir: '/tutorial', remote: 'upstream' }) - * console.log('done') - * - */ -async function deleteRemote({ - fs, - dir, - gitdir = join(dir, '.git'), - remote, -}) { - try { - assertParameter('fs', fs); - assertParameter('remote', remote); - return await _deleteRemote({ - fs: new FileSystem(fs), - gitdir, - remote, - }) - } catch (err) { - err.caller = 'git.deleteRemote'; - throw err +function parseRemoteUrl({ url }) { + // the stupid "shorter scp-like syntax" + if (url.startsWith('git@')) { + return { + transport: 'ssh', + address: url, + } + } + const matches = url.match(/(\w+)(:\/\/|::)(.*)/); + if (matches === null) return + /* + * When git encounters a URL of the form ://
, where is + * a protocol that it cannot handle natively, it automatically invokes git remote- + * with the full URL as the second argument. + * + * @see https://git-scm.com/docs/git-remote-helpers + */ + if (matches[2] === '://') { + return { + transport: matches[1], + address: matches[0], + } + } + /* + * A URL of the form ::
explicitly instructs git to invoke + * git remote- with
as the second argument. + * + * @see https://git-scm.com/docs/git-remote-helpers + */ + if (matches[2] === '::') { + return { + transport: matches[1], + address: matches[3], + } } } -// @ts-check +class GitRemoteManager { + static getRemoteHelperFor({ url }) { + // TODO: clean up the remoteHelper API and move into PluginCore + const remoteHelpers = new Map(); + remoteHelpers.set('http', GitRemoteHTTP); + remoteHelpers.set('https', GitRemoteHTTP); -/** - * Delete a local tag ref - * - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.ref - The tag to delete - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.deleteTag({ dir: '$input((/))', ref: '$input((test-tag))' }) - * console.log('done') - * - */ -async function _deleteTag({ fs, gitdir, ref }) { - ref = ref.startsWith('refs/tags/') ? ref : `refs/tags/${ref}`; - await GitRefManager.deleteRef({ fs, gitdir, ref }); + const parts = parseRemoteUrl({ url }); + if (!parts) { + throw new UrlParseError(url) + } + if (remoteHelpers.has(parts.transport)) { + return remoteHelpers.get(parts.transport) + } + throw new UnknownTransportError( + url, + parts.transport, + parts.transport === 'ssh' ? translateSSHtoHTTP(url) : undefined + ) + } } -// @ts-check +let lock$1 = null; -/** - * Delete a local tag ref - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - The tag to delete - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.deleteTag({ fs, dir: '/tutorial', ref: 'test-tag' }) - * console.log('done') - * - */ -async function deleteTag({ fs, dir, gitdir = join(dir, '.git'), ref }) { - try { - assertParameter('fs', fs); - assertParameter('ref', ref); - return await _deleteTag({ - fs: new FileSystem(fs), - gitdir, - ref, - }) - } catch (err) { - err.caller = 'git.deleteTag'; - throw err +class GitShallowManager { + static async read({ fs, gitdir }) { + if (lock$1 === null) lock$1 = new AsyncLock(); + const filepath = join(gitdir, 'shallow'); + const oids = new Set(); + await lock$1.acquire(filepath, async function() { + const text = await fs.read(filepath, { encoding: 'utf8' }); + if (text === null) return oids // no file + if (text.trim() === '') return oids // empty file + text + .trim() + .split('\n') + .map(oid => oids.add(oid)); + }); + return oids + } + + static async write({ fs, gitdir, oids }) { + if (lock$1 === null) lock$1 = new AsyncLock(); + const filepath = join(gitdir, 'shallow'); + if (oids.size > 0) { + const text = [...oids].join('\n') + '\n'; + await lock$1.acquire(filepath, async function() { + await fs.write(filepath, text, { + encoding: 'utf8', + }); + }); + } else { + // No shallows + await lock$1.acquire(filepath, async function() { + await fs.rm(filepath); + }); + } } } -async function expandOidLoose({ fs, gitdir, oid: short }) { - const prefix = short.slice(0, 2); - const objectsSuffixes = await fs.readdir(`${gitdir}/objects/${prefix}`); - return objectsSuffixes - .map(suffix => `${prefix}${suffix}`) - .filter(_oid => _oid.startsWith(short)) +async function hasObjectLoose({ fs, gitdir, oid }) { + const source = `objects/${oid.slice(0, 
2)}/${oid.slice(2)}`; + return fs.exists(`${gitdir}/${source}`) } -async function expandOidPacked({ +async function hasObjectPacked({ fs, cache, gitdir, - oid: short, + oid, getExternalRefDelta, }) { - // Iterate through all the .pack files - const results = []; + // Check to see if it's in a packfile. + // Iterate through all the .idx files let list = await fs.readdir(join(gitdir, 'objects/pack')); list = list.filter(x => x.endsWith('.idx')); for (const filename of list) { @@ -25806,641 +24661,934 @@ async function expandOidPacked({ const p = await readPackIndex({ fs, cache, - filename: indexFile, + filename: indexFile, + getExternalRefDelta, + }); + if (p.error) throw new InternalError(p.error) + // If the packfile DOES have the oid we're looking for... + if (p.offsets.has(oid)) { + return true + } + } + // Failed to find it + return false +} + +async function hasObject({ + fs, + cache, + gitdir, + oid, + format = 'content', +}) { + // Curry the current read method so that the packfile un-deltification + // process can acquire external ref-deltas. + const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); + + // Look for it in the loose object directory. + let result = await hasObjectLoose({ fs, gitdir, oid }); + // Check to see if it's in a packfile. + if (!result) { + result = await hasObjectPacked({ + fs, + cache, + gitdir, + oid, getExternalRefDelta, }); - if (p.error) throw new InternalError(p.error) - // Search through the list of oids in the packfile - for (const oid of p.offsets.keys()) { - if (oid.startsWith(short)) results.push(oid); - } } - return results + // Finally + return result } -async function _expandOid({ fs, cache, gitdir, oid: short }) { - // Curry the current read method so that the packfile un-deltification - // process can acquire external ref-deltas. - const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); +// TODO: make a function that just returns obCount. 
then emptyPackfile = () => sizePack(pack) === 0 +function emptyPackfile(pack) { + const pheader = '5041434b'; + const version = '00000002'; + const obCount = '00000000'; + const header = pheader + version + obCount; + return pack.slice(0, 12).toString('hex') === header +} - const results1 = await expandOidLoose({ fs, gitdir, oid: short }); - const results2 = await expandOidPacked({ - fs, - cache, - gitdir, - oid: short, - getExternalRefDelta, - }); - const results = results1.concat(results2); +function filterCapabilities(server, client) { + const serverNames = server.map(cap => cap.split('=', 1)[0]); + return client.filter(cap => { + const name = cap.split('=', 1)[0]; + return serverNames.includes(name) + }) +} - if (results.length === 1) { - return results[0] +const pkg = { + name: 'isomorphic-git', + version: '1.11.2', + agent: 'git/isomorphic-git@1.11.2', +}; + +class FIFO { + constructor() { + this._queue = []; } - if (results.length > 1) { - throw new AmbiguousError('oids', short, results) + + write(chunk) { + if (this._ended) { + throw Error('You cannot write to a FIFO that has already been ended!') + } + if (this._waiting) { + const resolve = this._waiting; + this._waiting = null; + resolve({ value: chunk }); + } else { + this._queue.push(chunk); + } } - throw new NotFoundError(`an object matching "${short}"`) -} -// @ts-check + end() { + this._ended = true; + if (this._waiting) { + const resolve = this._waiting; + this._waiting = null; + resolve({ done: true }); + } + } -/** - * Expand and resolve a short oid into a full oid - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The shortened oid prefix to expand (like "0414d2a") - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully with the full oid (like "0414d2a286d7bbc7a4a326a61c1f9f888a8ab87f") - * - * @example - * let oid = await git.expandOid({ fs, dir: '/tutorial', oid: '0414d2a'}) - * console.log(oid) - * - */ -async function expandOid({ - fs, - dir, - gitdir = join(dir, '.git'), - oid, - cache = {}, -}) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('oid', oid); - return await _expandOid({ - fs: new FileSystem(fs), - cache, - gitdir, - oid, + destroy(err) { + this._ended = true; + this.error = err; + } + + async next() { + if (this._queue.length > 0) { + return { value: this._queue.shift() } + } + if (this._ended) { + return { done: true } + } + if (this._waiting) { + throw Error( + 'You cannot call read until the previous call to read has returned!' + ) + } + return new Promise(resolve => { + this._waiting = resolve; }) - } catch (err) { - err.caller = 'git.expandOid'; - throw err } } -// @ts-check +// Note: progress messages are designed to be written directly to the terminal, +// so they are often sent with just a carriage return to overwrite the last line of output. +// But there are also messages delimited with newlines. +// I also include CRLF just in case. 
+function findSplit(str) { + const r = str.indexOf('\r'); + const n = str.indexOf('\n'); + if (r === -1 && n === -1) return -1 + if (r === -1) return n + 1 // \n + if (n === -1) return r + 1 // \r + if (n === r + 1) return n + 1 // \r\n + return Math.min(r, n) + 1 // \r or \n +} -/** - * Expand an abbreviated ref to its full name - * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - The ref to expand (like "v1.0.0") - * - * @returns {Promise} Resolves successfully with a full ref name ("refs/tags/v1.0.0") - * - * @example - * let fullRef = await git.expandRef({ fs, dir: '/tutorial', ref: 'main'}) - * console.log(fullRef) - * - */ -async function expandRef({ fs, dir, gitdir = join(dir, '.git'), ref }) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('ref', ref); - return await GitRefManager.expand({ - fs: new FileSystem(fs), - gitdir, - ref, - }) - } catch (err) { - err.caller = 'git.expandRef'; - throw err - } +function splitLines(input) { + const output = new FIFO(); + let tmp = '' + ;(async () => { + await forAwait(input, chunk => { + chunk = chunk.toString('utf8'); + tmp += chunk; + while (true) { + const i = findSplit(tmp); + if (i === -1) break + output.write(tmp.slice(0, i)); + tmp = tmp.slice(i); + } + }); + if (tmp.length > 0) { + output.write(tmp); + } + output.end(); + })(); + return output } -// @ts-check +/* +If 'side-band' or 'side-band-64k' capabilities have been specified by +the client, the server will send the packfile data multiplexed. -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string[]} args.oids - * - */ -async function _findMergeBase({ fs, cache, gitdir, oids }) { - // Note: right now, the tests are geared so that the output should match that of - // `git merge-base --all --octopus` - // because without the --octopus flag, git's output seems to depend on the ORDER of the oids, - // and computing virtual merge bases is just too much for me to fathom right now. +Each packet starting with the packet-line length of the amount of data +that follows, followed by a single byte specifying the sideband the +following data is coming in on. - // If we start N independent walkers, one at each of the given `oids`, and walk backwards - // through ancestors, eventually we'll discover a commit where each one of these N walkers - // has passed through. So we just need to keep track of which walkers have visited each commit - // until we find a commit that N distinct walkers has visited. - const visits = {}; - const passes = oids.length; - let heads = oids.map((oid, index) => ({ index, oid })); - while (heads.length) { - // Count how many times we've passed each commit - const result = new Set(); - for (const { oid, index } of heads) { - if (!visits[oid]) visits[oid] = new Set(); - visits[oid].add(index); - if (visits[oid].size === passes) { - result.add(oid); +In 'side-band' mode, it will send up to 999 data bytes plus 1 control +code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' +mode it will send up to 65519 data bytes plus 1 control code, for a +total of up to 65520 bytes in a pkt-line. + +The sideband byte will be a '1', '2' or a '3'. 
Sideband '1' will contain +packfile data, sideband '2' will be used for progress information that the +client will generally print to stderr and sideband '3' is used for error +information. + +If no 'side-band' capability was specified, the server will stream the +entire packfile without multiplexing. +*/ + +class GitSideBand { + static demux(input) { + const read = GitPktLine.streamReader(input); + // And now for the ridiculous side-band or side-band-64k protocol + const packetlines = new FIFO(); + const packfile = new FIFO(); + const progress = new FIFO(); + // TODO: Use a proper through stream? + const nextBit = async function() { + const line = await read(); + // Skip over flush packets + if (line === null) return nextBit() + // A made up convention to signal there's no more to read. + if (line === true) { + packetlines.end(); + progress.end(); + packfile.end(); + return } - } - if (result.size > 0) { - return [...result] - } - // We haven't found a common ancestor yet - const newheads = new Map(); - for (const { oid, index } of heads) { - try { - const { object } = await _readObject({ fs, cache, gitdir, oid }); - const commit = GitCommit.from(object); - const { parent } = commit.parseHeaders(); - for (const oid of parent) { - if (!visits[oid] || !visits[oid].has(index)) { - newheads.set(oid + ':' + index, { oid, index }); - } + // Examine first byte to determine which output "stream" to use + switch (line[0]) { + case 1: { + // pack data + packfile.write(line.slice(1)); + break + } + case 2: { + // progress message + progress.write(line.slice(1)); + break + } + case 3: { + // fatal error message just before stream aborts + const error = line.slice(1); + progress.write(error); + packfile.destroy(new Error(error.toString('utf8'))); + return + } + default: { + // Not part of the side-band-64k protocol + packetlines.write(line.slice(0)); } - } catch (err) { - // do nothing } + // Careful not to blow up the stack. + // I think Promises in a tail-call position should be OK. + nextBit(); + }; + nextBit(); + return { + packetlines, + packfile, + progress, } - heads = Array.from(newheads.values()); } - return [] + // static mux ({ + // protocol, // 'side-band' or 'side-band-64k' + // packetlines, + // packfile, + // progress, + // error + // }) { + // const MAX_PACKET_LENGTH = protocol === 'side-band-64k' ? 
999 : 65519 + // let output = new PassThrough() + // packetlines.on('data', data => { + // if (data === null) { + // output.write(GitPktLine.flush()) + // } else { + // output.write(GitPktLine.encode(data)) + // } + // }) + // let packfileWasEmpty = true + // let packfileEnded = false + // let progressEnded = false + // let errorEnded = false + // let goodbye = Buffer.concat([ + // GitPktLine.encode(Buffer.from('010A', 'hex')), + // GitPktLine.flush() + // ]) + // packfile + // .on('data', data => { + // packfileWasEmpty = false + // const buffers = splitBuffer(data, MAX_PACKET_LENGTH) + // for (const buffer of buffers) { + // output.write( + // GitPktLine.encode(Buffer.concat([Buffer.from('01', 'hex'), buffer])) + // ) + // } + // }) + // .on('end', () => { + // packfileEnded = true + // if (!packfileWasEmpty) output.write(goodbye) + // if (progressEnded && errorEnded) output.end() + // }) + // progress + // .on('data', data => { + // const buffers = splitBuffer(data, MAX_PACKET_LENGTH) + // for (const buffer of buffers) { + // output.write( + // GitPktLine.encode(Buffer.concat([Buffer.from('02', 'hex'), buffer])) + // ) + // } + // }) + // .on('end', () => { + // progressEnded = true + // if (packfileEnded && errorEnded) output.end() + // }) + // error + // .on('data', data => { + // const buffers = splitBuffer(data, MAX_PACKET_LENGTH) + // for (const buffer of buffers) { + // output.write( + // GitPktLine.encode(Buffer.concat([Buffer.from('03', 'hex'), buffer])) + // ) + // } + // }) + // .on('end', () => { + // errorEnded = true + // if (progressEnded && packfileEnded) output.end() + // }) + // return output + // } } -const LINEBREAKS = /^.*(\r?\n|$)/gm; +async function parseUploadPackResponse(stream) { + const { packetlines, packfile, progress } = GitSideBand.demux(stream); + const shallows = []; + const unshallows = []; + const acks = []; + let nak = false; + let done = false; + return new Promise((resolve, reject) => { + // Parse the response + forAwait(packetlines, data => { + const line = data.toString('utf8').trim(); + if (line.startsWith('shallow')) { + const oid = line.slice(-41).trim(); + if (oid.length !== 40) { + reject(new InvalidOidError(oid)); + } + shallows.push(oid); + } else if (line.startsWith('unshallow')) { + const oid = line.slice(-41).trim(); + if (oid.length !== 40) { + reject(new InvalidOidError(oid)); + } + unshallows.push(oid); + } else if (line.startsWith('ACK')) { + const [, oid, status] = line.split(' '); + acks.push({ oid, status }); + if (!status) done = true; + } else if (line.startsWith('NAK')) { + nak = true; + done = true; + } + if (done) { + resolve({ shallows, unshallows, acks, nak, packfile, progress }); + } + }); + }) +} -function mergeFile({ - ourContent, - baseContent, - theirContent, - ourName = 'ours', - baseName = 'base', - theirName = 'theirs', - format = 'diff', - markerSize = 7, +function writeUploadPackRequest({ + capabilities = [], + wants = [], + haves = [], + shallows = [], + depth = null, + since = null, + exclude = [], }) { - const ours = ourContent.match(LINEBREAKS); - const base = baseContent.match(LINEBREAKS); - const theirs = theirContent.match(LINEBREAKS); - - // Here we let the diff3 library do the heavy lifting. 
- const result = diff3Merge(ours, base, theirs); - - // Here we note whether there are conflicts and format the results - let mergedText = ''; - let cleanMerge = true; - for (const item of result) { - if (item.ok) { - mergedText += item.ok.join(''); - } - if (item.conflict) { - cleanMerge = false; - mergedText += `${'<'.repeat(markerSize)} ${ourName}\n`; - mergedText += item.conflict.a.join(''); - if (format === 'diff3') { - mergedText += `${'|'.repeat(markerSize)} ${baseName}\n`; - mergedText += item.conflict.o.join(''); - } - mergedText += `${'='.repeat(markerSize)}\n`; - mergedText += item.conflict.b.join(''); - mergedText += `${'>'.repeat(markerSize)} ${theirName}\n`; - } + const packstream = []; + wants = [...new Set(wants)]; // remove duplicates + let firstLineCapabilities = ` ${capabilities.join(' ')}`; + for (const oid of wants) { + packstream.push(GitPktLine.encode(`want ${oid}${firstLineCapabilities}\n`)); + firstLineCapabilities = ''; } - return { cleanMerge, mergedText } + for (const oid of shallows) { + packstream.push(GitPktLine.encode(`shallow ${oid}\n`)); + } + if (depth !== null) { + packstream.push(GitPktLine.encode(`deepen ${depth}\n`)); + } + if (since !== null) { + packstream.push( + GitPktLine.encode(`deepen-since ${Math.floor(since.valueOf() / 1000)}\n`) + ); + } + for (const oid of exclude) { + packstream.push(GitPktLine.encode(`deepen-not ${oid}\n`)); + } + packstream.push(GitPktLine.flush()); + for (const oid of haves) { + packstream.push(GitPktLine.encode(`have ${oid}\n`)); + } + packstream.push(GitPktLine.encode(`done\n`)); + return packstream } // @ts-check /** - * Create a merged tree * - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ourOid - The SHA-1 object id of our tree - * @param {string} args.baseOid - The SHA-1 object id of the base tree - * @param {string} args.theirOid - The SHA-1 object id of their tree - * @param {string} [args.ourName='ours'] - The name to use in conflicted files for our hunks - * @param {string} [args.baseName='base'] - The name to use in conflicted files (in diff3 format) for the base hunks - * @param {string} [args.theirName='theirs'] - The name to use in conflicted files for their hunks - * @param {boolean} [args.dryRun=false] + * @typedef {object} FetchResult - The object returned has the following schema: + * @property {string | null} defaultBranch - The branch that is cloned if no branch is specified + * @property {string | null} fetchHead - The SHA-1 object id of the fetched head commit + * @property {string | null} fetchHeadDescription - a textual description of the branch that was fetched + * @property {Object} [headers] - The HTTP response headers returned by the git server + * @property {string[]} [pruned] - A list of branches that were pruned, if you provided the `prune` parameter * - * @returns {Promise} - The SHA-1 object id of the merged tree + */ + +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {HttpClient} args.http + * @param {ProgressCallback} [args.onProgress] + * @param {MessageCallback} [args.onMessage] + * @param {AuthCallback} [args.onAuth] + * @param {AuthFailureCallback} [args.onAuthFailure] + * @param {AuthSuccessCallback} 
[args.onAuthSuccess] + * @param {string} args.gitdir + * @param {string|void} [args.url] + * @param {string} [args.corsProxy] + * @param {string} [args.ref] + * @param {string} [args.remoteRef] + * @param {string} [args.remote] + * @param {boolean} [args.singleBranch = false] + * @param {boolean} [args.tags = false] + * @param {number} [args.depth] + * @param {Date} [args.since] + * @param {string[]} [args.exclude = []] + * @param {boolean} [args.relative = false] + * @param {Object} [args.headers] + * @param {boolean} [args.prune] + * @param {boolean} [args.pruneTags] * + * @returns {Promise} + * @see FetchResult */ -async function mergeTree({ +async function _fetch({ fs, cache, - dir, - gitdir = join(dir, '.git'), - ourOid, - baseOid, - theirOid, - ourName = 'ours', - baseName = 'base', - theirName = 'theirs', - dryRun = false, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, + gitdir, + ref: _ref, + remoteRef: _remoteRef, + remote: _remote, + url: _url, + corsProxy, + depth = null, + since = null, + exclude = [], + relative = false, + tags = false, + singleBranch = false, + headers = {}, + prune = false, + pruneTags = false, }) { - const ourTree = TREE({ ref: ourOid }); - const baseTree = TREE({ ref: baseOid }); - const theirTree = TREE({ ref: theirOid }); - - const results = await _walk({ - fs, - cache, - dir, - gitdir, - trees: [ourTree, baseTree, theirTree], - map: async function(filepath, [ours, base, theirs]) { - const path = basename(filepath); - // What we did, what they did - const ourChange = await modified(ours, base); - const theirChange = await modified(theirs, base); - switch (`${ourChange}-${theirChange}`) { - case 'false-false': { - return { - mode: await base.mode(), - path, - oid: await base.oid(), - type: await base.type(), - } - } - case 'false-true': { - return theirs - ? { - mode: await theirs.mode(), - path, - oid: await theirs.oid(), - type: await theirs.type(), - } - : undefined - } - case 'true-false': { - return ours - ? { - mode: await ours.mode(), - path, - oid: await ours.oid(), - type: await ours.type(), - } - : undefined - } - case 'true-true': { - // Modifications - if ( - ours && - base && - theirs && - (await ours.type()) === 'blob' && - (await base.type()) === 'blob' && - (await theirs.type()) === 'blob' - ) { - return mergeBlobs({ - fs, - gitdir, - path, - ours, - base, - theirs, - ourName, - baseName, - theirName, - }) - } - // all other types of conflicts fail - throw new MergeNotSupportedError() - } - } - }, - /** - * @param {TreeEntry} [parent] - * @param {Array} children - */ - reduce: async (parent, children) => { - const entries = children.filter(Boolean); // remove undefineds - - // if the parent was deleted, the children have to go - if (!parent) return + const ref = _ref || (await _currentBranch({ fs, gitdir, test: true })); + const config = await GitConfigManager.get({ fs, gitdir }); + // Figure out what remote to use. + const remote = + _remote || (ref && (await config.get(`branch.${ref}.remote`))) || 'origin'; + // Lookup the URL for the given remote. + const url = _url || (await config.get(`remote.${remote}.url`)); + if (typeof url === 'undefined') { + throw new MissingParameterError('remote OR url') + } + // Figure out what remote ref to use. 
+ const remoteRef = + _remoteRef || + (ref && (await config.get(`branch.${ref}.merge`))) || + _ref || + 'HEAD'; - // automatically delete directories if they have been emptied - if (parent && parent.type === 'tree' && entries.length === 0) return + if (corsProxy === undefined) { + corsProxy = await config.get('http.corsProxy'); + } - if (entries.length > 0) { - const tree = new GitTree(entries); - const object = tree.toObject(); - const oid = await _writeObject({ - fs, - gitdir, - type: 'tree', - object, - dryRun, - }); - parent.oid = oid; + const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); + const remoteHTTP = await GitRemoteHTTP.discover({ + http, + onAuth, + onAuthSuccess, + onAuthFailure, + corsProxy, + service: 'git-upload-pack', + url, + headers, + protocolVersion: 1, + }); + const auth = remoteHTTP.auth; // hack to get new credentials from CredentialManager API + const remoteRefs = remoteHTTP.refs; + // For the special case of an empty repository with no refs, return null. + if (remoteRefs.size === 0) { + return { + defaultBranch: null, + fetchHead: null, + fetchHeadDescription: null, + } + } + // Check that the remote supports the requested features + if (depth !== null && !remoteHTTP.capabilities.has('shallow')) { + throw new RemoteCapabilityError('shallow', 'depth') + } + if (since !== null && !remoteHTTP.capabilities.has('deepen-since')) { + throw new RemoteCapabilityError('deepen-since', 'since') + } + if (exclude.length > 0 && !remoteHTTP.capabilities.has('deepen-not')) { + throw new RemoteCapabilityError('deepen-not', 'exclude') + } + if (relative === true && !remoteHTTP.capabilities.has('deepen-relative')) { + throw new RemoteCapabilityError('deepen-relative', 'relative') + } + // Figure out the SHA for the requested ref + const { oid, fullref } = GitRefManager.resolveAgainstMap({ + ref: remoteRef, + map: remoteRefs, + }); + // Filter out refs we want to ignore: only keep ref we're cloning, HEAD, branches, and tags (if we're keeping them) + for (const remoteRef of remoteRefs.keys()) { + if ( + remoteRef === fullref || + remoteRef === 'HEAD' || + remoteRef.startsWith('refs/heads/') || + (tags && remoteRef.startsWith('refs/tags/')) + ) { + continue + } + remoteRefs.delete(remoteRef); + } + // Assemble the application/x-git-upload-pack-request + const capabilities = filterCapabilities( + [...remoteHTTP.capabilities], + [ + 'multi_ack_detailed', + 'no-done', + 'side-band-64k', + // Note: I removed 'thin-pack' option since our code doesn't "fatten" packfiles, + // which is necessary for compatibility with git. It was the cause of mysterious + // 'fatal: pack has [x] unresolved deltas' errors that plagued us for some time. + // isomorphic-git is perfectly happy with thin packfiles in .git/objects/pack but + // canonical git it turns out is NOT. + 'ofs-delta', + `agent=${pkg.agent}`, + ] + ); + if (relative) capabilities.push('deepen-relative'); + // Start figuring out which oids from the remote we want to request + const wants = singleBranch ? [oid] : remoteRefs.values(); + // Come up with a reasonable list of oids to tell the remote we already have + // (preferably oids that are close ancestors of the branch heads we're fetching) + const haveRefs = singleBranch + ? 
[ref] + : await GitRefManager.listRefs({ + fs, + gitdir, + filepath: `refs`, + }); + let haves = []; + for (let ref of haveRefs) { + try { + ref = await GitRefManager.expand({ fs, gitdir, ref }); + const oid = await GitRefManager.resolve({ fs, gitdir, ref }); + if (await hasObject({ fs, cache, gitdir, oid })) { + haves.push(oid); } - return parent - }, + } catch (err) {} + } + haves = [...new Set(haves)]; + const oids = await GitShallowManager.read({ fs, gitdir }); + const shallows = remoteHTTP.capabilities.has('shallow') ? [...oids] : []; + const packstream = writeUploadPackRequest({ + capabilities, + wants, + haves, + shallows, + depth, + since, + exclude, }); - return results.oid -} - -/** - * - * @param {WalkerEntry} entry - * @param {WalkerEntry} base - * - */ -async function modified(entry, base) { - if (!entry && !base) return false - if (entry && !base) return true - if (!entry && base) return true - if ((await entry.type()) === 'tree' && (await base.type()) === 'tree') { - return false + // CodeCommit will hang up if we don't send a Content-Length header + // so we can't stream the body. + const packbuffer = Buffer.from(await collect(packstream)); + const raw = await GitRemoteHTTP.connect({ + http, + onProgress, + corsProxy, + service: 'git-upload-pack', + url, + auth, + body: [packbuffer], + headers, + }); + const response = await parseUploadPackResponse(raw.body); + if (raw.headers) { + response.headers = raw.headers; } - if ( - (await entry.type()) === (await base.type()) && - (await entry.mode()) === (await base.mode()) && - (await entry.oid()) === (await base.oid()) - ) { - return false + // Apply all the 'shallow' and 'unshallow' commands + for (const oid of response.shallows) { + if (!oids.has(oid)) { + // this is in a try/catch mostly because my old test fixtures are missing objects + try { + // server says it's shallow, but do we have the parents? + const { object } = await _readObject({ fs, cache, gitdir, oid }); + const commit = new GitCommit(object); + const hasParents = await Promise.all( + commit + .headers() + .parent.map(oid => hasObject({ fs, cache, gitdir, oid })) + ); + const haveAllParents = + hasParents.length === 0 || hasParents.every(has => has); + if (!haveAllParents) { + oids.add(oid); + } + } catch (err) { + oids.add(oid); + } + } } - return true -} - -/** - * - * @param {Object} args - * @param {import('../models/FileSystem').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.path - * @param {WalkerEntry} args.ours - * @param {WalkerEntry} args.base - * @param {WalkerEntry} args.theirs - * @param {string} [args.ourName] - * @param {string} [args.baseName] - * @param {string} [args.theirName] - * @param {string} [args.format] - * @param {number} [args.markerSize] - * @param {boolean} [args.dryRun = false] - * - */ -async function mergeBlobs({ - fs, - gitdir, - path, - ours, - base, - theirs, - ourName, - theirName, - baseName, - format, - markerSize, - dryRun, -}) { - const type = 'blob'; - // Compute the new mode. - // Since there are ONLY two valid blob modes ('100755' and '100644') it boils down to this - const mode = - (await base.mode()) === (await ours.mode()) - ? 
await theirs.mode() - : await ours.mode(); - // The trivial case: nothing to merge except maybe mode - if ((await ours.oid()) === (await theirs.oid())) { - return { mode, path, oid: await ours.oid(), type } + for (const oid of response.unshallows) { + oids.delete(oid); } - // if only one side made oid changes, return that side's oid - if ((await ours.oid()) === (await base.oid())) { - return { mode, path, oid: await theirs.oid(), type } + await GitShallowManager.write({ fs, gitdir, oids }); + // Update local remote refs + if (singleBranch) { + const refs = new Map([[fullref, oid]]); + // But wait, maybe it was a symref, like 'HEAD'! + // We need to save all the refs in the symref chain (sigh). + const symrefs = new Map(); + let bail = 10; + let key = fullref; + while (bail--) { + const value = remoteHTTP.symrefs.get(key); + if (value === undefined) break + symrefs.set(key, value); + key = value; + } + // final value must not be a symref but a real ref + const realRef = remoteRefs.get(key); + // There may be no ref at all if we've fetched a specific commit hash + if (realRef) { + refs.set(key, realRef); + } + const { pruned } = await GitRefManager.updateRemoteRefs({ + fs, + gitdir, + remote, + refs, + symrefs, + tags, + prune, + }); + if (prune) { + response.pruned = pruned; + } + } else { + const { pruned } = await GitRefManager.updateRemoteRefs({ + fs, + gitdir, + remote, + refs: remoteRefs, + symrefs: remoteHTTP.symrefs, + tags, + prune, + pruneTags, + }); + if (prune) { + response.pruned = pruned; + } } - if ((await theirs.oid()) === (await base.oid())) { - return { mode, path, oid: await ours.oid(), type } + // We need this value later for the `clone` command. + response.HEAD = remoteHTTP.symrefs.get('HEAD'); + // AWS CodeCommit doesn't list HEAD as a symref, but we can reverse engineer it + // Find the SHA of the branch called HEAD + if (response.HEAD === undefined) { + const { oid } = GitRefManager.resolveAgainstMap({ + ref: 'HEAD', + map: remoteRefs, + }); + // Use the name of the first branch that's not called HEAD that has + // the same SHA as the branch called HEAD. + for (const [key, value] of remoteRefs.entries()) { + if (key !== 'HEAD' && value === oid) { + response.HEAD = key; + break + } + } } - // if both sides made changes do a merge - const { mergedText, cleanMerge } = mergeFile({ - ourContent: Buffer.from(await ours.content()).toString('utf8'), - baseContent: Buffer.from(await base.content()).toString('utf8'), - theirContent: Buffer.from(await theirs.content()).toString('utf8'), - ourName, - theirName, - baseName, - format, - markerSize, - }); - if (!cleanMerge) { - // all other types of conflicts fail - throw new MergeNotSupportedError() + const noun = fullref.startsWith('refs/tags') ? 
'tag' : 'branch'; + response.FETCH_HEAD = { + oid, + description: `${noun} '${abbreviateRef(fullref)}' of ${url}`, + }; + + if (onProgress || onMessage) { + const lines = splitLines(response.progress); + forAwait(lines, async line => { + if (onMessage) await onMessage(line); + if (onProgress) { + const matches = line.match(/([^:]*).*\((\d+?)\/(\d+?)\)/); + if (matches) { + await onProgress({ + phase: matches[1].trim(), + loaded: parseInt(matches[2], 10), + total: parseInt(matches[3], 10), + }); + } + } + }); } - const oid = await _writeObject({ - fs, - gitdir, - type: 'blob', - object: Buffer.from(mergedText, 'utf8'), - dryRun, - }); - return { mode, path, oid, type } + const packfile = Buffer.from(await collect(response.packfile)); + const packfileSha = packfile.slice(-20).toString('hex'); + const res = { + defaultBranch: response.HEAD, + fetchHead: response.FETCH_HEAD.oid, + fetchHeadDescription: response.FETCH_HEAD.description, + }; + if (response.headers) { + res.headers = response.headers; + } + if (prune) { + res.pruned = response.pruned; + } + // This is a quick fix for the empty .git/objects/pack/pack-.pack file error, + // which due to the way `git-list-pack` works causes the program to hang when it tries to read it. + // TODO: Longer term, we should actually: + // a) NOT concatenate the entire packfile into memory (line 78), + // b) compute the SHA of the stream except for the last 20 bytes, using the same library used in push.js, and + // c) compare the computed SHA with the last 20 bytes of the stream before saving to disk, and throwing a "packfile got corrupted during download" error if the SHA doesn't match. + if (packfileSha !== '' && !emptyPackfile(packfile)) { + res.packfile = `objects/pack/pack-${packfileSha}.pack`; + const fullpath = join(gitdir, res.packfile); + await fs.write(fullpath, packfile); + const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); + const idx = await GitPackIndex.fromPack({ + pack: packfile, + getExternalRefDelta, + onProgress, + }); + await fs.write(fullpath.replace(/\.pack$/, '.idx'), await idx.toBuffer()); + } + return res } // @ts-check -// import diff3 from 'node-diff3' /** + * Initialize a new repository * - * @typedef {Object} MergeResult - Returns an object with a schema like this: - * @property {string} [oid] - The SHA-1 object id that is now at the head of the branch. Absent only if `dryRun` was specified and `mergeCommit` is true. - * @property {boolean} [alreadyMerged] - True if the branch was already merged so no changes were made - * @property {boolean} [fastForward] - True if it was a fast-forward merge - * @property {boolean} [mergeCommit] - True if merge resulted in a merge commit - * @property {string} [tree] - The SHA-1 object id of the tree resulting from a merge commit - * + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} [args.dir] + * @param {string} [args.gitdir] + * @param {boolean} [args.bare = false] + * @param {string} [args.defaultBranch = 'master'] + * @returns {Promise} */ +async function _init({ + fs, + bare = false, + dir, + gitdir = bare ? 
dir : join(dir, '.git'), + defaultBranch = 'master', +}) { + // Don't overwrite an existing config + if (await fs.exists(gitdir + '/config')) return + + let folders = [ + 'hooks', + 'info', + 'objects/info', + 'objects/pack', + 'refs/heads', + 'refs/tags', + ]; + folders = folders.map(dir => gitdir + '/' + dir); + for (const folder of folders) { + await fs.mkdir(folder); + } + + await fs.write( + gitdir + '/config', + '[core]\n' + + '\trepositoryformatversion = 0\n' + + '\tfilemode = false\n' + + `\tbare = ${bare}\n` + + (bare ? '' : '\tlogallrefupdates = true\n') + + '\tsymlinks = false\n' + + '\tignorecase = true\n' + ); + await fs.write(gitdir + '/HEAD', `ref: refs/heads/${defaultBranch}\n`); +} + +// @ts-check /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs * @param {object} args.cache + * @param {HttpClient} args.http + * @param {ProgressCallback} [args.onProgress] + * @param {MessageCallback} [args.onMessage] + * @param {AuthCallback} [args.onAuth] + * @param {AuthFailureCallback} [args.onAuthFailure] + * @param {AuthSuccessCallback} [args.onAuthSuccess] + * @param {string} [args.dir] * @param {string} args.gitdir - * @param {string} [args.ours] - * @param {string} args.theirs - * @param {boolean} args.fastForwardOnly - * @param {boolean} args.dryRun - * @param {boolean} args.noUpdateBranch - * @param {string} [args.message] - * @param {Object} args.author - * @param {string} args.author.name - * @param {string} args.author.email - * @param {number} args.author.timestamp - * @param {number} args.author.timezoneOffset - * @param {Object} args.committer - * @param {string} args.committer.name - * @param {string} args.committer.email - * @param {number} args.committer.timestamp - * @param {number} args.committer.timezoneOffset - * @param {string} [args.signingKey] - * @param {SignCallback} [args.onSign] - a PGP signing implementation + * @param {string} args.url + * @param {string} args.corsProxy + * @param {string} args.ref + * @param {boolean} args.singleBranch + * @param {boolean} args.noCheckout + * @param {boolean} args.noTags + * @param {string} args.remote + * @param {number} args.depth + * @param {Date} args.since + * @param {string[]} args.exclude + * @param {boolean} args.relative + * @param {Object} args.headers * - * @returns {Promise} Resolves to a description of the merge operation + * @returns {Promise} Resolves successfully when clone completes * */ -async function _merge({ +async function _clone({ fs, cache, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, + dir, gitdir, - ours, - theirs, - fastForwardOnly = false, - dryRun = false, - noUpdateBranch = false, - message, - author, - committer, - signingKey, - onSign, + url, + corsProxy, + ref, + remote, + depth, + since, + exclude, + relative, + singleBranch, + noCheckout, + noTags, + headers, }) { - if (ours === undefined) { - ours = await _currentBranch({ fs, gitdir, fullname: true }); - } - ours = await GitRefManager.expand({ - fs, - gitdir, - ref: ours, - }); - theirs = await GitRefManager.expand({ - fs, - gitdir, - ref: theirs, - }); - const ourOid = await GitRefManager.resolve({ - fs, - gitdir, - ref: ours, - }); - const theirOid = await GitRefManager.resolve({ - fs, - gitdir, - ref: theirs, - }); - // find most recent common ancestor of ref a and ref b - const baseOids = await _findMergeBase({ - fs, - cache, - gitdir, - oids: [ourOid, theirOid], - }); - if (baseOids.length !== 1) { - throw new MergeNotSupportedError() - } - const baseOid = 
baseOids[0]; - // handle fast-forward case - if (baseOid === theirOid) { - return { - oid: ourOid, - alreadyMerged: true, - } - } - if (baseOid === ourOid) { - if (!dryRun && !noUpdateBranch) { - await GitRefManager.writeRef({ fs, gitdir, ref: ours, value: theirOid }); - } - return { - oid: theirOid, - fastForward: true, - } - } else { - // not a simple fast-forward - if (fastForwardOnly) { - throw new FastForwardError() + try { + await _init({ fs, gitdir }); + await _addRemote({ fs, gitdir, remote, url, force: false }); + if (corsProxy) { + const config = await GitConfigManager.get({ fs, gitdir }); + await config.set(`http.corsProxy`, corsProxy); + await GitConfigManager.save({ fs, gitdir, config }); } - // try a fancier merge - const tree = await mergeTree({ + const { defaultBranch, fetchHead } = await _fetch({ fs, cache, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, gitdir, - ourOid, - theirOid, - baseOid, - ourName: ours, - baseName: 'base', - theirName: theirs, - dryRun, + ref, + remote, + corsProxy, + depth, + since, + exclude, + relative, + singleBranch, + headers, + tags: !noTags, }); - if (!message) { - message = `Merge branch '${abbreviateRef(theirs)}' into ${abbreviateRef( - ours - )}`; - } - const oid = await _commit({ + if (fetchHead === null) return + ref = ref || defaultBranch; + ref = ref.replace('refs/heads/', ''); + // Checkout that branch + await _checkout({ fs, cache, + onProgress, + dir, gitdir, - message, - ref: ours, - tree, - parent: [ourOid, theirOid], - author, - committer, - signingKey, - onSign, - dryRun, - noUpdateBranch, + ref, + remote, + noCheckout, }); - return { - oid, - tree, - mergeCommit: true, - } + } catch (err) { + // Remove partial local repository, see #1283 + // Ignore any error as we are already failing. + // The catch is necessary so the original error is not masked. 
+ await fs + .rmdir(gitdir, { recursive: true, maxRetries: 10 }) + .catch(() => undefined); + throw err } } // @ts-check /** + * Clone a repository + * * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {HttpClient} args.http - * @param {ProgressCallback} [args.onProgress] - * @param {MessageCallback} [args.onMessage] - * @param {AuthCallback} [args.onAuth] - * @param {AuthFailureCallback} [args.onAuthFailure] - * @param {AuthSuccessCallback} [args.onAuthSuccess] - * @param {string} args.dir - * @param {string} args.gitdir - * @param {string} args.ref - * @param {string} [args.url] - * @param {string} [args.remote] - * @param {string} [args.remoteRef] - * @param {string} [args.corsProxy] - * @param {boolean} args.singleBranch - * @param {boolean} args.fastForwardOnly - * @param {Object} [args.headers] - * @param {Object} args.author - * @param {string} args.author.name - * @param {string} args.author.email - * @param {number} args.author.timestamp - * @param {number} args.author.timezoneOffset - * @param {Object} args.committer - * @param {string} args.committer.name - * @param {string} args.committer.email - * @param {number} args.committer.timestamp - * @param {number} args.committer.timezoneOffset - * @param {string} [args.signingKey] + * @param {FsClient} args.fs - a file system implementation + * @param {HttpClient} args.http - an HTTP client + * @param {ProgressCallback} [args.onProgress] - optional progress event callback + * @param {MessageCallback} [args.onMessage] - optional message event callback + * @param {AuthCallback} [args.onAuth] - optional auth fill callback + * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback + * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback + * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.url - The URL of the remote repository + * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Value is stored in the git config file for that repo. + * @param {string} [args.ref] - Which branch to checkout. By default this is the designated "main branch" of the repository. + * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. + * @param {boolean} [args.noCheckout = false] - If true, clone will only fetch the repo, not check out a branch. Skipping checkout can save a lot of time normally spent writing files to disk. + * @param {boolean} [args.noTags = false] - By default clone will fetch all tags. `noTags` disables that behavior. + * @param {string} [args.remote = 'origin'] - What to name the remote that is created. + * @param {number} [args.depth] - Integer. Determines how much of the git repository's history to retrieve + * @param {Date} [args.since] - Only fetch commits created after the given date. Mutually exclusive with `depth`. + * @param {string[]} [args.exclude = []] - A list of branches or tags. Instructs the remote server not to send us any commits reachable from these refs. + * @param {boolean} [args.relative = false] - Changes the meaning of `depth` to be measured from the current shallow depth rather than from the branch tip. 
+ * @param {Object} [args.headers = {}] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config + * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully when pull operation completes + * @returns {Promise} Resolves successfully when clone completes + * + * @example + * await git.clone({ + * fs, + * http, + * dir: '/tutorial', + * corsProxy: 'https://cors.isomorphic-git.org', + * url: 'https://github.com/isomorphic-git/isomorphic-git', + * singleBranch: true, + * depth: 1 + * }) + * console.log('done') * */ -async function _pull({ +async function clone({ fs, - cache, http, onProgress, onMessage, @@ -26448,32 +25596,32 @@ async function _pull({ onAuthSuccess, onAuthFailure, dir, - gitdir, - ref, + gitdir = join(dir, '.git'), url, - remote, - remoteRef, - fastForwardOnly, - corsProxy, - singleBranch, - headers, - author, - committer, - signingKey, + corsProxy = undefined, + ref = undefined, + remote = 'origin', + depth = undefined, + since = undefined, + exclude = [], + relative = false, + singleBranch = false, + noCheckout = false, + noTags = false, + headers = {}, + cache = {}, }) { try { - // If ref is undefined, use 'HEAD' - if (!ref) { - const head = await _currentBranch({ fs, gitdir }); - // TODO: use a better error. - if (!head) { - throw new MissingParameterError('ref') - } - ref = head; + assertParameter('fs', fs); + assertParameter('http', http); + assertParameter('gitdir', gitdir); + if (!noCheckout) { + assertParameter('dir', dir); } + assertParameter('url', url); - const { fetchHead, fetchHeadDescription } = await _fetch({ - fs, + return await _clone({ + fs: new FileSystem(fs), cache, http, onProgress, @@ -26481,42 +25629,123 @@ async function _pull({ onAuth, onAuthSuccess, onAuthFailure, + dir, gitdir, + url, corsProxy, ref, - url, remote, - remoteRef, + depth, + since, + exclude, + relative, singleBranch, + noCheckout, + noTags, headers, - }); - // Merge the remote tracking branch into the local one. - await _merge({ + }) + } catch (err) { + err.caller = 'git.clone'; + throw err + } +} + +// @ts-check + +/** + * Create a new commit + * + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation + * @param {SignCallback} [args.onSign] - a PGP signing implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.message - The commit message to use. + * @param {Object} [args.author] - The details about the author. + * @param {string} [args.author.name] - Default is `user.name` config. + * @param {string} [args.author.email] - Default is `user.email` config. + * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {Object} [args.committer = author] - The details about the commit committer, in the same format as the author parameter. If not specified, the author details are used. + * @param {string} [args.committer.name] - Default is `user.name` config. + * @param {string} [args.committer.email] - Default is `user.email` config. 
+ * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {string} [args.signingKey] - Sign the tag object using this private PGP key. + * @param {boolean} [args.dryRun = false] - If true, simulates making a commit so you can test whether it would succeed. Implies `noUpdateBranch`. + * @param {boolean} [args.noUpdateBranch = false] - If true, does not update the branch pointer after creating the commit. + * @param {string} [args.ref] - The fully expanded name of the branch to commit to. Default is the current branch pointed to by HEAD. (TODO: fix it so it can expand branch names without throwing if the branch doesn't exist yet.) + * @param {string[]} [args.parent] - The SHA-1 object ids of the commits to use as parents. If not specified, the commit pointed to by `ref` is used. + * @param {string} [args.tree] - The SHA-1 object id of the tree to use. If not specified, a new tree object is created from the current git index. + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly created commit. + * + * @example + * let sha = await git.commit({ + * fs, + * dir: '/tutorial', + * author: { + * name: 'Mr. Test', + * email: 'mrtest@example.com', + * }, + * message: 'Added the a.txt file' + * }) + * console.log(sha) + * + */ +async function commit({ + fs: _fs, + onSign, + dir, + gitdir = join(dir, '.git'), + message, + author: _author, + committer: _committer, + signingKey, + dryRun = false, + noUpdateBranch = false, + ref, + parent, + tree, + cache = {}, +}) { + try { + assertParameter('fs', _fs); + assertParameter('message', message); + if (signingKey) { + assertParameter('onSign', onSign); + } + const fs = new FileSystem(_fs); + + const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); + if (!author) throw new MissingNameError('author') + + const committer = await normalizeCommitterObject({ fs, - cache, gitdir, - ours: ref, - theirs: fetchHead, - fastForwardOnly, - message: `Merge ${fetchHeadDescription}`, author, - committer, - signingKey, - dryRun: false, - noUpdateBranch: false, + committer: _committer, }); - await _checkout({ + if (!committer) throw new MissingNameError('committer') + + return await _commit({ fs, cache, - onProgress, - dir, + onSign, gitdir, + message, + author, + committer, + signingKey, + dryRun, + noUpdateBranch, ref, - remote, - noCheckout: false, - }); + parent, + tree, + }) } catch (err) { - err.caller = 'git.pull'; + err.caller = 'git.commit'; throw err } } @@ -26524,95 +25753,45 @@ async function _pull({ // @ts-check /** - * Like `pull`, but hard-coded with `fastForward: true` so there is no need for an `author` parameter. 
- * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {HttpClient} args.http - an HTTP client - * @param {ProgressCallback} [args.onProgress] - optional progress event callback - * @param {MessageCallback} [args.onMessage] - optional message event callback - * @param {AuthCallback} [args.onAuth] - optional auth fill callback - * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback - * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback - * @param {string} args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - Which branch to merge into. By default this is the currently checked out branch. - * @param {string} [args.url] - (Added in 1.1.0) The URL of the remote repository. The default is the value set in the git config for that remote. - * @param {string} [args.remote] - (Added in 1.1.0) If URL is not specified, determines which remote to use. - * @param {string} [args.remoteRef] - (Added in 1.1.0) The name of the branch on the remote to fetch. By default this is the configured remote tracking branch. - * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. - * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. - * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config - * @param {object} [args.cache] - a [cache](cache.md) object + * Get the name of the branch currently pointed to by .git/HEAD * - * @returns {Promise} Resolves successfully when pull operation completes + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {boolean} [args.fullname = false] - Return the full path (e.g. "refs/heads/main") instead of the abbreviated form. + * @param {boolean} [args.test = false] - If the current branch doesn't actually exist (such as right after git init) then return `undefined`. + * + * @returns {Promise} The name of the current branch or undefined if the HEAD is detached. 
* * @example - * await git.fastForward({ + * // Get the current branch name + * let branch = await git.currentBranch({ * fs, - * http, * dir: '/tutorial', - * ref: 'main', - * singleBranch: true + * fullname: false * }) - * console.log('done') + * console.log(branch) * */ -async function fastForward({ +async function currentBranch({ fs, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, dir, gitdir = join(dir, '.git'), - ref, - url, - remote, - remoteRef, - corsProxy, - singleBranch, - headers = {}, - cache = {}, + fullname = false, + test = false, }) { try { assertParameter('fs', fs); - assertParameter('http', http); assertParameter('gitdir', gitdir); - - const thisWillNotBeUsed = { - name: '', - email: '', - timestamp: Date.now(), - timezoneOffset: 0, - }; - - return await _pull({ + return await _currentBranch({ fs: new FileSystem(fs), - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, - dir, gitdir, - ref, - url, - remote, - remoteRef, - fastForwardOnly: true, - corsProxy, - singleBranch, - headers, - author: thisWillNotBeUsed, - committer: thisWillNotBeUsed, + fullname, + test, }) } catch (err) { - err.caller = 'git.fastForward'; + err.caller = 'git.currentBranch'; throw err } } @@ -26620,121 +25799,67 @@ async function fastForward({ // @ts-check /** + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.ref * - * @typedef {object} FetchResult - The object returned has the following schema: - * @property {string | null} defaultBranch - The branch that is cloned if no branch is specified - * @property {string | null} fetchHead - The SHA-1 object id of the fetched head commit - * @property {string | null} fetchHeadDescription - a textual description of the branch that was fetched - * @property {Object} [headers] - The HTTP response headers returned by the git server - * @property {string[]} [pruned] - A list of branches that were pruned, if you provided the `prune` parameter - * + * @returns {Promise} */ +async function _deleteBranch({ fs, gitdir, ref }) { + const exist = await GitRefManager.exists({ fs, gitdir, ref }); + if (!exist) { + throw new NotFoundError(ref) + } + + const fullRef = await GitRefManager.expand({ fs, gitdir, ref }); + const currentRef = await _currentBranch({ fs, gitdir, fullname: true }); + if (fullRef === currentRef) { + // detach HEAD + const value = await GitRefManager.resolve({ fs, gitdir, ref: fullRef }); + await GitRefManager.writeRef({ fs, gitdir, ref: 'HEAD', value }); + } + + // Delete a specified branch + await GitRefManager.deleteRef({ fs, gitdir, ref: fullRef }); +} + +// @ts-check /** - * Fetch commits from a remote repository + * Delete a local branch * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {HttpClient} args.http - an HTTP client - * @param {ProgressCallback} [args.onProgress] - optional progress event callback - * @param {MessageCallback} [args.onMessage] - optional message event callback - * @param {AuthCallback} [args.onAuth] - optional auth fill callback - * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback - * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback + * > Note: This only deletes loose branches - it should be fixed in the future to delete packed branches as well. 
+ * + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.url] - The URL of the remote repository. The default is the value set in the git config for that remote. - * @param {string} [args.remote] - If URL is not specified, determines which remote to use. - * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. - * @param {string} [args.ref] - Which branch to fetch if `singleBranch` is true. By default this is the current branch or the remote's default branch. - * @param {string} [args.remoteRef] - The name of the branch on the remote to fetch if `singleBranch` is true. By default this is the configured remote tracking branch. - * @param {boolean} [args.tags = false] - Also fetch tags - * @param {number} [args.depth] - Integer. Determines how much of the git repository's history to retrieve - * @param {boolean} [args.relative = false] - Changes the meaning of `depth` to be measured from the current shallow depth rather than from the branch tip. - * @param {Date} [args.since] - Only fetch commits created after the given date. Mutually exclusive with `depth`. - * @param {string[]} [args.exclude = []] - A list of branches or tags. Instructs the remote server not to send us any commits reachable from these refs. - * @param {boolean} [args.prune] - Delete local remote-tracking branches that are not present on the remote - * @param {boolean} [args.pruneTags] - Prune local tags that don’t exist on the remote, and force-update those tags that differ - * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. 
- * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {string} args.ref - The branch to delete * - * @returns {Promise} Resolves successfully when fetch completes - * @see FetchResult + * @returns {Promise} Resolves successfully when filesystem operations are complete * * @example - * let result = await git.fetch({ - * fs, - * http, - * dir: '/tutorial', - * corsProxy: 'https://cors.isomorphic-git.org', - * url: 'https://github.com/isomorphic-git/isomorphic-git', - * ref: 'main', - * depth: 1, - * singleBranch: true, - * tags: false - * }) - * console.log(result) + * await git.deleteBranch({ fs, dir: '/tutorial', ref: 'local-branch' }) + * console.log('done') * */ -async function fetch({ +async function deleteBranch({ fs, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, dir, gitdir = join(dir, '.git'), ref, - remote, - remoteRef, - url, - corsProxy, - depth = null, - since = null, - exclude = [], - relative = false, - tags = false, - singleBranch = false, - headers = {}, - prune = false, - pruneTags = false, - cache = {}, }) { try { assertParameter('fs', fs); - assertParameter('http', http); - assertParameter('gitdir', gitdir); - - return await _fetch({ + assertParameter('ref', ref); + return await _deleteBranch({ fs: new FileSystem(fs), - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, gitdir, ref, - remote, - remoteRef, - url, - corsProxy, - depth, - since, - exclude, - relative, - tags, - singleBranch, - headers, - prune, - pruneTags, }) } catch (err) { - err.caller = 'git.fetch'; + err.caller = 'git.deleteBranch'; throw err } } @@ -26742,36 +25867,82 @@ async function fetch({ // @ts-check /** - * Find the merge base for a set of commits + * Delete a local ref * - * @param {object} args - * @param {FsClient} args.fs - a file system client + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string[]} args.oids - Which commits - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {string} args.ref - The ref to delete + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.deleteRef({ fs, dir: '/tutorial', ref: 'refs/tags/test-tag' }) + * console.log('done') + * + */ +async function deleteRef({ fs, dir, gitdir = join(dir, '.git'), ref }) { + try { + assertParameter('fs', fs); + assertParameter('ref', ref); + await GitRefManager.deleteRef({ fs: new FileSystem(fs), gitdir, ref }); + } catch (err) { + err.caller = 'git.deleteRef'; + throw err + } +} + +// @ts-check + +/** + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.remote + * + * @returns {Promise} + */ +async function _deleteRemote({ fs, gitdir, remote }) { + const config = await GitConfigManager.get({ fs, gitdir }); + await config.deleteSection('remote', remote); + await GitConfigManager.save({ fs, gitdir, config }); +} + +// @ts-check + +/** + * Removes the local config entry for a given remote + * + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The 
[working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.remote - The name of the remote to delete + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.deleteRemote({ fs, dir: '/tutorial', remote: 'upstream' }) + * console.log('done') * */ -async function findMergeBase({ +async function deleteRemote({ fs, dir, gitdir = join(dir, '.git'), - oids, - cache = {}, + remote, }) { try { assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('oids', oids); - - return await _findMergeBase({ + assertParameter('remote', remote); + return await _deleteRemote({ fs: new FileSystem(fs), - cache, gitdir, - oids, + remote, }) } catch (err) { - err.caller = 'git.findMergeBase'; + err.caller = 'git.deleteRemote'; throw err } } @@ -26779,102 +25950,156 @@ async function findMergeBase({ // @ts-check /** - * Find the root git directory - * - * Starting at `filepath`, walks upward until it finds a directory that contains a subdirectory called '.git'. + * Delete a local tag ref * * @param {Object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.filepath + * @param {string} args.gitdir + * @param {string} args.ref - The tag to delete + * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.deleteTag({ dir: '$input((/))', ref: '$input((test-tag))' }) + * console.log('done') * - * @returns {Promise} Resolves successfully with a root git directory path */ -async function _findRoot({ fs, filepath }) { - if (await fs.exists(join(filepath, '.git'))) { - return filepath - } else { - const parent = dirname(filepath); - if (parent === filepath) { - throw new NotFoundError(`git root for ${filepath}`) - } - return _findRoot({ fs, filepath: parent }) - } +async function _deleteTag({ fs, gitdir, ref }) { + ref = ref.startsWith('refs/tags/') ? ref : `refs/tags/${ref}`; + await GitRefManager.deleteRef({ fs, gitdir, ref }); } // @ts-check /** - * Find the root git directory - * - * Starting at `filepath`, walks upward until it finds a directory that contains a subdirectory called '.git'. + * Delete a local tag ref * * @param {Object} args - * @param {FsClient} args.fs - a file system client - * @param {string} args.filepath - The file directory to start searching in. 
+ * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.ref - The tag to delete * - * @returns {Promise} Resolves successfully with a root git directory path - * @throws {NotFoundError} + * @returns {Promise} Resolves successfully when filesystem operations are complete * * @example - * let gitroot = await git.findRoot({ - * fs, - * filepath: '/tutorial/src/utils' - * }) - * console.log(gitroot) + * await git.deleteTag({ fs, dir: '/tutorial', ref: 'test-tag' }) + * console.log('done') * */ -async function findRoot({ fs, filepath }) { +async function deleteTag({ fs, dir, gitdir = join(dir, '.git'), ref }) { try { assertParameter('fs', fs); - assertParameter('filepath', filepath); - - return await _findRoot({ fs: new FileSystem(fs), filepath }) + assertParameter('ref', ref); + return await _deleteTag({ + fs: new FileSystem(fs), + gitdir, + ref, + }) } catch (err) { - err.caller = 'git.findRoot'; + err.caller = 'git.deleteTag'; throw err } } +async function expandOidLoose({ fs, gitdir, oid: short }) { + const prefix = short.slice(0, 2); + const objectsSuffixes = await fs.readdir(`${gitdir}/objects/${prefix}`); + return objectsSuffixes + .map(suffix => `${prefix}${suffix}`) + .filter(_oid => _oid.startsWith(short)) +} + +async function expandOidPacked({ + fs, + cache, + gitdir, + oid: short, + getExternalRefDelta, +}) { + // Iterate through all the .pack files + const results = []; + let list = await fs.readdir(join(gitdir, 'objects/pack')); + list = list.filter(x => x.endsWith('.idx')); + for (const filename of list) { + const indexFile = `${gitdir}/objects/pack/${filename}`; + const p = await readPackIndex({ + fs, + cache, + filename: indexFile, + getExternalRefDelta, + }); + if (p.error) throw new InternalError(p.error) + // Search through the list of oids in the packfile + for (const oid of p.offsets.keys()) { + if (oid.startsWith(short)) results.push(oid); + } + } + return results +} + +async function _expandOid({ fs, cache, gitdir, oid: short }) { + // Curry the current read method so that the packfile un-deltification + // process can acquire external ref-deltas. + const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); + + const results1 = await expandOidLoose({ fs, gitdir, oid: short }); + const results2 = await expandOidPacked({ + fs, + cache, + gitdir, + oid: short, + getExternalRefDelta, + }); + const results = results1.concat(results2); + + if (results.length === 1) { + return results[0] + } + if (results.length > 1) { + throw new AmbiguousError('oids', short, results) + } + throw new NotFoundError(`an object matching "${short}"`) +} + // @ts-check /** - * Read an entry from the git config files. - * - * *Caveats:* - * - Currently only the local `$GIT_DIR/config` file can be read or written. However support for the global `~/.gitconfig` and system `$(prefix)/etc/gitconfig` will be added in the future. - * - The current parser does not support the more exotic features of the git-config file format such as `[include]` and `[includeIf]`. 
+ * Expand and resolve a short oid into a full oid * * @param {Object} args * @param {FsClient} args.fs - a file system implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.path - The key of the git config entry + * @param {string} args.oid - The shortened oid prefix to expand (like "0414d2a") + * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves with the config value + * @returns {Promise} Resolves successfully with the full oid (like "0414d2a286d7bbc7a4a326a61c1f9f888a8ab87f") * * @example - * // Read config value - * let value = await git.getConfig({ - * fs, - * dir: '/tutorial', - * path: 'remote.origin.url' - * }) - * console.log(value) + * let oid = await git.expandOid({ fs, dir: '/tutorial', oid: '0414d2a'}) + * console.log(oid) * */ -async function getConfig({ fs, dir, gitdir = join(dir, '.git'), path }) { +async function expandOid({ + fs, + dir, + gitdir = join(dir, '.git'), + oid, + cache = {}, +}) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('path', path); - - return await _getConfig({ + assertParameter('oid', oid); + return await _expandOid({ fs: new FileSystem(fs), + cache, gitdir, - path, + oid, }) } catch (err) { - err.caller = 'git.getConfig'; + err.caller = 'git.expandOid'; throw err } } @@ -26882,380 +26107,517 @@ async function getConfig({ fs, dir, gitdir = join(dir, '.git'), path }) { // @ts-check /** - * @param {Object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {string} args.path - * - * @returns {Promise>} Resolves with an array of the config value - * - */ -async function _getConfigAll({ fs, gitdir, path }) { - const config = await GitConfigManager.get({ fs, gitdir }); - return config.getall(path) -} - -// @ts-check - -/** - * Read a multi-valued entry from the git config files. - * - * *Caveats:* - * - Currently only the local `$GIT_DIR/config` file can be read or written. However support for the global `~/.gitconfig` and system `$(prefix)/etc/gitconfig` will be added in the future. - * - The current parser does not support the more exotic features of the git-config file format such as `[include]` and `[includeIf]`. 
+ * Expand an abbreviated ref to its full name * * @param {Object} args * @param {FsClient} args.fs - a file system implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.path - The key of the git config entry + * @param {string} args.ref - The ref to expand (like "v1.0.0") * - * @returns {Promise>} Resolves with the config value + * @returns {Promise} Resolves successfully with a full ref name ("refs/tags/v1.0.0") + * + * @example + * let fullRef = await git.expandRef({ fs, dir: '/tutorial', ref: 'main'}) + * console.log(fullRef) * */ -async function getConfigAll({ - fs, - dir, - gitdir = join(dir, '.git'), - path, -}) { +async function expandRef({ fs, dir, gitdir = join(dir, '.git'), ref }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('path', path); - - return await _getConfigAll({ + assertParameter('ref', ref); + return await GitRefManager.expand({ fs: new FileSystem(fs), gitdir, - path, + ref, }) } catch (err) { - err.caller = 'git.getConfigAll'; + err.caller = 'git.expandRef'; throw err } } -// @ts-check - -/** - * - * @typedef {Object} GetRemoteInfoResult - The object returned has the following schema: - * @property {string[]} capabilities - The list of capabilities returned by the server (part of the Git protocol) - * @property {Object} [refs] - * @property {string} [HEAD] - The default branch of the remote - * @property {Object} [refs.heads] - The branches on the remote - * @property {Object} [refs.pull] - The special branches representing pull requests (non-standard) - * @property {Object} [refs.tags] - The tags on the remote - * - */ +// @ts-check + +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} args.gitdir + * @param {string[]} args.oids + * + */ +async function _findMergeBase({ fs, cache, gitdir, oids }) { + // Note: right now, the tests are geared so that the output should match that of + // `git merge-base --all --octopus` + // because without the --octopus flag, git's output seems to depend on the ORDER of the oids, + // and computing virtual merge bases is just too much for me to fathom right now. + + // If we start N independent walkers, one at each of the given `oids`, and walk backwards + // through ancestors, eventually we'll discover a commit where each one of these N walkers + // has passed through. So we just need to keep track of which walkers have visited each commit + // until we find a commit that N distinct walkers has visited. 
+ const visits = {}; + const passes = oids.length; + let heads = oids.map((oid, index) => ({ index, oid })); + while (heads.length) { + // Count how many times we've passed each commit + const result = new Set(); + for (const { oid, index } of heads) { + if (!visits[oid]) visits[oid] = new Set(); + visits[oid].add(index); + if (visits[oid].size === passes) { + result.add(oid); + } + } + if (result.size > 0) { + return [...result] + } + // We haven't found a common ancestor yet + const newheads = new Map(); + for (const { oid, index } of heads) { + try { + const { object } = await _readObject({ fs, cache, gitdir, oid }); + const commit = GitCommit.from(object); + const { parent } = commit.parseHeaders(); + for (const oid of parent) { + if (!visits[oid] || !visits[oid].has(index)) { + newheads.set(oid + ':' + index, { oid, index }); + } + } + } catch (err) { + // do nothing + } + } + heads = Array.from(newheads.values()); + } + return [] +} + +const LINEBREAKS = /^.*(\r?\n|$)/gm; -/** - * List a remote servers branches, tags, and capabilities. - * - * This is a rare command that doesn't require an `fs`, `dir`, or even `gitdir` argument. - * It just communicates to a remote git server, using the first step of the `git-upload-pack` handshake, but stopping short of fetching the packfile. - * - * @param {object} args - * @param {HttpClient} args.http - an HTTP client - * @param {AuthCallback} [args.onAuth] - optional auth fill callback - * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback - * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback - * @param {string} args.url - The URL of the remote repository. Will be gotten from gitconfig if absent. - * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. - * @param {boolean} [args.forPush = false] - By default, the command queries the 'fetch' capabilities. If true, it will ask for the 'push' capabilities. - * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config - * - * @returns {Promise} Resolves successfully with an object listing the branches, tags, and capabilities of the remote. - * @see GetRemoteInfoResult - * - * @example - * let info = await git.getRemoteInfo({ - * http, - * url: - * "https://cors.isomorphic-git.org/github.com/isomorphic-git/isomorphic-git.git" - * }); - * console.log(info); - * - */ -async function getRemoteInfo({ - http, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - url, - headers = {}, - forPush = false, +function mergeFile({ + ourContent, + baseContent, + theirContent, + ourName = 'ours', + baseName = 'base', + theirName = 'theirs', + format = 'diff', + markerSize = 7, }) { - try { - assertParameter('http', http); - assertParameter('url', url); + const ours = ourContent.match(LINEBREAKS); + const base = baseContent.match(LINEBREAKS); + const theirs = theirContent.match(LINEBREAKS); - const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); - const remote = await GitRemoteHTTP.discover({ - http, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - service: forPush ? 'git-receive-pack' : 'git-upload-pack', - url, - headers, - protocolVersion: 1, - }); + // Here we let the diff3 library do the heavy lifting. 
+ const result = diff3Merge(ours, base, theirs); - // Note: remote.capabilities, remote.refs, and remote.symrefs are Set and Map objects, - // but one of the objectives of the public API is to always return JSON-compatible objects - // so we must JSONify them. - const result = { - capabilities: [...remote.capabilities], - }; - // Convert the flat list into an object tree, because I figure 99% of the time - // that will be easier to use. - for (const [ref, oid] of remote.refs) { - const parts = ref.split('/'); - const last = parts.pop(); - let o = result; - for (const part of parts) { - o[part] = o[part] || {}; - o = o[part]; - } - o[last] = oid; + // Here we note whether there are conflicts and format the results + let mergedText = ''; + let cleanMerge = true; + for (const item of result) { + if (item.ok) { + mergedText += item.ok.join(''); } - // Merge symrefs on top of refs to more closely match actual git repo layouts - for (const [symref, ref] of remote.symrefs) { - const parts = symref.split('/'); - const last = parts.pop(); - let o = result; - for (const part of parts) { - o[part] = o[part] || {}; - o = o[part]; + if (item.conflict) { + cleanMerge = false; + mergedText += `${'<'.repeat(markerSize)} ${ourName}\n`; + mergedText += item.conflict.a.join(''); + if (format === 'diff3') { + mergedText += `${'|'.repeat(markerSize)} ${baseName}\n`; + mergedText += item.conflict.o.join(''); } - o[last] = ref; + mergedText += `${'='.repeat(markerSize)}\n`; + mergedText += item.conflict.b.join(''); + mergedText += `${'>'.repeat(markerSize)} ${theirName}\n`; } - return result - } catch (err) { - err.caller = 'git.getRemoteInfo'; - throw err } + return { cleanMerge, mergedText } } // @ts-check /** - * @param {any} remote - * @param {string} prefix - * @param {boolean} symrefs - * @param {boolean} peelTags - * @returns {ServerRef[]} + * Create a merged tree + * + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {object} args.cache + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.ourOid - The SHA-1 object id of our tree + * @param {string} args.baseOid - The SHA-1 object id of the base tree + * @param {string} args.theirOid - The SHA-1 object id of their tree + * @param {string} [args.ourName='ours'] - The name to use in conflicted files for our hunks + * @param {string} [args.baseName='base'] - The name to use in conflicted files (in diff3 format) for the base hunks + * @param {string} [args.theirName='theirs'] - The name to use in conflicted files for their hunks + * @param {boolean} [args.dryRun=false] + * + * @returns {Promise} - The SHA-1 object id of the merged tree + * */ -function formatInfoRefs(remote, prefix, symrefs, peelTags) { - const refs = []; - for (const [key, value] of remote.refs) { - if (prefix && !key.startsWith(prefix)) continue +async function mergeTree({ + fs, + cache, + dir, + gitdir = join(dir, '.git'), + ourOid, + baseOid, + theirOid, + ourName = 'ours', + baseName = 'base', + theirName = 'theirs', + dryRun = false, +}) { + const ourTree = TREE({ ref: ourOid }); + const baseTree = TREE({ ref: baseOid }); + const theirTree = TREE({ ref: theirOid }); - if (key.endsWith('^{}')) { - if (peelTags) { - const _key = key.replace('^{}', ''); - // Peeled tags are almost always listed immediately after the original tag - const last = refs[refs.length - 1]; - const r = 
last.ref === _key ? last : refs.find(x => x.ref === _key); - if (r === undefined) { - throw new Error('I did not expect this to happen') + const results = await _walk({ + fs, + cache, + dir, + gitdir, + trees: [ourTree, baseTree, theirTree], + map: async function(filepath, [ours, base, theirs]) { + const path = basename(filepath); + // What we did, what they did + const ourChange = await modified(ours, base); + const theirChange = await modified(theirs, base); + switch (`${ourChange}-${theirChange}`) { + case 'false-false': { + return { + mode: await base.mode(), + path, + oid: await base.oid(), + type: await base.type(), + } + } + case 'false-true': { + return theirs + ? { + mode: await theirs.mode(), + path, + oid: await theirs.oid(), + type: await theirs.type(), + } + : undefined + } + case 'true-false': { + return ours + ? { + mode: await ours.mode(), + path, + oid: await ours.oid(), + type: await ours.type(), + } + : undefined + } + case 'true-true': { + // Modifications + if ( + ours && + base && + theirs && + (await ours.type()) === 'blob' && + (await base.type()) === 'blob' && + (await theirs.type()) === 'blob' + ) { + return mergeBlobs({ + fs, + gitdir, + path, + ours, + base, + theirs, + ourName, + baseName, + theirName, + }) + } + // all other types of conflicts fail + throw new MergeNotSupportedError() } - r.peeled = value; } - continue - } - /** @type ServerRef */ - const ref = { ref: key, oid: value }; - if (symrefs) { - if (remote.symrefs.has(key)) { - ref.target = remote.symrefs.get(key); + }, + /** + * @param {TreeEntry} [parent] + * @param {Array} children + */ + reduce: async (parent, children) => { + const entries = children.filter(Boolean); // remove undefineds + + // if the parent was deleted, the children have to go + if (!parent) return + + // automatically delete directories if they have been emptied + if (parent && parent.type === 'tree' && entries.length === 0) return + + if (entries.length > 0) { + const tree = new GitTree(entries); + const object = tree.toObject(); + const oid = await _writeObject({ + fs, + gitdir, + type: 'tree', + object, + dryRun, + }); + parent.oid = oid; } - } - refs.push(ref); - } - return refs + return parent + }, + }); + return results.oid } -// @ts-check - /** - * @typedef {Object} GetRemoteInfo2Result - This object has the following schema: - * @property {1 | 2} protocolVersion - Git protocol version the server supports - * @property {Object} capabilities - An object of capabilities represented as keys and values - * @property {ServerRef[]} [refs] - Server refs (they get returned by protocol version 1 whether you want them or not) + * + * @param {WalkerEntry} entry + * @param {WalkerEntry} base + * */ +async function modified(entry, base) { + if (!entry && !base) return false + if (entry && !base) return true + if (!entry && base) return true + if ((await entry.type()) === 'tree' && (await base.type()) === 'tree') { + return false + } + if ( + (await entry.type()) === (await base.type()) && + (await entry.mode()) === (await base.mode()) && + (await entry.oid()) === (await base.oid()) + ) { + return false + } + return true +} /** - * List a remote server's capabilities. - * - * This is a rare command that doesn't require an `fs`, `dir`, or even `gitdir` argument. - * It just communicates to a remote git server, determining what protocol version, commands, and features it supports. - * - * > The successor to [`getRemoteInfo`](./getRemoteInfo.md), this command supports Git Wire Protocol Version 2. 
- * > Therefore its return type is more complicated as either: - * > - * > - v1 capabilities (and refs) or - * > - v2 capabilities (and no refs) - * > - * > are returned. - * > If you just care about refs, use [`listServerRefs`](./listServerRefs.md) - * - * @param {object} args - * @param {HttpClient} args.http - an HTTP client - * @param {AuthCallback} [args.onAuth] - optional auth fill callback - * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback - * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback - * @param {string} args.url - The URL of the remote repository. Will be gotten from gitconfig if absent. - * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. - * @param {boolean} [args.forPush = false] - By default, the command queries the 'fetch' capabilities. If true, it will ask for the 'push' capabilities. - * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config - * @param {1 | 2} [args.protocolVersion = 2] - Which version of the Git Protocol to use. - * - * @returns {Promise} Resolves successfully with an object listing the capabilities of the remote. - * @see GetRemoteInfo2Result - * @see ServerRef * - * @example - * let info = await git.getRemoteInfo2({ - * http, - * corsProxy: "https://cors.isomorphic-git.org", - * url: "https://github.com/isomorphic-git/isomorphic-git.git" - * }); - * console.log(info); + * @param {Object} args + * @param {import('../models/FileSystem').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.path + * @param {WalkerEntry} args.ours + * @param {WalkerEntry} args.base + * @param {WalkerEntry} args.theirs + * @param {string} [args.ourName] + * @param {string} [args.baseName] + * @param {string} [args.theirName] + * @param {string} [args.format] + * @param {number} [args.markerSize] + * @param {boolean} [args.dryRun = false] * */ -async function getRemoteInfo2({ - http, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - url, - headers = {}, - forPush = false, - protocolVersion = 2, +async function mergeBlobs({ + fs, + gitdir, + path, + ours, + base, + theirs, + ourName, + theirName, + baseName, + format, + markerSize, + dryRun, }) { - try { - assertParameter('http', http); - assertParameter('url', url); - - const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); - const remote = await GitRemoteHTTP.discover({ - http, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - service: forPush ? 'git-receive-pack' : 'git-upload-pack', - url, - headers, - protocolVersion, - }); - - if (remote.protocolVersion === 2) { - /** @type GetRemoteInfo2Result */ - return { - protocolVersion: remote.protocolVersion, - capabilities: remote.capabilities2, - } - } - - // Note: remote.capabilities, remote.refs, and remote.symrefs are Set and Map objects, - // but one of the objectives of the public API is to always return JSON-compatible objects - // so we must JSONify them. 
- /** @type Object */ - const capabilities = {}; - for (const cap of remote.capabilities) { - const [key, value] = cap.split('='); - if (value) { - capabilities[key] = value; - } else { - capabilities[key] = true; - } - } - /** @type GetRemoteInfo2Result */ - return { - protocolVersion: 1, - capabilities, - refs: formatInfoRefs(remote, undefined, true, true), - } - } catch (err) { - err.caller = 'git.getRemoteInfo2'; - throw err + const type = 'blob'; + // Compute the new mode. + // Since there are ONLY two valid blob modes ('100755' and '100644') it boils down to this + const mode = + (await base.mode()) === (await ours.mode()) + ? await theirs.mode() + : await ours.mode(); + // The trivial case: nothing to merge except maybe mode + if ((await ours.oid()) === (await theirs.oid())) { + return { mode, path, oid: await ours.oid(), type } } -} - -async function hashObject({ - type, - object, - format = 'content', - oid = undefined, -}) { - if (format !== 'deflated') { - if (format !== 'wrapped') { - object = GitObject.wrap({ type, object }); - } - oid = await shasum(object); + // if only one side made oid changes, return that side's oid + if ((await ours.oid()) === (await base.oid())) { + return { mode, path, oid: await theirs.oid(), type } } - return { oid, object } + if ((await theirs.oid()) === (await base.oid())) { + return { mode, path, oid: await ours.oid(), type } + } + // if both sides made changes do a merge + const { mergedText, cleanMerge } = mergeFile({ + ourContent: Buffer.from(await ours.content()).toString('utf8'), + baseContent: Buffer.from(await base.content()).toString('utf8'), + theirContent: Buffer.from(await theirs.content()).toString('utf8'), + ourName, + theirName, + baseName, + format, + markerSize, + }); + if (!cleanMerge) { + // all other types of conflicts fail + throw new MergeNotSupportedError() + } + const oid = await _writeObject({ + fs, + gitdir, + type: 'blob', + object: Buffer.from(mergedText, 'utf8'), + dryRun, + }); + return { mode, path, oid, type } } // @ts-check +// import diff3 from 'node-diff3' /** * - * @typedef {object} HashBlobResult - The object returned has the following schema: - * @property {string} oid - The SHA-1 object id - * @property {'blob'} type - The type of the object - * @property {Uint8Array} object - The wrapped git object (the thing that is hashed) - * @property {'wrapped'} format - The format of the object + * @typedef {Object} MergeResult - Returns an object with a schema like this: + * @property {string} [oid] - The SHA-1 object id that is now at the head of the branch. Absent only if `dryRun` was specified and `mergeCommit` is true. + * @property {boolean} [alreadyMerged] - True if the branch was already merged so no changes were made + * @property {boolean} [fastForward] - True if it was a fast-forward merge + * @property {boolean} [mergeCommit] - True if merge resulted in a merge commit + * @property {string} [tree] - The SHA-1 object id of the tree resulting from a merge commit * */ /** - * Compute what the SHA-1 object id of a file would be - * * @param {object} args - * @param {Uint8Array|string} args.object - The object to write. If `object` is a String then it will be converted to a Uint8Array using UTF-8 encoding. - * - * @returns {Promise} Resolves successfully with the SHA-1 object id and the wrapped object Uint8Array. 
- * @see HashBlobResult - * - * @example - * let { oid, type, object, format } = await git.hashBlob({ - * object: 'Hello world!', - * }) + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {object} args.cache + * @param {string} args.gitdir + * @param {string} [args.ours] + * @param {string} args.theirs + * @param {boolean} args.fastForwardOnly + * @param {boolean} args.dryRun + * @param {boolean} args.noUpdateBranch + * @param {string} [args.message] + * @param {Object} args.author + * @param {string} args.author.name + * @param {string} args.author.email + * @param {number} args.author.timestamp + * @param {number} args.author.timezoneOffset + * @param {Object} args.committer + * @param {string} args.committer.name + * @param {string} args.committer.email + * @param {number} args.committer.timestamp + * @param {number} args.committer.timezoneOffset + * @param {string} [args.signingKey] + * @param {SignCallback} [args.onSign] - a PGP signing implementation * - * console.log('oid', oid) - * console.log('type', type) - * console.log('object', object) - * console.log('format', format) + * @returns {Promise} Resolves to a description of the merge operation * */ -async function hashBlob({ object }) { - try { - assertParameter('object', object); - - // Convert object to buffer - if (typeof object === 'string') { - object = Buffer.from(object, 'utf8'); - } else { - object = Buffer.from(object); +async function _merge({ + fs, + cache, + gitdir, + ours, + theirs, + fastForwardOnly = false, + dryRun = false, + noUpdateBranch = false, + message, + author, + committer, + signingKey, + onSign, +}) { + if (ours === undefined) { + ours = await _currentBranch({ fs, gitdir, fullname: true }); + } + ours = await GitRefManager.expand({ + fs, + gitdir, + ref: ours, + }); + theirs = await GitRefManager.expand({ + fs, + gitdir, + ref: theirs, + }); + const ourOid = await GitRefManager.resolve({ + fs, + gitdir, + ref: ours, + }); + const theirOid = await GitRefManager.resolve({ + fs, + gitdir, + ref: theirs, + }); + // find most recent common ancestor of ref a and ref b + const baseOids = await _findMergeBase({ + fs, + cache, + gitdir, + oids: [ourOid, theirOid], + }); + if (baseOids.length !== 1) { + throw new MergeNotSupportedError() + } + const baseOid = baseOids[0]; + // handle fast-forward case + if (baseOid === theirOid) { + return { + oid: ourOid, + alreadyMerged: true, } - - const type = 'blob'; - const { oid, object: _object } = await hashObject({ - type: 'blob', - format: 'content', - object, + } + if (baseOid === ourOid) { + if (!dryRun && !noUpdateBranch) { + await GitRefManager.writeRef({ fs, gitdir, ref: ours, value: theirOid }); + } + return { + oid: theirOid, + fastForward: true, + } + } else { + // not a simple fast-forward + if (fastForwardOnly) { + throw new FastForwardError() + } + // try a fancier merge + const tree = await mergeTree({ + fs, + cache, + gitdir, + ourOid, + theirOid, + baseOid, + ourName: ours, + baseName: 'base', + theirName: theirs, + dryRun, }); - return { oid, type, object: new Uint8Array(_object), format: 'wrapped' } - } catch (err) { - err.caller = 'git.hashBlob'; - throw err + if (!message) { + message = `Merge branch '${abbreviateRef(theirs)}' into ${abbreviateRef( + ours + )}`; + } + const oid = await _commit({ + fs, + cache, + gitdir, + message, + ref: ours, + tree, + parent: [ourOid, theirOid], + author, + committer, + signingKey, + onSign, + dryRun, + noUpdateBranch, + }); + return { + oid, + tree, + mergeCommit: true, + } } } @@ -27264,37 
+26626,117 @@ async function hashBlob({ object }) { /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache + * @param {object} args.cache + * @param {HttpClient} args.http * @param {ProgressCallback} [args.onProgress] + * @param {MessageCallback} [args.onMessage] + * @param {AuthCallback} [args.onAuth] + * @param {AuthFailureCallback} [args.onAuthFailure] + * @param {AuthSuccessCallback} [args.onAuthSuccess] * @param {string} args.dir * @param {string} args.gitdir - * @param {string} args.filepath + * @param {string} args.ref + * @param {string} [args.url] + * @param {string} [args.remote] + * @param {string} [args.remoteRef] + * @param {string} [args.corsProxy] + * @param {boolean} args.singleBranch + * @param {boolean} args.fastForwardOnly + * @param {Object} [args.headers] + * @param {Object} args.author + * @param {string} args.author.name + * @param {string} args.author.email + * @param {number} args.author.timestamp + * @param {number} args.author.timezoneOffset + * @param {Object} args.committer + * @param {string} args.committer.name + * @param {string} args.committer.email + * @param {number} args.committer.timestamp + * @param {number} args.committer.timezoneOffset + * @param {string} [args.signingKey] + * + * @returns {Promise} Resolves successfully when pull operation completes * - * @returns {Promise<{oids: string[]}>} */ -async function _indexPack({ +async function _pull({ fs, cache, + http, onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, dir, gitdir, - filepath, + ref, + url, + remote, + remoteRef, + fastForwardOnly, + corsProxy, + singleBranch, + headers, + author, + committer, + signingKey, }) { try { - filepath = join(dir, filepath); - const pack = await fs.read(filepath); - const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); - const idx = await GitPackIndex.fromPack({ - pack, - getExternalRefDelta, + // If ref is undefined, use 'HEAD' + if (!ref) { + const head = await _currentBranch({ fs, gitdir }); + // TODO: use a better error. + if (!head) { + throw new MissingParameterError('ref') + } + ref = head; + } + + const { fetchHead, fetchHeadDescription } = await _fetch({ + fs, + cache, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, + gitdir, + corsProxy, + ref, + url, + remote, + remoteRef, + singleBranch, + headers, + }); + // Merge the remote tracking branch into the local one. + await _merge({ + fs, + cache, + gitdir, + ours: ref, + theirs: fetchHead, + fastForwardOnly, + message: `Merge ${fetchHeadDescription}`, + author, + committer, + signingKey, + dryRun: false, + noUpdateBranch: false, + }); + await _checkout({ + fs, + cache, onProgress, + dir, + gitdir, + ref, + remote, + noCheckout: false, }); - await fs.write(filepath.replace(/\.pack$/, '.idx'), await idx.toBuffer()); - return { - oids: [...idx.hashes], - } } catch (err) { - err.caller = 'git.indexPack'; + err.caller = 'git.pull'; throw err } } @@ -27302,58 +26744,95 @@ async function _indexPack({ // @ts-check /** - * Create the .idx file for a given .pack file + * Like `pull`, but hard-coded with `fastForward: true` so there is no need for an `author` parameter. 
* * @param {object} args * @param {FsClient} args.fs - a file system client + * @param {HttpClient} args.http - an HTTP client * @param {ProgressCallback} [args.onProgress] - optional progress event callback - * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {MessageCallback} [args.onMessage] - optional message event callback + * @param {AuthCallback} [args.onAuth] - optional auth fill callback + * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback + * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback + * @param {string} args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.filepath - The path to the .pack file to index + * @param {string} [args.ref] - Which branch to merge into. By default this is the currently checked out branch. + * @param {string} [args.url] - (Added in 1.1.0) The URL of the remote repository. The default is the value set in the git config for that remote. + * @param {string} [args.remote] - (Added in 1.1.0) If URL is not specified, determines which remote to use. + * @param {string} [args.remoteRef] - (Added in 1.1.0) The name of the branch on the remote to fetch. By default this is the configured remote tracking branch. + * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. + * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. + * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise<{oids: string[]}>} Resolves with a list of the SHA-1 object ids contained in the packfile + * @returns {Promise} Resolves successfully when pull operation completes * * @example - * let packfiles = await fs.promises.readdir('/tutorial/.git/objects/pack') - * packfiles = packfiles.filter(name => name.endsWith('.pack')) - * console.log('packfiles', packfiles) - * - * const { oids } = await git.indexPack({ + * await git.fastForward({ * fs, + * http, * dir: '/tutorial', - * filepath: `.git/objects/pack/${packfiles[0]}`, - * async onProgress (evt) { - * console.log(`${evt.phase}: ${evt.loaded} / ${evt.total}`) - * } + * ref: 'main', + * singleBranch: true * }) - * console.log(oids) + * console.log('done') * */ -async function indexPack({ +async function fastForward({ fs, + http, onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, dir, gitdir = join(dir, '.git'), - filepath, + ref, + url, + remote, + remoteRef, + corsProxy, + singleBranch, + headers = {}, cache = {}, }) { try { assertParameter('fs', fs); - assertParameter('dir', dir); - assertParameter('gitdir', dir); - assertParameter('filepath', filepath); + assertParameter('http', http); + assertParameter('gitdir', gitdir); - return await _indexPack({ + const thisWillNotBeUsed = { + name: '', + email: '', + timestamp: Date.now(), + timezoneOffset: 0, + }; + + return await _pull({ fs: new FileSystem(fs), cache, + http, onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, dir, gitdir, - filepath, + ref, + url, + remote, + remoteRef, + fastForwardOnly: true, + corsProxy, + singleBranch, + headers, + author: thisWillNotBeUsed, + committer: 
thisWillNotBeUsed, }) } catch (err) { - err.caller = 'git.indexPack'; + err.caller = 'git.fastForward'; throw err } } @@ -27361,168 +26840,121 @@ async function indexPack({ // @ts-check /** - * Initialize a new repository - * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {boolean} [args.bare = false] - Initialize a bare repository - * @param {string} [args.defaultBranch = 'master'] - The name of the default branch (might be changed to a required argument in 2.0.0) - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.init({ fs, dir: '/tutorial' }) - * console.log('done') * - */ -async function init({ - fs, - bare = false, - dir, - gitdir = bare ? dir : join(dir, '.git'), - defaultBranch = 'master', -}) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - if (!bare) { - assertParameter('dir', dir); - } - - return await _init({ - fs: new FileSystem(fs), - bare, - dir, - gitdir, - defaultBranch, - }) - } catch (err) { - err.caller = 'git.init'; - throw err - } -} - -// @ts-check - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string} args.oid - * @param {string} args.ancestor - * @param {number} args.depth - Maximum depth to search before giving up. -1 means no maximum depth. + * @typedef {object} FetchResult - The object returned has the following schema: + * @property {string | null} defaultBranch - The branch that is cloned if no branch is specified + * @property {string | null} fetchHead - The SHA-1 object id of the fetched head commit + * @property {string | null} fetchHeadDescription - a textual description of the branch that was fetched + * @property {Object} [headers] - The HTTP response headers returned by the git server + * @property {string[]} [pruned] - A list of branches that were pruned, if you provided the `prune` parameter * - * @returns {Promise} */ -async function _isDescendent({ - fs, - cache, - gitdir, - oid, - ancestor, - depth, -}) { - const shallows = await GitShallowManager.read({ fs, gitdir }); - if (!oid) { - throw new MissingParameterError('oid') - } - if (!ancestor) { - throw new MissingParameterError('ancestor') - } - // If you don't like this behavior, add your own check. - // Edge cases are hard to define a perfect solution. - if (oid === ancestor) return false - // We do not use recursion here, because that would lead to depth-first traversal, - // and we want to maintain a breadth-first traversal to avoid hitting shallow clone depth cutoffs. - const queue = [oid]; - const visited = new Set(); - let searchdepth = 0; - while (queue.length) { - if (searchdepth++ === depth) { - throw new MaxDepthError(depth) - } - const oid = queue.shift(); - const { type, object } = await _readObject({ - fs, - cache, - gitdir, - oid, - }); - if (type !== 'commit') { - throw new ObjectTypeError(oid, type, 'commit') - } - const commit = GitCommit.from(object).parse(); - // Are any of the parents the sought-after ancestor? 
- for (const parent of commit.parent) { - if (parent === ancestor) return true - } - // If not, add them to heads (unless we know this is a shallow commit) - if (!shallows.has(oid)) { - for (const parent of commit.parent) { - if (!visited.has(parent)) { - queue.push(parent); - visited.add(parent); - } - } - } - // Eventually, we'll travel entire tree to the roots where all the parents are empty arrays, - // or hit the shallow depth and throw an error. Excluding the possibility of grafts, or - // different branches cloned to different depths, you would hit this error at the same time - // for all parents, so trying to continue is futile. - } - return false -} - -// @ts-check /** - * Check whether a git commit is descended from another + * Fetch commits from a remote repository * * @param {object} args * @param {FsClient} args.fs - a file system client + * @param {HttpClient} args.http - an HTTP client + * @param {ProgressCallback} [args.onProgress] - optional progress event callback + * @param {MessageCallback} [args.onMessage] - optional message event callback + * @param {AuthCallback} [args.onAuth] - optional auth fill callback + * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback + * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The descendent commit - * @param {string} args.ancestor - The (proposed) ancestor commit - * @param {number} [args.depth = -1] - Maximum depth to search before giving up. -1 means no maximum depth. + * @param {string} [args.url] - The URL of the remote repository. The default is the value set in the git config for that remote. + * @param {string} [args.remote] - If URL is not specified, determines which remote to use. + * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. + * @param {string} [args.ref] - Which branch to fetch if `singleBranch` is true. By default this is the current branch or the remote's default branch. + * @param {string} [args.remoteRef] - The name of the branch on the remote to fetch if `singleBranch` is true. By default this is the configured remote tracking branch. + * @param {boolean} [args.tags = false] - Also fetch tags + * @param {number} [args.depth] - Integer. Determines how much of the git repository's history to retrieve + * @param {boolean} [args.relative = false] - Changes the meaning of `depth` to be measured from the current shallow depth rather than from the branch tip. + * @param {Date} [args.since] - Only fetch commits created after the given date. Mutually exclusive with `depth`. + * @param {string[]} [args.exclude = []] - A list of branches or tags. Instructs the remote server not to send us any commits reachable from these refs. + * @param {boolean} [args.prune] - Delete local remote-tracking branches that are not present on the remote + * @param {boolean} [args.pruneTags] - Prune local tags that don’t exist on the remote, and force-update those tags that differ + * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. 
+ * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves to true if `oid` is a descendent of `ancestor` + * @returns {Promise} Resolves successfully when fetch completes + * @see FetchResult * * @example - * let oid = await git.resolveRef({ fs, dir: '/tutorial', ref: 'main' }) - * let ancestor = await git.resolveRef({ fs, dir: '/tutorial', ref: 'v0.20.0' }) - * console.log(oid, ancestor) - * await git.isDescendent({ fs, dir: '/tutorial', oid, ancestor, depth: -1 }) + * let result = await git.fetch({ + * fs, + * http, + * dir: '/tutorial', + * corsProxy: 'https://cors.isomorphic-git.org', + * url: 'https://github.com/isomorphic-git/isomorphic-git', + * ref: 'main', + * depth: 1, + * singleBranch: true, + * tags: false + * }) + * console.log(result) * */ -async function isDescendent({ +async function fetch({ fs, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, dir, gitdir = join(dir, '.git'), - oid, - ancestor, - depth = -1, + ref, + remote, + remoteRef, + url, + corsProxy, + depth = null, + since = null, + exclude = [], + relative = false, + tags = false, + singleBranch = false, + headers = {}, + prune = false, + pruneTags = false, cache = {}, }) { try { assertParameter('fs', fs); + assertParameter('http', http); assertParameter('gitdir', gitdir); - assertParameter('oid', oid); - assertParameter('ancestor', ancestor); - return await _isDescendent({ + return await _fetch({ fs: new FileSystem(fs), cache, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, gitdir, - oid, - ancestor, + ref, + remote, + remoteRef, + url, + corsProxy, depth, + since, + exclude, + relative, + tags, + singleBranch, + headers, + prune, + pruneTags, }) } catch (err) { - err.caller = 'git.isDescendent'; + err.caller = 'git.fetch'; throw err } } @@ -27530,40 +26962,36 @@ async function isDescendent({ // @ts-check /** - * Test whether a filepath should be ignored (because of .gitignore or .git/exclude) + * Find the merge base for a set of commits * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.filepath - The filepath to test - * - * @returns {Promise} Resolves to true if the file should be ignored - * - * @example - * await git.isIgnored({ fs, dir: '/tutorial', filepath: 'docs/add.md' }) + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string[]} args.oids - Which commits + * @param {object} [args.cache] - a [cache](cache.md) object * */ -async function isIgnored({ +async function findMergeBase({ fs, dir, gitdir = join(dir, '.git'), - filepath, + oids, + cache = {}, }) { try { assertParameter('fs', fs); - assertParameter('dir', dir); assertParameter('gitdir', gitdir); - assertParameter('filepath', filepath); + assertParameter('oids', oids); - return GitIgnoreManager.isIgnored({ + return await _findMergeBase({ fs: new FileSystem(fs), - dir, + cache, gitdir, - filepath, + oids, }) } catch (err) { - err.caller = 'git.isIgnored'; + err.caller = 'git.findMergeBase'; throw err } } @@ -27571,46 +26999,58 @@ async function 
isIgnored({ // @ts-check /** - * List branches + * Find the root git directory * - * By default it lists local branches. If a 'remote' is specified, it lists the remote's branches. When listing remote branches, the HEAD branch is not filtered out, so it may be included in the list of results. + * Starting at `filepath`, walks upward until it finds a directory that contains a subdirectory called '.git'. * - * Note that specifying a remote does not actually contact the server and update the list of branches. - * If you want an up-to-date list, first do a `fetch` to that remote. - * (Which branch you fetch doesn't matter - the list of branches available on the remote is updated during the fetch handshake.) + * @param {Object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.filepath * - * @param {object} args + * @returns {Promise} Resolves successfully with a root git directory path + */ +async function _findRoot({ fs, filepath }) { + if (await fs.exists(join(filepath, '.git'))) { + return filepath + } else { + const parent = dirname(filepath); + if (parent === filepath) { + throw new NotFoundError(`git root for ${filepath}`) + } + return _findRoot({ fs, filepath: parent }) + } +} + +// @ts-check + +/** + * Find the root git directory + * + * Starting at `filepath`, walks upward until it finds a directory that contains a subdirectory called '.git'. + * + * @param {Object} args * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.remote] - Instead of the branches in `refs/heads`, list the branches in `refs/remotes/${remote}`. + * @param {string} args.filepath - The file directory to start searching in. 
* - * @returns {Promise>} Resolves successfully with an array of branch names + * @returns {Promise} Resolves successfully with a root git directory path + * @throws {NotFoundError} * * @example - * let branches = await git.listBranches({ fs, dir: '/tutorial' }) - * console.log(branches) - * let remoteBranches = await git.listBranches({ fs, dir: '/tutorial', remote: 'origin' }) - * console.log(remoteBranches) + * let gitroot = await git.findRoot({ + * fs, + * filepath: '/tutorial/src/utils' + * }) + * console.log(gitroot) * */ -async function listBranches({ - fs, - dir, - gitdir = join(dir, '.git'), - remote, -}) { +async function findRoot({ fs, filepath }) { try { assertParameter('fs', fs); - assertParameter('gitdir', gitdir); + assertParameter('filepath', filepath); - return GitRefManager.listBranches({ - fs: new FileSystem(fs), - gitdir, - remote, - }) + return await _findRoot({ fs: new FileSystem(fs), filepath }) } catch (err) { - err.caller = 'git.listBranches'; + err.caller = 'git.findRoot'; throw err } } @@ -27618,107 +27058,43 @@ async function listBranches({ // @ts-check /** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {string} args.gitdir - * @param {string} [args.ref] - * - * @returns {Promise>} - */ -async function _listFiles({ fs, gitdir, ref, cache }) { - if (ref) { - const oid = await GitRefManager.resolve({ gitdir, fs, ref }); - const filenames = []; - await accumulateFilesFromOid({ - fs, - cache, - gitdir, - oid, - filenames, - prefix: '', - }); - return filenames - } else { - return GitIndexManager.acquire({ fs, gitdir, cache }, async function( - index - ) { - return index.entries.map(x => x.path) - }) - } -} - -async function accumulateFilesFromOid({ - fs, - cache, - gitdir, - oid, - filenames, - prefix, -}) { - const { tree } = await _readTree({ fs, cache, gitdir, oid }); - // TODO: Use `walk` to do this. Should be faster. - for (const entry of tree) { - if (entry.type === 'tree') { - await accumulateFilesFromOid({ - fs, - cache, - gitdir, - oid: entry.oid, - filenames, - prefix: join(prefix, entry.path), - }); - } else { - filenames.push(join(prefix, entry.path)); - } - } -} - -// @ts-check - -/** - * List all the files in the git index or a commit + * Read an entry from the git config files. * - * > Note: This function is efficient for listing the files in the staging area, but listing all the files in a commit requires recursively walking through the git object store. - * > If you do not require a complete list of every file, better performance can be achieved by using [walk](./walk) and ignoring subdirectories you don't care about. + * *Caveats:* + * - Currently only the local `$GIT_DIR/config` file can be read or written. However support for the global `~/.gitconfig` and system `$(prefix)/etc/gitconfig` will be added in the future. + * - The current parser does not support the more exotic features of the git-config file format such as `[include]` and `[includeIf]`. 
* - * @param {object} args - * @param {FsClient} args.fs - a file system client + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - Return a list of all the files in the commit at `ref` instead of the files currently in the git index (aka staging area) - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {string} args.path - The key of the git config entry * - * @returns {Promise>} Resolves successfully with an array of filepaths + * @returns {Promise} Resolves with the config value * * @example - * // All the files in the previous commit - * let files = await git.listFiles({ fs, dir: '/tutorial', ref: 'HEAD' }) - * console.log(files) - * // All the files in the current staging area - * files = await git.listFiles({ fs, dir: '/tutorial' }) - * console.log(files) + * // Read config value + * let value = await git.getConfig({ + * fs, + * dir: '/tutorial', + * path: 'remote.origin.url' + * }) + * console.log(value) * */ -async function listFiles({ - fs, - dir, - gitdir = join(dir, '.git'), - ref, - cache = {}, -}) { +async function getConfig({ fs, dir, gitdir = join(dir, '.git'), path }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); + assertParameter('path', path); - return await _listFiles({ + return await _getConfig({ fs: new FileSystem(fs), - cache, gitdir, - ref, + path, }) } catch (err) { - err.caller = 'git.listFiles'; + err.caller = 'git.getConfig'; throw err } } @@ -27726,79 +27102,55 @@ async function listFiles({ // @ts-check /** - * List all the object notes - * - * @param {object} args + * @param {Object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache * @param {string} args.gitdir - * @param {string} args.ref + * @param {string} args.path + * + * @returns {Promise>} Resolves with an array of the config value * - * @returns {Promise>} */ - -async function _listNotes({ fs, cache, gitdir, ref }) { - // Get the current note commit - let parent; - try { - parent = await GitRefManager.resolve({ gitdir, fs, ref }); - } catch (err) { - if (err instanceof NotFoundError) { - return [] - } - } - - // Create the current note tree - const result = await _readTree({ - fs, - cache, - gitdir, - oid: parent, - }); - - // Format the tree entries - const notes = result.tree.map(entry => ({ - target: entry.path, - note: entry.oid, - })); - return notes +async function _getConfigAll({ fs, gitdir, path }) { + const config = await GitConfigManager.get({ fs, gitdir }); + return config.getall(path) } // @ts-check /** - * List all the object notes + * Read a multi-valued entry from the git config files. * - * @param {object} args - * @param {FsClient} args.fs - a file system client + * *Caveats:* + * - Currently only the local `$GIT_DIR/config` file can be read or written. However support for the global `~/.gitconfig` and system `$(prefix)/etc/gitconfig` will be added in the future. + * - The current parser does not support the more exotic features of the git-config file format such as `[include]` and `[includeIf]`. 
+ * + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - The notes ref to look under - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {string} args.path - The key of the git config entry + * + * @returns {Promise>} Resolves with the config value * - * @returns {Promise>} Resolves successfully with an array of entries containing SHA-1 object ids of the note and the object the note targets */ - -async function listNotes({ +async function getConfigAll({ fs, dir, gitdir = join(dir, '.git'), - ref = 'refs/notes/commits', - cache = {}, + path, }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('ref', ref); + assertParameter('path', path); - return await _listNotes({ + return await _getConfigAll({ fs: new FileSystem(fs), - cache, gitdir, - ref, + path, }) } catch (err) { - err.caller = 'git.listNotes'; + err.caller = 'git.getConfigAll'; throw err } } @@ -27806,153 +27158,170 @@ async function listNotes({ // @ts-check /** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir * - * @returns {Promise>} + * @typedef {Object} GetRemoteInfoResult - The object returned has the following schema: + * @property {string[]} capabilities - The list of capabilities returned by the server (part of the Git protocol) + * @property {Object} [refs] + * @property {string} [HEAD] - The default branch of the remote + * @property {Object} [refs.heads] - The branches on the remote + * @property {Object} [refs.pull] - The special branches representing pull requests (non-standard) + * @property {Object} [refs.tags] - The tags on the remote + * */ -async function _listRemotes({ fs, gitdir }) { - const config = await GitConfigManager.get({ fs, gitdir }); - const remoteNames = await config.getSubsections('remote'); - const remotes = Promise.all( - remoteNames.map(async remote => { - const url = await config.get(`remote.${remote}.url`); - return { remote, url } - }) - ); - return remotes -} - -// @ts-check /** - * List remotes + * List a remote servers branches, tags, and capabilities. + * + * This is a rare command that doesn't require an `fs`, `dir`, or even `gitdir` argument. + * It just communicates to a remote git server, using the first step of the `git-upload-pack` handshake, but stopping short of fetching the packfile. * * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {HttpClient} args.http - an HTTP client + * @param {AuthCallback} [args.onAuth] - optional auth fill callback + * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback + * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback + * @param {string} args.url - The URL of the remote repository. Will be gotten from gitconfig if absent. + * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. + * @param {boolean} [args.forPush = false] - By default, the command queries the 'fetch' capabilities. 
If true, it will ask for the 'push' capabilities. + * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config * - * @returns {Promise>} Resolves successfully with an array of `{remote, url}` objects + * @returns {Promise} Resolves successfully with an object listing the branches, tags, and capabilities of the remote. + * @see GetRemoteInfoResult * * @example - * let remotes = await git.listRemotes({ fs, dir: '/tutorial' }) - * console.log(remotes) + * let info = await git.getRemoteInfo({ + * http, + * url: + * "https://cors.isomorphic-git.org/github.com/isomorphic-git/isomorphic-git.git" + * }); + * console.log(info); * */ -async function listRemotes({ fs, dir, gitdir = join(dir, '.git') }) { +async function getRemoteInfo({ + http, + onAuth, + onAuthSuccess, + onAuthFailure, + corsProxy, + url, + headers = {}, + forPush = false, +}) { try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); + assertParameter('http', http); + assertParameter('url', url); - return await _listRemotes({ - fs: new FileSystem(fs), - gitdir, - }) + const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); + const remote = await GitRemoteHTTP.discover({ + http, + onAuth, + onAuthSuccess, + onAuthFailure, + corsProxy, + service: forPush ? 'git-receive-pack' : 'git-upload-pack', + url, + headers, + protocolVersion: 1, + }); + + // Note: remote.capabilities, remote.refs, and remote.symrefs are Set and Map objects, + // but one of the objectives of the public API is to always return JSON-compatible objects + // so we must JSONify them. + const result = { + capabilities: [...remote.capabilities], + }; + // Convert the flat list into an object tree, because I figure 99% of the time + // that will be easier to use. 
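+  // For example (illustrative values): the loops below nest each path segment, so a flat
+  // entry like ['refs/heads/main', '<oid>'] becomes result.refs.heads.main === '<oid>',
+  // and a symref like ['HEAD', 'refs/heads/main'] becomes result.HEAD === 'refs/heads/main'.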
+ for (const [ref, oid] of remote.refs) { + const parts = ref.split('/'); + const last = parts.pop(); + let o = result; + for (const part of parts) { + o[part] = o[part] || {}; + o = o[part]; + } + o[last] = oid; + } + // Merge symrefs on top of refs to more closely match actual git repo layouts + for (const [symref, ref] of remote.symrefs) { + const parts = symref.split('/'); + const last = parts.pop(); + let o = result; + for (const part of parts) { + o[part] = o[part] || {}; + o = o[part]; + } + o[last] = ref; + } + return result } catch (err) { - err.caller = 'git.listRemotes'; + err.caller = 'git.getRemoteInfo'; throw err } } +// @ts-check + /** - * @typedef {Object} ServerRef - This object has the following schema: - * @property {string} ref - The name of the ref - * @property {string} oid - The SHA-1 object id the ref points to - * @property {string} [target] - The target ref pointed to by a symbolic ref - * @property {string} [peeled] - If the oid is the SHA-1 object id of an annotated tag, this is the SHA-1 object id that the annotated tag points to + * @param {any} remote + * @param {string} prefix + * @param {boolean} symrefs + * @param {boolean} peelTags + * @returns {ServerRef[]} */ - -async function parseListRefsResponse(stream) { - const read = GitPktLine.streamReader(stream); - - // TODO: when we re-write everything to minimize memory usage, - // we could make this a generator +function formatInfoRefs(remote, prefix, symrefs, peelTags) { const refs = []; + for (const [key, value] of remote.refs) { + if (prefix && !key.startsWith(prefix)) continue - let line; - while (true) { - line = await read(); - if (line === true) break - if (line === null) continue - line = line.toString('utf8').replace(/\n$/, ''); - const [oid, ref, ...attrs] = line.split(' '); - const r = { ref, oid }; - for (const attr of attrs) { - const [name, value] = attr.split(':'); - if (name === 'symref-target') { - r.target = value; - } else if (name === 'peeled') { + if (key.endsWith('^{}')) { + if (peelTags) { + const _key = key.replace('^{}', ''); + // Peeled tags are almost always listed immediately after the original tag + const last = refs[refs.length - 1]; + const r = last.ref === _key ? 
last : refs.find(x => x.ref === _key); + if (r === undefined) { + throw new Error('I did not expect this to happen') + } r.peeled = value; } + continue } - refs.push(r); - } - - return refs -} - -/** - * @param {object} args - * @param {string} [args.prefix] - Only list refs that start with this prefix - * @param {boolean} [args.symrefs = false] - Include symbolic ref targets - * @param {boolean} [args.peelTags = false] - Include peeled tags values - * @returns {Uint8Array[]} - */ -async function writeListRefsRequest({ prefix, symrefs, peelTags }) { - const packstream = []; - // command - packstream.push(GitPktLine.encode('command=ls-refs\n')); - // capability-list - packstream.push(GitPktLine.encode(`agent=${pkg.agent}\n`)); - // [command-args] - if (peelTags || symrefs || prefix) { - packstream.push(GitPktLine.delim()); + /** @type ServerRef */ + const ref = { ref: key, oid: value }; + if (symrefs) { + if (remote.symrefs.has(key)) { + ref.target = remote.symrefs.get(key); + } + } + refs.push(ref); } - if (peelTags) packstream.push(GitPktLine.encode('peel')); - if (symrefs) packstream.push(GitPktLine.encode('symrefs')); - if (prefix) packstream.push(GitPktLine.encode(`ref-prefix ${prefix}`)); - packstream.push(GitPktLine.flush()); - return packstream + return refs } // @ts-check /** - * Fetch a list of refs (branches, tags, etc) from a server. + * @typedef {Object} GetRemoteInfo2Result - This object has the following schema: + * @property {1 | 2} protocolVersion - Git protocol version the server supports + * @property {Object} capabilities - An object of capabilities represented as keys and values + * @property {ServerRef[]} [refs] - Server refs (they get returned by protocol version 1 whether you want them or not) + */ + +/** + * List a remote server's capabilities. * * This is a rare command that doesn't require an `fs`, `dir`, or even `gitdir` argument. - * It just requires an `http` argument. - * - * ### About `protocolVersion` - * - * There's a rather fun trade-off between Git Protocol Version 1 and Git Protocol Version 2. - * Version 2 actually requires 2 HTTP requests instead of 1, making it similar to fetch or push in that regard. - * However, version 2 supports server-side filtering by prefix, whereas that filtering is done client-side in version 1. - * Which protocol is most efficient therefore depends on the number of refs on the remote, the latency of the server, and speed of the network connection. - * For an small repos (or fast Internet connections), the requirement to make two trips to the server makes protocol 2 slower. - * But for large repos (or slow Internet connections), the decreased payload size of the second request makes up for the additional request. - * - * Hard numbers vary by situation, but here's some numbers from my machine: - * - * Using isomorphic-git in a browser, with a CORS proxy, listing only the branches (refs/heads) of https://github.com/isomorphic-git/isomorphic-git - * - Protocol Version 1 took ~300ms and transfered 84 KB. - * - Protocol Version 2 took ~500ms and transfered 4.1 KB. - * - * Using isomorphic-git in a browser, with a CORS proxy, listing only the branches (refs/heads) of https://gitlab.com/gitlab-org/gitlab - * - Protocol Version 1 took ~4900ms and transfered 9.41 MB. - * - Protocol Version 2 took ~1280ms and transfered 433 KB. - * - * Finally, there is a fun quirk regarding the `symrefs` parameter. - * Protocol Version 1 will generally only return the `HEAD` symref and not others. 
- * Historically, this meant that servers don't use symbolic refs except for `HEAD`, which is used to point at the "default branch". - * However Protocol Version 2 can return *all* the symbolic refs on the server. - * So if you are running your own git server, you could take advantage of that I guess. + * It just communicates to a remote git server, determining what protocol version, commands, and features it supports. * - * #### TL;DR - * If you are _not_ taking advantage of `prefix` I would recommend `protocolVersion: 1`. - * Otherwise, I recommend to use the default which is `protocolVersion: 2`. + * > The successor to [`getRemoteInfo`](./getRemoteInfo.md), this command supports Git Wire Protocol Version 2. + * > Therefore its return type is more complicated as either: + * > + * > - v1 capabilities (and refs) or + * > - v2 capabilities (and no refs) + * > + * > are returned. + * > If you just care about refs, use [`listServerRefs`](./listServerRefs.md) * * @param {object} args * @param {HttpClient} args.http - an HTTP client @@ -27964,57 +27333,21 @@ async function writeListRefsRequest({ prefix, symrefs, peelTags }) { * @param {boolean} [args.forPush = false] - By default, the command queries the 'fetch' capabilities. If true, it will ask for the 'push' capabilities. * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config * @param {1 | 2} [args.protocolVersion = 2] - Which version of the Git Protocol to use. - * @param {string} [args.prefix] - Only list refs that start with this prefix - * @param {boolean} [args.symrefs = false] - Include symbolic ref targets - * @param {boolean} [args.peelTags = false] - Include annotated tag peeled targets * - * @returns {Promise} Resolves successfully with an array of ServerRef objects + * @returns {Promise} Resolves successfully with an object listing the capabilities of the remote. 
+ * @see GetRemoteInfo2Result * @see ServerRef * * @example - * // List all the branches on a repo - * let refs = await git.listServerRefs({ - * http, - * corsProxy: "https://cors.isomorphic-git.org", - * url: "https://github.com/isomorphic-git/isomorphic-git.git", - * prefix: "refs/heads/", - * }); - * console.log(refs); - * - * @example - * // Get the default branch on a repo - * let refs = await git.listServerRefs({ - * http, - * corsProxy: "https://cors.isomorphic-git.org", - * url: "https://github.com/isomorphic-git/isomorphic-git.git", - * prefix: "HEAD", - * symrefs: true, - * }); - * console.log(refs); - * - * @example - * // List all the tags on a repo - * let refs = await git.listServerRefs({ - * http, - * corsProxy: "https://cors.isomorphic-git.org", - * url: "https://github.com/isomorphic-git/isomorphic-git.git", - * prefix: "refs/tags/", - * peelTags: true, - * }); - * console.log(refs); - * - * @example - * // List all the pull requests on a repo - * let refs = await git.listServerRefs({ + * let info = await git.getRemoteInfo2({ * http, * corsProxy: "https://cors.isomorphic-git.org", - * url: "https://github.com/isomorphic-git/isomorphic-git.git", - * prefix: "refs/pull/", + * url: "https://github.com/isomorphic-git/isomorphic-git.git" * }); - * console.log(refs); + * console.log(info); * */ -async function listServerRefs({ +async function getRemoteInfo2({ http, onAuth, onAuthSuccess, @@ -28024,14 +27357,12 @@ async function listServerRefs({ headers = {}, forPush = false, protocolVersion = 2, - prefix, - symrefs, - peelTags, }) { try { assertParameter('http', http); assertParameter('url', url); + const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); const remote = await GitRemoteHTTP.discover({ http, onAuth, @@ -28044,402 +27375,205 @@ async function listServerRefs({ protocolVersion, }); - if (remote.protocolVersion === 1) { - return formatInfoRefs(remote, prefix, symrefs, peelTags) + if (remote.protocolVersion === 2) { + /** @type GetRemoteInfo2Result */ + return { + protocolVersion: remote.protocolVersion, + capabilities: remote.capabilities2, + } } - // Protocol Version 2 - const body = await writeListRefsRequest({ prefix, symrefs, peelTags }); - - const res = await GitRemoteHTTP.connect({ - http, - auth: remote.auth, - headers, - corsProxy, - service: forPush ? 
'git-receive-pack' : 'git-upload-pack', - url, - body, - }); - - return parseListRefsResponse(res.body) - } catch (err) { - err.caller = 'git.listServerRefs'; - throw err - } -} - -// @ts-check - -/** - * List tags - * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * - * @returns {Promise>} Resolves successfully with an array of tag names - * - * @example - * let tags = await git.listTags({ fs, dir: '/tutorial' }) - * console.log(tags) - * - */ -async function listTags({ fs, dir, gitdir = join(dir, '.git') }) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - return GitRefManager.listTags({ fs: new FileSystem(fs), gitdir }) - } catch (err) { - err.caller = 'git.listTags'; - throw err - } -} - -async function resolveCommit({ fs, cache, gitdir, oid }) { - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - // Resolve annotated tag objects to whatever - if (type === 'tag') { - oid = GitAnnotatedTag.from(object).parse().object; - return resolveCommit({ fs, cache, gitdir, oid }) - } - if (type !== 'commit') { - throw new ObjectTypeError(oid, type, 'commit') - } - return { commit: GitCommit.from(object), oid } -} - -// @ts-check - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string} args.oid - * - * @returns {Promise} Resolves successfully with a git commit object - * @see ReadCommitResult - * @see CommitObject - * - */ -async function _readCommit({ fs, cache, gitdir, oid }) { - const { commit, oid: commitOid } = await resolveCommit({ - fs, - cache, - gitdir, - oid, - }); - const result = { - oid: commitOid, - commit: commit.parse(), - payload: commit.withoutSignature(), - }; - // @ts-ignore - return result -} - -function compareAge(a, b) { - return a.committer.timestamp - b.committer.timestamp -} - -// @ts-check - -// the empty file content object id -const EMPTY_OID = 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'; - -async function resolveFileIdInTree({ fs, cache, gitdir, oid, fileId }) { - if (fileId === EMPTY_OID) return - const _oid = oid; - let filepath; - const result = await resolveTree({ fs, cache, gitdir, oid }); - const tree = result.tree; - if (fileId === result.oid) { - filepath = result.path; - } else { - filepath = await _resolveFileId({ - fs, - cache, - gitdir, - tree, - fileId, - oid: _oid, - }); - if (Array.isArray(filepath)) { - if (filepath.length === 0) filepath = undefined; - else if (filepath.length === 1) filepath = filepath[0]; + // Note: remote.capabilities, remote.refs, and remote.symrefs are Set and Map objects, + // but one of the objectives of the public API is to always return JSON-compatible objects + // so we must JSONify them. 
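+    // For example (illustrative values): the loop below records a v1 capability string like
+    // 'agent=git/isomorphic-git' as capabilities.agent === 'git/isomorphic-git', and a bare
+    // capability such as 'side-band-64k' as capabilities['side-band-64k'] === true.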
+ /** @type Object */ + const capabilities = {}; + for (const cap of remote.capabilities) { + const [key, value] = cap.split('='); + if (value) { + capabilities[key] = value; + } else { + capabilities[key] = true; + } + } + /** @type GetRemoteInfo2Result */ + return { + protocolVersion: 1, + capabilities, + refs: formatInfoRefs(remote, undefined, true, true), } + } catch (err) { + err.caller = 'git.getRemoteInfo2'; + throw err } - return filepath } -async function _resolveFileId({ - fs, - cache, - gitdir, - tree, - fileId, - oid, - filepaths = [], - parentPath = '', +async function hashObject({ + type, + object, + format = 'content', + oid = undefined, }) { - const walks = tree.entries().map(function(entry) { - let result; - if (entry.oid === fileId) { - result = join(parentPath, entry.path); - filepaths.push(result); - } else if (entry.type === 'tree') { - result = _readObject({ - fs, - cache, - gitdir, - oid: entry.oid, - }).then(function({ object }) { - return _resolveFileId({ - fs, - cache, - gitdir, - tree: GitTree.from(object), - fileId, - oid, - filepaths, - parentPath: join(parentPath, entry.path), - }) - }); + if (format !== 'deflated') { + if (format !== 'wrapped') { + object = GitObject.wrap({ type, object }); } - return result - }); - - await Promise.all(walks); - return filepaths + oid = await shasum(object); + } + return { oid, object } } // @ts-check /** - * Get commit descriptions from the git history + * + * @typedef {object} HashBlobResult - The object returned has the following schema: + * @property {string} oid - The SHA-1 object id + * @property {'blob'} type - The type of the object + * @property {Uint8Array} object - The wrapped git object (the thing that is hashed) + * @property {'wrapped'} format - The format of the object + * + */ + +/** + * Compute what the SHA-1 object id of a file would be * * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string=} args.filepath optional get the commit for the filepath only - * @param {string} args.ref - * @param {number|void} args.depth - * @param {boolean=} [args.force=false] do not throw error if filepath is not exist (works only for a single file). defaults to false - * @param {boolean=} [args.follow=false] Continue listing the history of a file beyond renames (works only for a single file). defaults to false - * @param {boolean=} args.follow Continue listing the history of a file beyond renames (works only for a single file). defaults to false + * @param {Uint8Array|string} args.object - The object to write. If `object` is a String then it will be converted to a Uint8Array using UTF-8 encoding. * - * @returns {Promise>} Resolves to an array of ReadCommitResult objects - * @see ReadCommitResult - * @see CommitObject + * @returns {Promise} Resolves successfully with the SHA-1 object id and the wrapped object Uint8Array. + * @see HashBlobResult * * @example - * let commits = await git.log({ dir: '$input((/))', depth: $input((5)), ref: '$input((master))' }) - * console.log(commits) + * let { oid, type, object, format } = await git.hashBlob({ + * object: 'Hello world!', + * }) + * + * console.log('oid', oid) + * console.log('type', type) + * console.log('object', object) + * console.log('format', format) * */ -async function _log({ - fs, - cache, - gitdir, - filepath, - ref, - depth, - since, - force, - follow, -}) { - const sinceTimestamp = - typeof since === 'undefined' - ? 
undefined - : Math.floor(since.valueOf() / 1000); - // TODO: In the future, we may want to have an API where we return a - // async iterator that emits commits. - const commits = []; - const shallowCommits = await GitShallowManager.read({ fs, gitdir }); - const oid = await GitRefManager.resolve({ fs, gitdir, ref }); - const tips = [await _readCommit({ fs, cache, gitdir, oid })]; - let lastFileOid; - let lastCommit; - let isOk; - - function endCommit(commit) { - if (isOk && filepath) commits.push(commit); - } - - while (tips.length > 0) { - const commit = tips.pop(); - - // Stop the log if we've hit the age limit - if ( - sinceTimestamp !== undefined && - commit.commit.committer.timestamp <= sinceTimestamp - ) { - break - } +async function hashBlob({ object }) { + try { + assertParameter('object', object); - if (filepath) { - let vFileOid; - try { - vFileOid = await resolveFilepath({ - fs, - cache, - gitdir, - oid: commit.commit.tree, - filepath, - }); - if (lastCommit && lastFileOid !== vFileOid) { - commits.push(lastCommit); - } - lastFileOid = vFileOid; - lastCommit = commit; - isOk = true; - } catch (e) { - if (e instanceof NotFoundError) { - let found = follow && lastFileOid; - if (found) { - found = await resolveFileIdInTree({ - fs, - cache, - gitdir, - oid: commit.commit.tree, - fileId: lastFileOid, - }); - if (found) { - if (Array.isArray(found)) { - if (lastCommit) { - const lastFound = await resolveFileIdInTree({ - fs, - cache, - gitdir, - oid: lastCommit.commit.tree, - fileId: lastFileOid, - }); - if (Array.isArray(lastFound)) { - found = found.filter(p => lastFound.indexOf(p) === -1); - if (found.length === 1) { - found = found[0]; - filepath = found; - if (lastCommit) commits.push(lastCommit); - } else { - found = false; - if (lastCommit) commits.push(lastCommit); - break - } - } - } - } else { - filepath = found; - if (lastCommit) commits.push(lastCommit); - } - } - } - if (!found) { - if (!force && !follow) throw e - if (isOk && lastFileOid) { - commits.push(lastCommit); - // break - } - } - lastCommit = commit; - isOk = false; - } else throw e - } + // Convert object to buffer + if (typeof object === 'string') { + object = Buffer.from(object, 'utf8'); } else { - commits.push(commit); + object = Buffer.from(object); } - // Stop the loop if we have enough commits now. - if (depth !== undefined && commits.length === depth) { - endCommit(commit); - break - } + const type = 'blob'; + const { oid, object: _object } = await hashObject({ + type: 'blob', + format: 'content', + object, + }); + return { oid, type, object: new Uint8Array(_object), format: 'wrapped' } + } catch (err) { + err.caller = 'git.hashBlob'; + throw err + } +} - // If this is not a shallow commit... - if (!shallowCommits.has(commit.oid)) { - // Add the parents of this commit to the queue - // Note: for the case of a commit with no parents, it will concat an empty array, having no net effect. 
- for (const oid of commit.commit.parent) { - const commit = await _readCommit({ fs, cache, gitdir, oid }); - if (!tips.map(commit => commit.oid).includes(commit.oid)) { - tips.push(commit); - } - } - } +// @ts-check - // Stop the loop if there are no more commit parents - if (tips.length === 0) { - endCommit(commit); +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {ProgressCallback} [args.onProgress] + * @param {string} args.dir + * @param {string} args.gitdir + * @param {string} args.filepath + * + * @returns {Promise<{oids: string[]}>} + */ +async function _indexPack({ + fs, + cache, + onProgress, + dir, + gitdir, + filepath, +}) { + try { + filepath = join(dir, filepath); + const pack = await fs.read(filepath); + const getExternalRefDelta = oid => _readObject({ fs, cache, gitdir, oid }); + const idx = await GitPackIndex.fromPack({ + pack, + getExternalRefDelta, + onProgress, + }); + await fs.write(filepath.replace(/\.pack$/, '.idx'), await idx.toBuffer()); + return { + oids: [...idx.hashes], } - - // Process tips in order by age - tips.sort((a, b) => compareAge(a.commit, b.commit)); + } catch (err) { + err.caller = 'git.indexPack'; + throw err } - return commits } // @ts-check /** - * Get commit descriptions from the git history + * Create the .idx file for a given .pack file * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {ProgressCallback} [args.onProgress] - optional progress event callback + * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string=} args.filepath optional get the commit for the filepath only - * @param {string} [args.ref = 'HEAD'] - The commit to begin walking backwards through the history from - * @param {number=} [args.depth] - Limit the number of commits returned. No limit by default. - * @param {Date} [args.since] - Return history newer than the given date. Can be combined with `depth` to get whichever is shorter. - * @param {boolean=} [args.force=false] do not throw error if filepath is not exist (works only for a single file). defaults to false - * @param {boolean=} [args.follow=false] Continue listing the history of a file beyond renames (works only for a single file). 
defaults to false + * @param {string} args.filepath - The path to the .pack file to index * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise>} Resolves to an array of ReadCommitResult objects - * @see ReadCommitResult - * @see CommitObject + * @returns {Promise<{oids: string[]}>} Resolves with a list of the SHA-1 object ids contained in the packfile * * @example - * let commits = await git.log({ + * let packfiles = await fs.promises.readdir('/tutorial/.git/objects/pack') + * packfiles = packfiles.filter(name => name.endsWith('.pack')) + * console.log('packfiles', packfiles) + * + * const { oids } = await git.indexPack({ * fs, * dir: '/tutorial', - * depth: 5, - * ref: 'main' + * filepath: `.git/objects/pack/${packfiles[0]}`, + * async onProgress (evt) { + * console.log(`${evt.phase}: ${evt.loaded} / ${evt.total}`) + * } * }) - * console.log(commits) + * console.log(oids) * */ -async function log({ +async function indexPack({ fs, + onProgress, dir, gitdir = join(dir, '.git'), filepath, - ref = 'HEAD', - depth, - since, // Date - force, - follow, cache = {}, }) { try { assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('ref', ref); + assertParameter('dir', dir); + assertParameter('gitdir', dir); + assertParameter('filepath', filepath); - return await _log({ + return await _indexPack({ fs: new FileSystem(fs), cache, + onProgress, + dir, gitdir, filepath, - ref, - depth, - since, - force, - follow, }) } catch (err) { - err.caller = 'git.log'; + err.caller = 'git.indexPack'; throw err } } @@ -28447,286 +27581,168 @@ async function log({ // @ts-check /** - * - * @typedef {Object} MergeResult - Returns an object with a schema like this: - * @property {string} [oid] - The SHA-1 object id that is now at the head of the branch. Absent only if `dryRun` was specified and `mergeCommit` is true. - * @property {boolean} [alreadyMerged] - True if the branch was already merged so no changes were made - * @property {boolean} [fastForward] - True if it was a fast-forward merge - * @property {boolean} [mergeCommit] - True if merge resulted in a merge commit - * @property {string} [tree] - The SHA-1 object id of the tree resulting from a merge commit - * - */ - -/** - * Merge two branches - * - * ## Limitations - * - * Currently it does not support incomplete merges. That is, if there are merge conflicts it cannot solve - * with the built in diff3 algorithm it will not modify the working dir, and will throw a [`MergeNotSupportedError`](./errors.md#mergenotsupportedError) error. - * - * Currently it will fail if multiple candidate merge bases are found. (It doesn't yet implement the recursive merge strategy.) - * - * Currently it does not support selecting alternative merge strategies. + * Initialize a new repository * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {SignCallback} [args.onSign] - a PGP signing implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ours] - The branch receiving the merge. If undefined, defaults to the current branch. - * @param {string} args.theirs - The branch to be merged - * @param {boolean} [args.fastForwardOnly = false] - If true, then non-fast-forward merges will throw an Error instead of performing a merge. 
- * @param {boolean} [args.dryRun = false] - If true, simulates a merge so you can test whether it would succeed. - * @param {boolean} [args.noUpdateBranch = false] - If true, does not update the branch pointer after creating the commit. - * @param {string} [args.message] - Overrides the default auto-generated merge commit message - * @param {Object} [args.author] - passed to [commit](commit.md) when creating a merge commit - * @param {string} [args.author.name] - Default is `user.name` config. - * @param {string} [args.author.email] - Default is `user.email` config. - * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {Object} [args.committer] - passed to [commit](commit.md) when creating a merge commit - * @param {string} [args.committer.name] - Default is `user.name` config. - * @param {string} [args.committer.email] - Default is `user.email` config. - * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {string} [args.signingKey] - passed to [commit](commit.md) when creating a merge commit - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves to a description of the merge operation - * @see MergeResult + * @param {boolean} [args.bare = false] - Initialize a bare repository + * @param {string} [args.defaultBranch = 'master'] - The name of the default branch (might be changed to a required argument in 2.0.0) + * @returns {Promise} Resolves successfully when filesystem operations are complete * * @example - * let m = await git.merge({ - * fs, - * dir: '/tutorial', - * ours: 'main', - * theirs: 'remotes/origin/main' - * }) - * console.log(m) + * await git.init({ fs, dir: '/tutorial' }) + * console.log('done') * */ -async function merge({ - fs: _fs, - onSign, - dir, - gitdir = join(dir, '.git'), - ours, - theirs, - fastForwardOnly = false, - dryRun = false, - noUpdateBranch = false, - message, - author: _author, - committer: _committer, - signingKey, - cache = {}, -}) { - try { - assertParameter('fs', _fs); - if (signingKey) { - assertParameter('onSign', onSign); - } - const fs = new FileSystem(_fs); - - const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); - if (!author && !fastForwardOnly) throw new MissingNameError('author') - - const committer = await normalizeCommitterObject({ - fs, - gitdir, - author, - committer: _committer, - }); - if (!committer && !fastForwardOnly) { - throw new MissingNameError('committer') - } - - return await _merge({ - fs, - cache, - gitdir, - ours, - theirs, - fastForwardOnly, - dryRun, - noUpdateBranch, - message, - author, - committer, - signingKey, - onSign, - }) - } catch (err) { - err.caller = 'git.merge'; - throw err - } -} - -/** - * @enum {number} - */ -const types = { - commit: 0b0010000, - tree: 0b0100000, - blob: 0b0110000, - tag: 0b1000000, - ofs_delta: 0b1100000, - ref_delta: 
0b1110000, -}; - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string[]} args.oids - */ -async function _pack({ +async function init({ fs, - cache, + bare = false, dir, - gitdir = join(dir, '.git'), - oids, + gitdir = bare ? dir : join(dir, '.git'), + defaultBranch = 'master', }) { - const hash = new Hash(); - const outputStream = []; - function write(chunk, enc) { - const buff = Buffer.from(chunk, enc); - outputStream.push(buff); - hash.update(buff); - } - async function writeObject({ stype, object }) { - // Object type is encoded in bits 654 - const type = types[stype]; - // The length encoding gets complicated. - let length = object.length; - // Whether the next byte is part of the variable-length encoded number - // is encoded in bit 7 - let multibyte = length > 0b1111 ? 0b10000000 : 0b0; - // Last four bits of length is encoded in bits 3210 - const lastFour = length & 0b1111; - // Discard those bits - length = length >>> 4; - // The first byte is then (1-bit multibyte?), (3-bit type), (4-bit least sig 4-bits of length) - let byte = (multibyte | type | lastFour).toString(16); - write(byte, 'hex'); - // Now we keep chopping away at length 7-bits at a time until its zero, - // writing out the bytes in what amounts to little-endian order. - while (multibyte) { - multibyte = length > 0b01111111 ? 0b10000000 : 0b0; - byte = multibyte | (length & 0b01111111); - write(padHex(2, byte), 'hex'); - length = length >>> 7; - } - // Lastly, we can compress and write the object. - write(Buffer.from(await deflate(object))); - } - write('PACK'); - write('00000002', 'hex'); - // Write a 4 byte (32-bit) int - write(padHex(8, oids.length), 'hex'); - for (const oid of oids) { - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - await writeObject({ write, object, stype: type }); + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + if (!bare) { + assertParameter('dir', dir); + } + + return await _init({ + fs: new FileSystem(fs), + bare, + dir, + gitdir, + defaultBranch, + }) + } catch (err) { + err.caller = 'git.init'; + throw err } - // Write SHA1 checksum - const digest = hash.digest(); - outputStream.push(digest); - return outputStream } // @ts-check -/** - * - * @typedef {Object} PackObjectsResult The packObjects command returns an object with two properties: - * @property {string} filename - The suggested filename for the packfile if you want to save it to disk somewhere. It includes the packfile SHA. - * @property {Uint8Array} [packfile] - The packfile contents. Not present if `write` parameter was true, in which case the packfile was written straight to disk. - */ - /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs * @param {any} args.cache * @param {string} args.gitdir - * @param {string[]} args.oids - * @param {boolean} args.write + * @param {string} args.oid + * @param {string} args.ancestor + * @param {number} args.depth - Maximum depth to search before giving up. -1 means no maximum depth. 
* - * @returns {Promise} - * @see PackObjectsResult + * @returns {Promise} */ -async function _packObjects({ fs, cache, gitdir, oids, write }) { - const buffers = await _pack({ fs, cache, gitdir, oids }); - const packfile = Buffer.from(await collect(buffers)); - const packfileSha = packfile.slice(-20).toString('hex'); - const filename = `pack-${packfileSha}.pack`; - if (write) { - await fs.write(join(gitdir, `objects/pack/${filename}`), packfile); - return { filename } +async function _isDescendent({ + fs, + cache, + gitdir, + oid, + ancestor, + depth, +}) { + const shallows = await GitShallowManager.read({ fs, gitdir }); + if (!oid) { + throw new MissingParameterError('oid') } - return { - filename, - packfile: new Uint8Array(packfile), + if (!ancestor) { + throw new MissingParameterError('ancestor') + } + // If you don't like this behavior, add your own check. + // Edge cases are hard to define a perfect solution. + if (oid === ancestor) return false + // We do not use recursion here, because that would lead to depth-first traversal, + // and we want to maintain a breadth-first traversal to avoid hitting shallow clone depth cutoffs. + const queue = [oid]; + const visited = new Set(); + let searchdepth = 0; + while (queue.length) { + if (searchdepth++ === depth) { + throw new MaxDepthError(depth) + } + const oid = queue.shift(); + const { type, object } = await _readObject({ + fs, + cache, + gitdir, + oid, + }); + if (type !== 'commit') { + throw new ObjectTypeError(oid, type, 'commit') + } + const commit = GitCommit.from(object).parse(); + // Are any of the parents the sought-after ancestor? + for (const parent of commit.parent) { + if (parent === ancestor) return true + } + // If not, add them to heads (unless we know this is a shallow commit) + if (!shallows.has(oid)) { + for (const parent of commit.parent) { + if (!visited.has(parent)) { + queue.push(parent); + visited.add(parent); + } + } + } + // Eventually, we'll travel entire tree to the roots where all the parents are empty arrays, + // or hit the shallow depth and throw an error. Excluding the possibility of grafts, or + // different branches cloned to different depths, you would hit this error at the same time + // for all parents, so trying to continue is futile. } + return false } // @ts-check /** - * - * @typedef {Object} PackObjectsResult The packObjects command returns an object with two properties: - * @property {string} filename - The suggested filename for the packfile if you want to save it to disk somewhere. It includes the packfile SHA. - * @property {Uint8Array} [packfile] - The packfile contents. Not present if `write` parameter was true, in which case the packfile was written straight to disk. 
- */ - -/** - * Create a packfile from an array of SHA-1 object ids + * Check whether a git commit is descended from another * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string[]} args.oids - An array of SHA-1 object ids to be included in the packfile - * @param {boolean} [args.write = false] - Whether to save the packfile to disk or not + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.oid - The descendent commit + * @param {string} args.ancestor - The (proposed) ancestor commit + * @param {number} [args.depth = -1] - Maximum depth to search before giving up. -1 means no maximum depth. * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully when the packfile is ready with the filename and buffer - * @see PackObjectsResult + * @returns {Promise} Resolves to true if `oid` is a descendent of `ancestor` * * @example - * // Create a packfile containing only an empty tree - * let { packfile } = await git.packObjects({ - * fs, - * dir: '/tutorial', - * oids: ['4b825dc642cb6eb9a060e54bf8d69288fbee4904'] - * }) - * console.log(packfile) + * let oid = await git.resolveRef({ fs, dir: '/tutorial', ref: 'main' }) + * let ancestor = await git.resolveRef({ fs, dir: '/tutorial', ref: 'v0.20.0' }) + * console.log(oid, ancestor) + * await git.isDescendent({ fs, dir: '/tutorial', oid, ancestor, depth: -1 }) * */ -async function packObjects({ +async function isDescendent({ fs, dir, gitdir = join(dir, '.git'), - oids, - write = false, + oid, + ancestor, + depth = -1, cache = {}, }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('oids', oids); + assertParameter('oid', oid); + assertParameter('ancestor', ancestor); - return await _packObjects({ + return await _isDescendent({ fs: new FileSystem(fs), cache, gitdir, - oids, - write, + oid, + ancestor, + depth, }) } catch (err) { - err.caller = 'git.packObjects'; + err.caller = 'git.isDescendent'; throw err } } @@ -28734,903 +27750,916 @@ async function packObjects({ // @ts-check /** - * Fetch and merge commits from a remote repository + * Test whether a filepath should be ignored (because of .gitignore or .git/exclude) * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {HttpClient} args.http - an HTTP client - * @param {ProgressCallback} [args.onProgress] - optional progress event callback - * @param {MessageCallback} [args.onMessage] - optional message event callback - * @param {AuthCallback} [args.onAuth] - optional auth fill callback - * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback - * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback - * @param {string} args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - Which branch to merge into. By default this is the currently checked out branch. - * @param {string} [args.url] - (Added in 1.1.0) The URL of the remote repository. The default is the value set in the git config for that remote. 
- * @param {string} [args.remote] - (Added in 1.1.0) If URL is not specified, determines which remote to use. - * @param {string} [args.remoteRef] - (Added in 1.1.0) The name of the branch on the remote to fetch. By default this is the configured remote tracking branch. - * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. - * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. - * @param {boolean} [args.fastForwardOnly = false] - Only perform simple fast-forward merges. (Don't create merge commits.) - * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config - * @param {Object} [args.author] - The details about the author. - * @param {string} [args.author.name] - Default is `user.name` config. - * @param {string} [args.author.email] - Default is `user.email` config. - * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {Object} [args.committer = author] - The details about the commit committer, in the same format as the author parameter. If not specified, the author details are used. - * @param {string} [args.committer.name] - Default is `user.name` config. - * @param {string} [args.committer.email] - Default is `user.email` config. - * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. 
- * @param {string} [args.signingKey] - passed to [commit](commit.md) when creating a merge commit - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.filepath - The filepath to test * - * @returns {Promise} Resolves successfully when pull operation completes + * @returns {Promise} Resolves to true if the file should be ignored * * @example - * await git.pull({ - * fs, - * http, - * dir: '/tutorial', - * ref: 'main', - * singleBranch: true - * }) - * console.log('done') + * await git.isIgnored({ fs, dir: '/tutorial', filepath: 'docs/add.md' }) * */ -async function pull({ - fs: _fs, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, +async function isIgnored({ + fs, dir, gitdir = join(dir, '.git'), - ref, - url, - remote, - remoteRef, - fastForwardOnly = false, - corsProxy, - singleBranch, - headers = {}, - author: _author, - committer: _committer, - signingKey, - cache = {}, + filepath, }) { try { - assertParameter('fs', _fs); + assertParameter('fs', fs); + assertParameter('dir', dir); assertParameter('gitdir', gitdir); + assertParameter('filepath', filepath); - const fs = new FileSystem(_fs); - - const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); - if (!author) throw new MissingNameError('author') - - const committer = await normalizeCommitterObject({ - fs, - gitdir, - author, - committer: _committer, - }); - if (!committer) throw new MissingNameError('committer') - - return await _pull({ - fs, - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, + return GitIgnoreManager.isIgnored({ + fs: new FileSystem(fs), dir, gitdir, - ref, - url, - remote, - remoteRef, - fastForwardOnly, - corsProxy, - singleBranch, - headers, - author, - committer, - signingKey, + filepath, }) } catch (err) { - err.caller = 'git.pull'; + err.caller = 'git.isIgnored'; throw err } } +// @ts-check + /** + * List branches + * + * By default it lists local branches. If a 'remote' is specified, it lists the remote's branches. When listing remote branches, the HEAD branch is not filtered out, so it may be included in the list of results. + * + * Note that specifying a remote does not actually contact the server and update the list of branches. + * If you want an up-to-date list, first do a `fetch` to that remote. + * (Which branch you fetch doesn't matter - the list of branches available on the remote is updated during the fetch handshake.) + * * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} [args.dir] - * @param {string} args.gitdir - * @param {Iterable} args.start - * @param {Iterable} args.finish - * @returns {Promise>} + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.remote] - Instead of the branches in `refs/heads`, list the branches in `refs/remotes/${remote}`. 
+ * + * @returns {Promise>} Resolves successfully with an array of branch names + * + * @example + * let branches = await git.listBranches({ fs, dir: '/tutorial' }) + * console.log(branches) + * let remoteBranches = await git.listBranches({ fs, dir: '/tutorial', remote: 'origin' }) + * console.log(remoteBranches) + * */ -async function listCommitsAndTags({ +async function listBranches({ fs, - cache, dir, gitdir = join(dir, '.git'), - start, - finish, + remote, }) { - const shallows = await GitShallowManager.read({ fs, gitdir }); - const startingSet = new Set(); - const finishingSet = new Set(); - for (const ref of start) { - startingSet.add(await GitRefManager.resolve({ fs, gitdir, ref })); - } - for (const ref of finish) { - // We may not have these refs locally so we must try/catch - try { - const oid = await GitRefManager.resolve({ fs, gitdir, ref }); - finishingSet.add(oid); - } catch (err) {} - } - const visited = new Set(); - // Because git commits are named by their hash, there is no - // way to construct a cycle. Therefore we won't worry about - // setting a default recursion limit. - async function walk(oid) { - visited.add(oid); - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - // Recursively resolve annotated tags - if (type === 'tag') { - const tag = GitAnnotatedTag.from(object); - const commit = tag.headers().object; - return walk(commit) - } - if (type !== 'commit') { - throw new ObjectTypeError(oid, type, 'commit') - } - if (!shallows.has(oid)) { - const commit = GitCommit.from(object); - const parents = commit.headers().parent; - for (oid of parents) { - if (!finishingSet.has(oid) && !visited.has(oid)) { - await walk(oid); - } - } - } - } - // Let's go walking! - for (const oid of startingSet) { - await walk(oid); + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + + return GitRefManager.listBranches({ + fs: new FileSystem(fs), + gitdir, + remote, + }) + } catch (err) { + err.caller = 'git.listBranches'; + throw err } - return visited } +// @ts-check + /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} [args.dir] + * @param {object} args.cache * @param {string} args.gitdir - * @param {Iterable} args.oids - * @returns {Promise>} + * @param {string} [args.ref] + * + * @returns {Promise>} */ -async function listObjects({ +async function _listFiles({ fs, gitdir, ref, cache }) { + if (ref) { + const oid = await GitRefManager.resolve({ gitdir, fs, ref }); + const filenames = []; + await accumulateFilesFromOid({ + fs, + cache, + gitdir, + oid, + filenames, + prefix: '', + }); + return filenames + } else { + return GitIndexManager.acquire({ fs, gitdir, cache }, async function( + index + ) { + return index.entries.map(x => x.path) + }) + } +} + +async function accumulateFilesFromOid({ fs, cache, - dir, - gitdir = join(dir, '.git'), - oids, + gitdir, + oid, + filenames, + prefix, }) { - const visited = new Set(); - // We don't do the purest simplest recursion, because we can - // avoid reading Blob objects entirely since the Tree objects - // tell us which oids are Blobs and which are Trees. 
- async function walk(oid) { - if (visited.has(oid)) return - visited.add(oid); - const { type, object } = await _readObject({ fs, cache, gitdir, oid }); - if (type === 'tag') { - const tag = GitAnnotatedTag.from(object); - const obj = tag.headers().object; - await walk(obj); - } else if (type === 'commit') { - const commit = GitCommit.from(object); - const tree = commit.headers().tree; - await walk(tree); - } else if (type === 'tree') { - const tree = GitTree.from(object); - for (const entry of tree) { - // add blobs to the set - // skip over submodules whose type is 'commit' - if (entry.type === 'blob') { - visited.add(entry.oid); - } - // recurse for trees - if (entry.type === 'tree') { - await walk(entry.oid); - } - } + const { tree } = await _readTree({ fs, cache, gitdir, oid }); + // TODO: Use `walk` to do this. Should be faster. + for (const entry of tree) { + if (entry.type === 'tree') { + await accumulateFilesFromOid({ + fs, + cache, + gitdir, + oid: entry.oid, + filenames, + prefix: join(prefix, entry.path), + }); + } else { + filenames.push(join(prefix, entry.path)); } } - // Let's go walking! - for (const oid of oids) { - await walk(oid); - } - return visited } -async function parseReceivePackResponse(packfile) { - /** @type PushResult */ - const result = {}; - let response = ''; - const read = GitPktLine.streamReader(packfile); - let line = await read(); - while (line !== true) { - if (line !== null) response += line.toString('utf8') + '\n'; - line = await read(); - } - - const lines = response.toString('utf8').split('\n'); - // We're expecting "unpack {unpack-result}" - line = lines.shift(); - if (!line.startsWith('unpack ')) { - throw new ParseError('unpack ok" or "unpack [error message]', line) - } - result.ok = line === 'unpack ok'; - if (!result.ok) { - result.error = line.slice('unpack '.length); - } - result.refs = {}; - for (const line of lines) { - if (line.trim() === '') continue - const status = line.slice(0, 2); - const refAndMessage = line.slice(3); - let space = refAndMessage.indexOf(' '); - if (space === -1) space = refAndMessage.length; - const ref = refAndMessage.slice(0, space); - const error = refAndMessage.slice(space + 1); - result.refs[ref] = { - ok: status === 'ok', - error, - }; - } - return result -} +// @ts-check -async function writeReceivePackRequest({ - capabilities = [], - triplets = [], -}) { - const packstream = []; - let capsFirstLine = `\x00 ${capabilities.join(' ')}`; - for (const trip of triplets) { - packstream.push( - GitPktLine.encode( - `${trip.oldoid} ${trip.oid} ${trip.fullRef}${capsFirstLine}\n` - ) - ); - capsFirstLine = ''; +/** + * List all the files in the git index or a commit + * + * > Note: This function is efficient for listing the files in the staging area, but listing all the files in a commit requires recursively walking through the git object store. + * > If you do not require a complete list of every file, better performance can be achieved by using [walk](./walk) and ignoring subdirectories you don't care about. 
+ * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.ref] - Return a list of all the files in the commit at `ref` instead of the files currently in the git index (aka staging area) + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise>} Resolves successfully with an array of filepaths + * + * @example + * // All the files in the previous commit + * let files = await git.listFiles({ fs, dir: '/tutorial', ref: 'HEAD' }) + * console.log(files) + * // All the files in the current staging area + * files = await git.listFiles({ fs, dir: '/tutorial' }) + * console.log(files) + * + */ +async function listFiles({ + fs, + dir, + gitdir = join(dir, '.git'), + ref, + cache = {}, +}) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + + return await _listFiles({ + fs: new FileSystem(fs), + cache, + gitdir, + ref, + }) + } catch (err) { + err.caller = 'git.listFiles'; + throw err } - packstream.push(GitPktLine.flush()); - return packstream } // @ts-check /** + * List all the object notes + * * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs * @param {any} args.cache - * @param {HttpClient} args.http - * @param {ProgressCallback} [args.onProgress] - * @param {MessageCallback} [args.onMessage] - * @param {AuthCallback} [args.onAuth] - * @param {AuthFailureCallback} [args.onAuthFailure] - * @param {AuthSuccessCallback} [args.onAuthSuccess] * @param {string} args.gitdir - * @param {string} [args.ref] - * @param {string} [args.remoteRef] - * @param {string} [args.remote] - * @param {boolean} [args.force = false] - * @param {boolean} [args.delete = false] - * @param {string} [args.url] - * @param {string} [args.corsProxy] - * @param {Object} [args.headers] + * @param {string} args.ref * - * @returns {Promise} + * @returns {Promise>} */ -async function _push({ + +async function _listNotes({ fs, cache, gitdir, ref }) { + // Get the current note commit + let parent; + try { + parent = await GitRefManager.resolve({ gitdir, fs, ref }); + } catch (err) { + if (err instanceof NotFoundError) { + return [] + } + } + + // Create the current note tree + const result = await _readTree({ + fs, + cache, + gitdir, + oid: parent, + }); + + // Format the tree entries + const notes = result.tree.map(entry => ({ + target: entry.path, + note: entry.oid, + })); + return notes +} + +// @ts-check + +/** + * List all the object notes + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.ref] - The notes ref to look under + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise>} Resolves successfully with an array of entries containing SHA-1 object ids of the note and the object the note targets + */ + +async function listNotes({ fs, - cache, - http, - onProgress, - onMessage, - onAuth, - onAuthSuccess, - onAuthFailure, - gitdir, - ref: _ref, - remoteRef: _remoteRef, - remote, - url: _url, - force = false, - delete: _delete = false, - corsProxy, - headers = {}, + dir, + gitdir = join(dir, '.git'), + ref = 
'refs/notes/commits', + cache = {}, }) { - const ref = _ref || (await _currentBranch({ fs, gitdir })); - if (typeof ref === 'undefined') { - throw new MissingParameterError('ref') - } - const config = await GitConfigManager.get({ fs, gitdir }); - // Figure out what remote to use. - remote = - remote || - (await config.get(`branch.${ref}.pushRemote`)) || - (await config.get('remote.pushDefault')) || - (await config.get(`branch.${ref}.remote`)) || - 'origin'; - // Lookup the URL for the given remote. - const url = - _url || - (await config.get(`remote.${remote}.pushurl`)) || - (await config.get(`remote.${remote}.url`)); - if (typeof url === 'undefined') { - throw new MissingParameterError('remote OR url') - } - // Figure out what remote ref to use. - const remoteRef = _remoteRef || (await config.get(`branch.${ref}.merge`)); - if (typeof url === 'undefined') { - throw new MissingParameterError('remoteRef') - } + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('ref', ref); - if (corsProxy === undefined) { - corsProxy = await config.get('http.corsProxy'); + return await _listNotes({ + fs: new FileSystem(fs), + cache, + gitdir, + ref, + }) + } catch (err) { + err.caller = 'git.listNotes'; + throw err } +} - const fullRef = await GitRefManager.expand({ fs, gitdir, ref }); - const oid = _delete - ? '0000000000000000000000000000000000000000' - : await GitRefManager.resolve({ fs, gitdir, ref: fullRef }); +// @ts-check - /** @type typeof import("../managers/GitRemoteHTTP").GitRemoteHTTP */ - const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); - const httpRemote = await GitRemoteHTTP.discover({ - http, - onAuth, - onAuthSuccess, - onAuthFailure, - corsProxy, - service: 'git-receive-pack', - url, - headers, - protocolVersion: 1, - }); - const auth = httpRemote.auth; // hack to get new credentials from CredentialManager API - let fullRemoteRef; - if (!remoteRef) { - fullRemoteRef = fullRef; - } else { - try { - fullRemoteRef = await GitRefManager.expandAgainstMap({ - ref: remoteRef, - map: httpRemote.refs, - }); - } catch (err) { - if (err instanceof NotFoundError) { - // The remote reference doesn't exist yet. - // If it is fully specified, use that value. Otherwise, treat it as a branch. - fullRemoteRef = remoteRef.startsWith('refs/') - ? 
remoteRef - : `refs/heads/${remoteRef}`; - } else { - throw err - } - } - } - const oldoid = - httpRemote.refs.get(fullRemoteRef) || - '0000000000000000000000000000000000000000'; +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * + * @returns {Promise>} + */ +async function _listRemotes({ fs, gitdir }) { + const config = await GitConfigManager.get({ fs, gitdir }); + const remoteNames = await config.getSubsections('remote'); + const remotes = Promise.all( + remoteNames.map(async remote => { + const url = await config.get(`remote.${remote}.url`); + return { remote, url } + }) + ); + return remotes +} - // Remotes can always accept thin-packs UNLESS they specify the 'no-thin' capability - const thinPack = !httpRemote.capabilities.has('no-thin'); +// @ts-check - let objects = new Set(); - if (!_delete) { - const finish = [...httpRemote.refs.values()]; - let skipObjects = new Set(); +/** + * List remotes + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * + * @returns {Promise>} Resolves successfully with an array of `{remote, url}` objects + * + * @example + * let remotes = await git.listRemotes({ fs, dir: '/tutorial' }) + * console.log(remotes) + * + */ +async function listRemotes({ fs, dir, gitdir = join(dir, '.git') }) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); - // If remote branch is present, look for a common merge base. - if (oldoid !== '0000000000000000000000000000000000000000') { - // trick to speed up common force push scenarios - const mergebase = await _findMergeBase({ - fs, - cache, - gitdir, - oids: [oid, oldoid], - }); - for (const oid of mergebase) finish.push(oid); - if (thinPack) { - skipObjects = await listObjects({ fs, cache, gitdir, oids: mergebase }); - } - } + return await _listRemotes({ + fs: new FileSystem(fs), + gitdir, + }) + } catch (err) { + err.caller = 'git.listRemotes'; + throw err + } +} - // If remote does not have the commit, figure out the objects to send - if (!finish.includes(oid)) { - const commits = await listCommitsAndTags({ - fs, - cache, - gitdir, - start: [oid], - finish, - }); - objects = await listObjects({ fs, cache, gitdir, oids: commits }); - } +/** + * @typedef {Object} ServerRef - This object has the following schema: + * @property {string} ref - The name of the ref + * @property {string} oid - The SHA-1 object id the ref points to + * @property {string} [target] - The target ref pointed to by a symbolic ref + * @property {string} [peeled] - If the oid is the SHA-1 object id of an annotated tag, this is the SHA-1 object id that the annotated tag points to + */ - if (thinPack) { - // If there's a default branch for the remote lets skip those objects too. - // Since this is an optional optimization, we just catch and continue if there is - // an error (because we can't find a default branch, or can't find a commit, etc) - try { - // Sadly, the discovery phase with 'forPush' doesn't return symrefs, so we have to - // rely on existing ones. 
- const ref = await GitRefManager.resolve({ - fs, - gitdir, - ref: `refs/remotes/${remote}/HEAD`, - depth: 2, - }); - const { oid } = await GitRefManager.resolveAgainstMap({ - ref: ref.replace(`refs/remotes/${remote}/`, ''), - fullref: ref, - map: httpRemote.refs, - }); - const oids = [oid]; - for (const oid of await listObjects({ fs, cache, gitdir, oids })) { - skipObjects.add(oid); - } - } catch (e) {} +async function parseListRefsResponse(stream) { + const read = GitPktLine.streamReader(stream); - // Remove objects that we know the remote already has - for (const oid of skipObjects) { - objects.delete(oid); - } - } + // TODO: when we re-write everything to minimize memory usage, + // we could make this a generator + const refs = []; - if (!force) { - // Is it a tag that already exists? - if ( - fullRef.startsWith('refs/tags') && - oldoid !== '0000000000000000000000000000000000000000' - ) { - throw new PushRejectedError('tag-exists') - } - // Is it a non-fast-forward commit? - if ( - oid !== '0000000000000000000000000000000000000000' && - oldoid !== '0000000000000000000000000000000000000000' && - !(await _isDescendent({ - fs, - cache, - gitdir, - oid, - ancestor: oldoid, - depth: -1, - })) - ) { - throw new PushRejectedError('not-fast-forward') + let line; + while (true) { + line = await read(); + if (line === true) break + if (line === null) continue + line = line.toString('utf8').replace(/\n$/, ''); + const [oid, ref, ...attrs] = line.split(' '); + const r = { ref, oid }; + for (const attr of attrs) { + const [name, value] = attr.split(':'); + if (name === 'symref-target') { + r.target = value; + } else if (name === 'peeled') { + r.peeled = value; } } - } - // We can only safely use capabilities that the server also understands. - // For instance, AWS CodeCommit aborts a push if you include the `agent`!!! - const capabilities = filterCapabilities( - [...httpRemote.capabilities], - ['report-status', 'side-band-64k', `agent=${pkg.agent}`] - ); - const packstream1 = await writeReceivePackRequest({ - capabilities, - triplets: [{ oldoid, oid, fullRef: fullRemoteRef }], - }); - const packstream2 = _delete - ? [] - : await _pack({ - fs, - cache, - gitdir, - oids: [...objects], - }); - const res = await GitRemoteHTTP.connect({ - http, - onProgress, - corsProxy, - service: 'git-receive-pack', - url, - auth, - headers, - body: [...packstream1, ...packstream2], - }); - const { packfile, progress } = await GitSideBand.demux(res.body); - if (onMessage) { - const lines = splitLines(progress); - forAwait(lines, async line => { - await onMessage(line); - }); - } - // Parse the response! 
- const result = await parseReceivePackResponse(packfile); - if (res.headers) { - result.headers = res.headers; + refs.push(r); } - // Update the local copy of the remote ref - if (remote && result.ok && result.refs[fullRemoteRef].ok) { - // TODO: I think this should actually be using a refspec transform rather than assuming 'refs/remotes/{remote}' - const ref = `refs/remotes/${remote}/${fullRemoteRef.replace( - 'refs/heads', - '' - )}`; - if (_delete) { - await GitRefManager.deleteRef({ fs, gitdir, ref }); - } else { - await GitRefManager.writeRef({ fs, gitdir, ref, value: oid }); - } - } - if (result.ok && Object.values(result.refs).every(result => result.ok)) { - return result - } else { - const prettyDetails = Object.entries(result.refs) - .filter(([k, v]) => !v.ok) - .map(([k, v]) => `\n - ${k}: ${v.error}`) - .join(''); - throw new GitPushError(prettyDetails, result) + return refs +} + +/** + * @param {object} args + * @param {string} [args.prefix] - Only list refs that start with this prefix + * @param {boolean} [args.symrefs = false] - Include symbolic ref targets + * @param {boolean} [args.peelTags = false] - Include peeled tags values + * @returns {Uint8Array[]} + */ +async function writeListRefsRequest({ prefix, symrefs, peelTags }) { + const packstream = []; + // command + packstream.push(GitPktLine.encode('command=ls-refs\n')); + // capability-list + packstream.push(GitPktLine.encode(`agent=${pkg.agent}\n`)); + // [command-args] + if (peelTags || symrefs || prefix) { + packstream.push(GitPktLine.delim()); } + if (peelTags) packstream.push(GitPktLine.encode('peel')); + if (symrefs) packstream.push(GitPktLine.encode('symrefs')); + if (prefix) packstream.push(GitPktLine.encode(`ref-prefix ${prefix}`)); + packstream.push(GitPktLine.flush()); + return packstream } // @ts-check /** - * Push a branch or tag + * Fetch a list of refs (branches, tags, etc) from a server. * - * The push command returns an object that describes the result of the attempted push operation. - * *Notes:* If there were no errors, then there will be no `errors` property. There can be a mix of `ok` messages and `errors` messages. + * This is a rare command that doesn't require an `fs`, `dir`, or even `gitdir` argument. + * It just requires an `http` argument. * - * | param | type [= default] | description | - * | ------ | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - * | ok | Array\ | The first item is "unpack" if the overall operation was successful. The remaining items are the names of refs that were updated successfully. | - * | errors | Array\ | If the overall operation threw and error, the first item will be "unpack {Overall error message}". The remaining items are individual refs that failed to be updated in the format "{ref name} {error message}". | + * ### About `protocolVersion` + * + * There's a rather fun trade-off between Git Protocol Version 1 and Git Protocol Version 2. + * Version 2 actually requires 2 HTTP requests instead of 1, making it similar to fetch or push in that regard. + * However, version 2 supports server-side filtering by prefix, whereas that filtering is done client-side in version 1. + * Which protocol is most efficient therefore depends on the number of refs on the remote, the latency of the server, and speed of the network connection. 
+ * For small repos (or fast Internet connections), the requirement to make two trips to the server makes protocol 2 slower.
+ * But for large repos (or slow Internet connections), the decreased payload size of the second request makes up for the additional request.
+ *
+ * Hard numbers vary by situation, but here are some numbers from my machine:
+ *
+ * Using isomorphic-git in a browser, with a CORS proxy, listing only the branches (refs/heads) of https://github.com/isomorphic-git/isomorphic-git
+ * - Protocol Version 1 took ~300ms and transferred 84 KB.
+ * - Protocol Version 2 took ~500ms and transferred 4.1 KB.
+ *
+ * Using isomorphic-git in a browser, with a CORS proxy, listing only the branches (refs/heads) of https://gitlab.com/gitlab-org/gitlab
+ * - Protocol Version 1 took ~4900ms and transferred 9.41 MB.
+ * - Protocol Version 2 took ~1280ms and transferred 433 KB.
+ *
+ * Finally, there is a fun quirk regarding the `symrefs` parameter.
+ * Protocol Version 1 will generally only return the `HEAD` symref and not others.
+ * Historically, this meant that servers don't use symbolic refs except for `HEAD`, which is used to point at the "default branch".
+ * However, Protocol Version 2 can return *all* the symbolic refs on the server.
+ * So if you are running your own git server, you could take advantage of that, I guess.
+ *
+ * #### TL;DR
+ * If you are _not_ taking advantage of `prefix`, I would recommend `protocolVersion: 1`.
+ * Otherwise, I recommend using the default, which is `protocolVersion: 2`.
 *
 * @param {object} args
- * @param {FsClient} args.fs - a file system client
 * @param {HttpClient} args.http - an HTTP client
- * @param {ProgressCallback} [args.onProgress] - optional progress event callback
- * @param {MessageCallback} [args.onMessage] - optional message event callback
 * @param {AuthCallback} [args.onAuth] - optional auth fill callback
 * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback
 * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback
- * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path
- * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path
- * @param {string} [args.ref] - Which branch to push. By default this is the currently checked out branch.
- * @param {string} [args.url] - The URL of the remote repository. The default is the value set in the git config for that remote.
- * @param {string} [args.remote] - If URL is not specified, determines which remote to use.
- * @param {string} [args.remoteRef] - The name of the receiving branch on the remote. By default this is the configured remote tracking branch.
- * @param {boolean} [args.force = false] - If true, behaves the same as `git push --force`
- * @param {boolean} [args.delete = false] - If true, delete the remote ref
+ * @param {string} args.url - The URL of the remote repository. Will be taken from gitconfig if absent.
 * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config.
+ * @param {boolean} [args.forPush = false] - By default, the command queries the 'fetch' capabilities. If true, it will ask for the 'push' capabilities.
* @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {1 | 2} [args.protocolVersion = 2] - Which version of the Git Protocol to use. + * @param {string} [args.prefix] - Only list refs that start with this prefix + * @param {boolean} [args.symrefs = false] - Include symbolic ref targets + * @param {boolean} [args.peelTags = false] - Include annotated tag peeled targets * - * @returns {Promise} Resolves successfully when push completes with a detailed description of the operation from the server. - * @see PushResult - * @see RefUpdateStatus + * @returns {Promise} Resolves successfully with an array of ServerRef objects + * @see ServerRef + * + * @example + * // List all the branches on a repo + * let refs = await git.listServerRefs({ + * http, + * corsProxy: "https://cors.isomorphic-git.org", + * url: "https://github.com/isomorphic-git/isomorphic-git.git", + * prefix: "refs/heads/", + * }); + * console.log(refs); + * + * @example + * // Get the default branch on a repo + * let refs = await git.listServerRefs({ + * http, + * corsProxy: "https://cors.isomorphic-git.org", + * url: "https://github.com/isomorphic-git/isomorphic-git.git", + * prefix: "HEAD", + * symrefs: true, + * }); + * console.log(refs); * * @example - * let pushResult = await git.push({ - * fs, + * // List all the tags on a repo + * let refs = await git.listServerRefs({ * http, - * dir: '/tutorial', - * remote: 'origin', - * ref: 'main', - * onAuth: () => ({ username: process.env.GITHUB_TOKEN }), - * }) - * console.log(pushResult) + * corsProxy: "https://cors.isomorphic-git.org", + * url: "https://github.com/isomorphic-git/isomorphic-git.git", + * prefix: "refs/tags/", + * peelTags: true, + * }); + * console.log(refs); + * + * @example + * // List all the pull requests on a repo + * let refs = await git.listServerRefs({ + * http, + * corsProxy: "https://cors.isomorphic-git.org", + * url: "https://github.com/isomorphic-git/isomorphic-git.git", + * prefix: "refs/pull/", + * }); + * console.log(refs); * */ -async function push({ - fs, +async function listServerRefs({ http, - onProgress, - onMessage, onAuth, onAuthSuccess, onAuthFailure, - dir, - gitdir = join(dir, '.git'), - ref, - remoteRef, - remote = 'origin', - url, - force = false, - delete: _delete = false, corsProxy, + url, headers = {}, - cache = {}, + forPush = false, + protocolVersion = 2, + prefix, + symrefs, + peelTags, }) { try { - assertParameter('fs', fs); assertParameter('http', http); - assertParameter('gitdir', gitdir); + assertParameter('url', url); - return await _push({ - fs: new FileSystem(fs), - cache, + const remote = await GitRemoteHTTP.discover({ http, - onProgress, - onMessage, onAuth, onAuthSuccess, onAuthFailure, - gitdir, - ref, - remoteRef, - remote, - url, - force, - delete: _delete, corsProxy, + service: forPush ? 'git-receive-pack' : 'git-upload-pack', + url, headers, - }) + protocolVersion, + }); + + if (remote.protocolVersion === 1) { + return formatInfoRefs(remote, prefix, symrefs, peelTags) + } + + // Protocol Version 2 + const body = await writeListRefsRequest({ prefix, symrefs, peelTags }); + + const res = await GitRemoteHTTP.connect({ + http, + auth: remote.auth, + headers, + corsProxy, + service: forPush ? 
'git-receive-pack' : 'git-upload-pack', + url, + body, + }); + + return parseListRefsResponse(res.body) } catch (err) { - err.caller = 'git.push'; + err.caller = 'git.listServerRefs'; throw err } } -async function resolveBlob({ fs, cache, gitdir, oid }) { +// @ts-check + +/** + * List tags + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * + * @returns {Promise>} Resolves successfully with an array of tag names + * + * @example + * let tags = await git.listTags({ fs, dir: '/tutorial' }) + * console.log(tags) + * + */ +async function listTags({ fs, dir, gitdir = join(dir, '.git') }) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + return GitRefManager.listTags({ fs: new FileSystem(fs), gitdir }) + } catch (err) { + err.caller = 'git.listTags'; + throw err + } +} + +async function resolveCommit({ fs, cache, gitdir, oid }) { const { type, object } = await _readObject({ fs, cache, gitdir, oid }); // Resolve annotated tag objects to whatever if (type === 'tag') { oid = GitAnnotatedTag.from(object).parse().object; - return resolveBlob({ fs, cache, gitdir, oid }) + return resolveCommit({ fs, cache, gitdir, oid }) } - if (type !== 'blob') { - throw new ObjectTypeError(oid, type, 'blob') + if (type !== 'commit') { + throw new ObjectTypeError(oid, type, 'commit') } - return { oid, blob: new Uint8Array(object) } + return { commit: GitCommit.from(object), oid } } // @ts-check -/** - * - * @typedef {Object} ReadBlobResult - The object returned has the following schema: - * @property {string} oid - * @property {Uint8Array} blob - * - */ - /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs * @param {any} args.cache * @param {string} args.gitdir * @param {string} args.oid - * @param {string} [args.filepath] * - * @returns {Promise} Resolves successfully with a blob object description - * @see ReadBlobResult + * @returns {Promise} Resolves successfully with a git commit object + * @see ReadCommitResult + * @see CommitObject + * */ -async function _readBlob({ - fs, - cache, - gitdir, - oid, - filepath = undefined, -}) { - if (filepath !== undefined) { - oid = await resolveFilepath({ fs, cache, gitdir, oid, filepath }); - } - const blob = await resolveBlob({ +async function _readCommit({ fs, cache, gitdir, oid }) { + const { commit, oid: commitOid } = await resolveCommit({ fs, cache, gitdir, oid, }); - return blob + const result = { + oid: commitOid, + commit: commit.parse(), + payload: commit.withoutSignature(), + }; + // @ts-ignore + return result +} + +function compareAge(a, b) { + return a.committer.timestamp - b.committer.timestamp } // @ts-check -/** - * - * @typedef {Object} ReadBlobResult - The object returned has the following schema: - * @property {string} oid - * @property {Uint8Array} blob - * - */ +// the empty file content object id +const EMPTY_OID = 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'; -/** - * Read a blob object directly - * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The SHA-1 object id to get. Annotated tags, commits, and trees are peeled. 
- * @param {string} [args.filepath] - Don't return the object with `oid` itself, but resolve `oid` to a tree and then return the blob object at that filepath. - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully with a blob object description - * @see ReadBlobResult - * - * @example - * // Get the contents of 'README.md' in the main branch. - * let commitOid = await git.resolveRef({ fs, dir: '/tutorial', ref: 'main' }) - * console.log(commitOid) - * let { blob } = await git.readBlob({ - * fs, - * dir: '/tutorial', - * oid: commitOid, - * filepath: 'README.md' - * }) - * console.log(Buffer.from(blob).toString('utf8')) - * - */ -async function readBlob({ +async function resolveFileIdInTree({ fs, cache, gitdir, oid, fileId }) { + if (fileId === EMPTY_OID) return + const _oid = oid; + let filepath; + const result = await resolveTree({ fs, cache, gitdir, oid }); + const tree = result.tree; + if (fileId === result.oid) { + filepath = result.path; + } else { + filepath = await _resolveFileId({ + fs, + cache, + gitdir, + tree, + fileId, + oid: _oid, + }); + if (Array.isArray(filepath)) { + if (filepath.length === 0) filepath = undefined; + else if (filepath.length === 1) filepath = filepath[0]; + } + } + return filepath +} + +async function _resolveFileId({ fs, - dir, - gitdir = join(dir, '.git'), + cache, + gitdir, + tree, + fileId, oid, - filepath, - cache = {}, + filepaths = [], + parentPath = '', }) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('oid', oid); + const walks = tree.entries().map(function(entry) { + let result; + if (entry.oid === fileId) { + result = join(parentPath, entry.path); + filepaths.push(result); + } else if (entry.type === 'tree') { + result = _readObject({ + fs, + cache, + gitdir, + oid: entry.oid, + }).then(function({ object }) { + return _resolveFileId({ + fs, + cache, + gitdir, + tree: GitTree.from(object), + fileId, + oid, + filepaths, + parentPath: join(parentPath, entry.path), + }) + }); + } + return result + }); - return await _readBlob({ - fs: new FileSystem(fs), - cache, - gitdir, - oid, - filepath, - }) - } catch (err) { - err.caller = 'git.readBlob'; - throw err - } + await Promise.all(walks); + return filepaths } // @ts-check /** - * Read a commit object directly + * Get commit descriptions from the git history * * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The SHA-1 object id to get. Annotated tags are peeled. - * @param {object} [args.cache] - a [cache](cache.md) object + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} args.gitdir + * @param {string=} args.filepath optional get the commit for the filepath only + * @param {string} args.ref + * @param {number|void} args.depth + * @param {boolean=} [args.force=false] do not throw error if filepath is not exist (works only for a single file). defaults to false + * @param {boolean=} [args.follow=false] Continue listing the history of a file beyond renames (works only for a single file). defaults to false + * @param {boolean=} args.follow Continue listing the history of a file beyond renames (works only for a single file). 
defaults to false * - * @returns {Promise} Resolves successfully with a git commit object + * @returns {Promise>} Resolves to an array of ReadCommitResult objects * @see ReadCommitResult * @see CommitObject * * @example - * // Read a commit object - * let sha = await git.resolveRef({ fs, dir: '/tutorial', ref: 'main' }) - * console.log(sha) - * let commit = await git.readCommit({ fs, dir: '/tutorial', oid: sha }) - * console.log(commit) + * let commits = await git.log({ dir: '$input((/))', depth: $input((5)), ref: '$input((master))' }) + * console.log(commits) * */ -async function readCommit({ +async function _log({ fs, - dir, - gitdir = join(dir, '.git'), - oid, - cache = {}, + cache, + gitdir, + filepath, + ref, + depth, + since, + force, + follow, }) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('oid', oid); + const sinceTimestamp = + typeof since === 'undefined' + ? undefined + : Math.floor(since.valueOf() / 1000); + // TODO: In the future, we may want to have an API where we return a + // async iterator that emits commits. + const commits = []; + const shallowCommits = await GitShallowManager.read({ fs, gitdir }); + const oid = await GitRefManager.resolve({ fs, gitdir, ref }); + const tips = [await _readCommit({ fs, cache, gitdir, oid })]; + let lastFileOid; + let lastCommit; + let isOk; - return await _readCommit({ - fs: new FileSystem(fs), - cache, - gitdir, - oid, - }) - } catch (err) { - err.caller = 'git.readCommit'; - throw err + function endCommit(commit) { + if (isOk && filepath) commits.push(commit); } -} -// @ts-check + while (tips.length > 0) { + const commit = tips.pop(); -/** - * Read the contents of a note - * - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string} [args.ref] - The notes ref to look under - * @param {string} args.oid - * - * @returns {Promise} Resolves successfully with note contents as a Buffer. 
- */ + // Stop the log if we've hit the age limit + if ( + sinceTimestamp !== undefined && + commit.commit.committer.timestamp <= sinceTimestamp + ) { + break + } -async function _readNote({ - fs, - cache, - gitdir, - ref = 'refs/notes/commits', - oid, -}) { - const parent = await GitRefManager.resolve({ gitdir, fs, ref }); - const { blob } = await _readBlob({ - fs, - cache, - gitdir, - oid: parent, - filepath: oid, - }); + if (filepath) { + let vFileOid; + try { + vFileOid = await resolveFilepath({ + fs, + cache, + gitdir, + oid: commit.commit.tree, + filepath, + }); + if (lastCommit && lastFileOid !== vFileOid) { + commits.push(lastCommit); + } + lastFileOid = vFileOid; + lastCommit = commit; + isOk = true; + } catch (e) { + if (e instanceof NotFoundError) { + let found = follow && lastFileOid; + if (found) { + found = await resolveFileIdInTree({ + fs, + cache, + gitdir, + oid: commit.commit.tree, + fileId: lastFileOid, + }); + if (found) { + if (Array.isArray(found)) { + if (lastCommit) { + const lastFound = await resolveFileIdInTree({ + fs, + cache, + gitdir, + oid: lastCommit.commit.tree, + fileId: lastFileOid, + }); + if (Array.isArray(lastFound)) { + found = found.filter(p => lastFound.indexOf(p) === -1); + if (found.length === 1) { + found = found[0]; + filepath = found; + if (lastCommit) commits.push(lastCommit); + } else { + found = false; + if (lastCommit) commits.push(lastCommit); + break + } + } + } + } else { + filepath = found; + if (lastCommit) commits.push(lastCommit); + } + } + } + if (!found) { + if (!force && !follow) throw e + if (isOk && lastFileOid) { + commits.push(lastCommit); + // break + } + } + lastCommit = commit; + isOk = false; + } else throw e + } + } else { + commits.push(commit); + } - return blob + // Stop the loop if we have enough commits now. + if (depth !== undefined && commits.length === depth) { + endCommit(commit); + break + } + + // If this is not a shallow commit... + if (!shallowCommits.has(commit.oid)) { + // Add the parents of this commit to the queue + // Note: for the case of a commit with no parents, it will concat an empty array, having no net effect. + for (const oid of commit.commit.parent) { + const commit = await _readCommit({ fs, cache, gitdir, oid }); + if (!tips.map(commit => commit.oid).includes(commit.oid)) { + tips.push(commit); + } + } + } + + // Stop the loop if there are no more commit parents + if (tips.length === 0) { + endCommit(commit); + } + + // Process tips in order by age + tips.sort((a, b) => compareAge(a.commit, b.commit)); + } + return commits } // @ts-check /** - * Read the contents of a note + * Get commit descriptions from the git history * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - The notes ref to look under - * @param {string} args.oid - The SHA-1 object id of the object to get the note for. + * @param {string=} args.filepath optional get the commit for the filepath only + * @param {string} [args.ref = 'HEAD'] - The commit to begin walking backwards through the history from + * @param {number=} [args.depth] - Limit the number of commits returned. No limit by default. + * @param {Date} [args.since] - Return history newer than the given date. Can be combined with `depth` to get whichever is shorter. 
+ * @param {boolean=} [args.force=false] do not throw error if filepath is not exist (works only for a single file). defaults to false + * @param {boolean=} [args.follow=false] Continue listing the history of a file beyond renames (works only for a single file). defaults to false * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully with note contents as a Buffer. - */ - -async function readNote({ + * @returns {Promise>} Resolves to an array of ReadCommitResult objects + * @see ReadCommitResult + * @see CommitObject + * + * @example + * let commits = await git.log({ + * fs, + * dir: '/tutorial', + * depth: 5, + * ref: 'main' + * }) + * console.log(commits) + * + */ +async function log({ fs, dir, gitdir = join(dir, '.git'), - ref = 'refs/notes/commits', - oid, + filepath, + ref = 'HEAD', + depth, + since, // Date + force, + follow, cache = {}, }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); assertParameter('ref', ref); - assertParameter('oid', oid); - return await _readNote({ + return await _log({ fs: new FileSystem(fs), cache, gitdir, + filepath, ref, - oid, + depth, + since, + force, + follow, }) } catch (err) { - err.caller = 'git.readNote'; + err.caller = 'git.log'; throw err } } @@ -29639,262 +28668,285 @@ async function readNote({ /** * - * @typedef {Object} DeflatedObject - * @property {string} oid - * @property {'deflated'} type - * @property {'deflated'} format - * @property {Uint8Array} object - * @property {string} [source] + * @typedef {Object} MergeResult - Returns an object with a schema like this: + * @property {string} [oid] - The SHA-1 object id that is now at the head of the branch. Absent only if `dryRun` was specified and `mergeCommit` is true. + * @property {boolean} [alreadyMerged] - True if the branch was already merged so no changes were made + * @property {boolean} [fastForward] - True if it was a fast-forward merge + * @property {boolean} [mergeCommit] - True if merge resulted in a merge commit + * @property {string} [tree] - The SHA-1 object id of the tree resulting from a merge commit * */ /** + * Merge two branches * - * @typedef {Object} WrappedObject - * @property {string} oid - * @property {'wrapped'} type - * @property {'wrapped'} format - * @property {Uint8Array} object - * @property {string} [source] + * ## Limitations * - */ - -/** + * Currently it does not support incomplete merges. That is, if there are merge conflicts it cannot solve + * with the built in diff3 algorithm it will not modify the working dir, and will throw a [`MergeNotSupportedError`](./errors.md#mergenotsupportedError) error. * - * @typedef {Object} RawObject - * @property {string} oid - * @property {'blob'|'commit'|'tree'|'tag'} type - * @property {'content'} format - * @property {Uint8Array} object - * @property {string} [source] + * Currently it will fail if multiple candidate merge bases are found. (It doesn't yet implement the recursive merge strategy.) * - */ - -/** + * Currently it does not support selecting alternative merge strategies. 
* - * @typedef {Object} ParsedBlobObject - * @property {string} oid - * @property {'blob'} type - * @property {'parsed'} format - * @property {string} object - * @property {string} [source] + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {SignCallback} [args.onSign] - a PGP signing implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.ours] - The branch receiving the merge. If undefined, defaults to the current branch. + * @param {string} args.theirs - The branch to be merged + * @param {boolean} [args.fastForwardOnly = false] - If true, then non-fast-forward merges will throw an Error instead of performing a merge. + * @param {boolean} [args.dryRun = false] - If true, simulates a merge so you can test whether it would succeed. + * @param {boolean} [args.noUpdateBranch = false] - If true, does not update the branch pointer after creating the commit. + * @param {string} [args.message] - Overrides the default auto-generated merge commit message + * @param {Object} [args.author] - passed to [commit](commit.md) when creating a merge commit + * @param {string} [args.author.name] - Default is `user.name` config. + * @param {string} [args.author.email] - Default is `user.email` config. + * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {Object} [args.committer] - passed to [commit](commit.md) when creating a merge commit + * @param {string} [args.committer.name] - Default is `user.name` config. + * @param {string} [args.committer.email] - Default is `user.email` config. + * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. 
+ * @param {string} [args.signingKey] - passed to [commit](commit.md) when creating a merge commit + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise} Resolves to a description of the merge operation + * @see MergeResult + * + * @example + * let m = await git.merge({ + * fs, + * dir: '/tutorial', + * ours: 'main', + * theirs: 'remotes/origin/main' + * }) + * console.log(m) * */ +async function merge({ + fs: _fs, + onSign, + dir, + gitdir = join(dir, '.git'), + ours, + theirs, + fastForwardOnly = false, + dryRun = false, + noUpdateBranch = false, + message, + author: _author, + committer: _committer, + signingKey, + cache = {}, +}) { + try { + assertParameter('fs', _fs); + if (signingKey) { + assertParameter('onSign', onSign); + } + const fs = new FileSystem(_fs); + + const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); + if (!author && !fastForwardOnly) throw new MissingNameError('author') + + const committer = await normalizeCommitterObject({ + fs, + gitdir, + author, + committer: _committer, + }); + if (!committer && !fastForwardOnly) { + throw new MissingNameError('committer') + } + + return await _merge({ + fs, + cache, + gitdir, + ours, + theirs, + fastForwardOnly, + dryRun, + noUpdateBranch, + message, + author, + committer, + signingKey, + onSign, + }) + } catch (err) { + err.caller = 'git.merge'; + throw err + } +} /** - * - * @typedef {Object} ParsedCommitObject - * @property {string} oid - * @property {'commit'} type - * @property {'parsed'} format - * @property {CommitObject} object - * @property {string} [source] - * + * @enum {number} */ +const types = { + commit: 0b0010000, + tree: 0b0100000, + blob: 0b0110000, + tag: 0b1000000, + ofs_delta: 0b1100000, + ref_delta: 0b1110000, +}; /** - * - * @typedef {Object} ParsedTreeObject - * @property {string} oid - * @property {'tree'} type - * @property {'parsed'} format - * @property {TreeObject} object - * @property {string} [source] - * + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string[]} args.oids */ +async function _pack({ + fs, + cache, + dir, + gitdir = join(dir, '.git'), + oids, +}) { + const hash = new Hash(); + const outputStream = []; + function write(chunk, enc) { + const buff = Buffer.from(chunk, enc); + outputStream.push(buff); + hash.update(buff); + } + async function writeObject({ stype, object }) { + // Object type is encoded in bits 654 + const type = types[stype]; + // The length encoding gets complicated. + let length = object.length; + // Whether the next byte is part of the variable-length encoded number + // is encoded in bit 7 + let multibyte = length > 0b1111 ? 0b10000000 : 0b0; + // Last four bits of length is encoded in bits 3210 + const lastFour = length & 0b1111; + // Discard those bits + length = length >>> 4; + // The first byte is then (1-bit multibyte?), (3-bit type), (4-bit least sig 4-bits of length) + let byte = (multibyte | type | lastFour).toString(16); + write(byte, 'hex'); + // Now we keep chopping away at length 7-bits at a time until its zero, + // writing out the bytes in what amounts to little-endian order. + while (multibyte) { + multibyte = length > 0b01111111 ? 
0b10000000 : 0b0; + byte = multibyte | (length & 0b01111111); + write(padHex(2, byte), 'hex'); + length = length >>> 7; + } + // Lastly, we can compress and write the object. + write(Buffer.from(await deflate(object))); + } + write('PACK'); + write('00000002', 'hex'); + // Write a 4 byte (32-bit) int + write(padHex(8, oids.length), 'hex'); + for (const oid of oids) { + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + await writeObject({ write, object, stype: type }); + } + // Write SHA1 checksum + const digest = hash.digest(); + outputStream.push(digest); + return outputStream +} + +// @ts-check /** * - * @typedef {Object} ParsedTagObject - * @property {string} oid - * @property {'tag'} type - * @property {'parsed'} format - * @property {TagObject} object - * @property {string} [source] - * + * @typedef {Object} PackObjectsResult The packObjects command returns an object with two properties: + * @property {string} filename - The suggested filename for the packfile if you want to save it to disk somewhere. It includes the packfile SHA. + * @property {Uint8Array} [packfile] - The packfile contents. Not present if `write` parameter was true, in which case the packfile was written straight to disk. */ /** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} args.gitdir + * @param {string[]} args.oids + * @param {boolean} args.write * - * @typedef {ParsedBlobObject | ParsedCommitObject | ParsedTreeObject | ParsedTagObject} ParsedObject + * @returns {Promise} + * @see PackObjectsResult */ +async function _packObjects({ fs, cache, gitdir, oids, write }) { + const buffers = await _pack({ fs, cache, gitdir, oids }); + const packfile = Buffer.from(await collect(buffers)); + const packfileSha = packfile.slice(-20).toString('hex'); + const filename = `pack-${packfileSha}.pack`; + if (write) { + await fs.write(join(gitdir, `objects/pack/${filename}`), packfile); + return { filename } + } + return { + filename, + packfile: new Uint8Array(packfile), + } +} + +// @ts-check /** * - * @typedef {DeflatedObject | WrappedObject | RawObject | ParsedObject } ReadObjectResult + * @typedef {Object} PackObjectsResult The packObjects command returns an object with two properties: + * @property {string} filename - The suggested filename for the packfile if you want to save it to disk somewhere. It includes the packfile SHA. + * @property {Uint8Array} [packfile] - The packfile contents. Not present if `write` parameter was true, in which case the packfile was written straight to disk. */ /** - * Read a git object directly by its SHA-1 object id - * - * Regarding `ReadObjectResult`: - * - * - `oid` will be the same as the `oid` argument unless the `filepath` argument is provided, in which case it will be the oid of the tree or blob being returned. - * - `type` of deflated objects is `'deflated'`, and `type` of wrapped objects is `'wrapped'` - * - `format` is usually, but not always, the format you requested. Packfiles do not store each object individually compressed so if you end up reading the object from a packfile it will be returned in format 'content' even if you requested 'deflated' or 'wrapped'. - * - `object` will be an actual Object if format is 'parsed' and the object is a commit, tree, or annotated tag. Blobs are still formatted as Buffers unless an encoding is provided in which case they'll be strings. If format is anything other than 'parsed', object will be a Buffer. 
- * - `source` is the name of the packfile or loose object file where the object was found. - * - * The `format` parameter can have the following values: - * - * | param | description | - * | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - * | 'deflated' | Return the raw deflate-compressed buffer for an object if possible. Useful for efficiently shuffling around loose objects when you don't care about the contents and can save time by not inflating them. | - * | 'wrapped' | Return the inflated object buffer wrapped in the git object header if possible. This is the raw data used when calculating the SHA-1 object id of a git object. | - * | 'content' | Return the object buffer without the git header. | - * | 'parsed' | Returns a parsed representation of the object. | - * - * The result will be in one of the following schemas: - * - * ## `'deflated'` format - * - * {@link DeflatedObject typedef} - * - * ## `'wrapped'` format - * - * {@link WrappedObject typedef} - * - * ## `'content'` format - * - * {@link RawObject typedef} - * - * ## `'parsed'` format - * - * ### parsed `'blob'` type - * - * {@link ParsedBlobObject typedef} - * - * ### parsed `'commit'` type - * - * {@link ParsedCommitObject typedef} - * {@link CommitObject typedef} - * - * ### parsed `'tree'` type - * - * {@link ParsedTreeObject typedef} - * {@link TreeObject typedef} - * {@link TreeEntry typedef} - * - * ### parsed `'tag'` type - * - * {@link ParsedTagObject typedef} - * {@link TagObject typedef} - * - * @deprecated - * > This command is overly complicated. - * > - * > If you know the type of object you are reading, use [`readBlob`](./readBlob.md), [`readCommit`](./readCommit.md), [`readTag`](./readTag.md), or [`readTree`](./readTree.md). + * Create a packfile from an array of SHA-1 object ids * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The SHA-1 object id to get - * @param {'deflated' | 'wrapped' | 'content' | 'parsed'} [args.format = 'parsed'] - What format to return the object in. The choices are described in more detail below. - * @param {string} [args.filepath] - Don't return the object with `oid` itself, but resolve `oid` to a tree and then return the object at that filepath. To return the root directory of a tree set filepath to `''` - * @param {string} [args.encoding] - A convenience argument that only affects blobs. Instead of returning `object` as a buffer, it returns a string parsed using the given encoding. 
+ * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string[]} args.oids - An array of SHA-1 object ids to be included in the packfile + * @param {boolean} [args.write = false] - Whether to save the packfile to disk or not * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully with a git object description - * @see ReadObjectResult + * @returns {Promise} Resolves successfully when the packfile is ready with the filename and buffer + * @see PackObjectsResult * * @example - * // Given a ransom SHA-1 object id, figure out what it is - * let { type, object } = await git.readObject({ + * // Create a packfile containing only an empty tree + * let { packfile } = await git.packObjects({ * fs, * dir: '/tutorial', - * oid: '0698a781a02264a6f37ba3ff41d78067eaf0f075' + * oids: ['4b825dc642cb6eb9a060e54bf8d69288fbee4904'] * }) - * switch (type) { - * case 'commit': { - * console.log(object) - * break - * } - * case 'tree': { - * console.log(object) - * break - * } - * case 'blob': { - * console.log(object) - * break - * } - * case 'tag': { - * console.log(object) - * break - * } - * } + * console.log(packfile) * */ -async function readObject({ - fs: _fs, +async function packObjects({ + fs, dir, gitdir = join(dir, '.git'), - oid, - format = 'parsed', - filepath = undefined, - encoding = undefined, + oids, + write = false, cache = {}, }) { try { - assertParameter('fs', _fs); + assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('oid', oid); + assertParameter('oids', oids); - const fs = new FileSystem(_fs); - if (filepath !== undefined) { - oid = await resolveFilepath({ - fs, - cache, - gitdir, - oid, - filepath, - }); - } - // GitObjectManager does not know how to parse content, so we tweak that parameter before passing it. - const _format = format === 'parsed' ? 
'content' : format; - const result = await _readObject({ - fs, + return await _packObjects({ + fs: new FileSystem(fs), cache, gitdir, - oid, - format: _format, - }); - result.oid = oid; - if (format === 'parsed') { - result.format = 'parsed'; - switch (result.type) { - case 'commit': - result.object = GitCommit.from(result.object).parse(); - break - case 'tree': - result.object = GitTree.from(result.object).entries(); - break - case 'blob': - // Here we consider returning a raw Buffer as the 'content' format - // and returning a string as the 'parsed' format - if (encoding) { - result.object = result.object.toString(encoding); - } else { - result.object = new Uint8Array(result.object); - result.format = 'content'; - } - break - case 'tag': - result.object = GitAnnotatedTag.from(result.object).parse(); - break - default: - throw new ObjectTypeError( - result.oid, - result.type, - 'blob|commit|tag|tree' - ) - } - } else if (result.format === 'deflated' || result.format === 'wrapped') { - result.type = result.format; - } - return result + oids, + write, + }) } catch (err) { - err.caller = 'git.readObject'; + err.caller = 'git.packObjects'; throw err } } @@ -29902,185 +28954,293 @@ async function readObject({ // @ts-check /** - * - * @typedef {Object} ReadTagResult - The object returned has the following schema: - * @property {string} oid - SHA-1 object id of this tag - * @property {TagObject} tag - the parsed tag object - * @property {string} payload - PGP signing payload - */ - -/** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {any} args.cache - * @param {string} args.gitdir - * @param {string} args.oid - * - * @returns {Promise} - */ -async function _readTag({ fs, cache, gitdir, oid }) { - const { type, object } = await _readObject({ - fs, - cache, - gitdir, - oid, - format: 'content', - }); - if (type !== 'tag') { - throw new ObjectTypeError(oid, type, 'tag') - } - const tag = GitAnnotatedTag.from(object); - const result = { - oid, - tag: tag.parse(), - payload: tag.payload(), - }; - // @ts-ignore - return result -} - -/** - * - * @typedef {Object} ReadTagResult - The object returned has the following schema: - * @property {string} oid - SHA-1 object id of this tag - * @property {TagObject} tag - the parsed tag object - * @property {string} payload - PGP signing payload - */ - -/** - * Read an annotated tag object directly + * Fetch and merge commits from a remote repository * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {HttpClient} args.http - an HTTP client + * @param {ProgressCallback} [args.onProgress] - optional progress event callback + * @param {MessageCallback} [args.onMessage] - optional message event callback + * @param {AuthCallback} [args.onAuth] - optional auth fill callback + * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback + * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback + * @param {string} args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The SHA-1 object id to get + * @param {string} [args.ref] - Which branch to merge into. By default this is the currently checked out branch. + * @param {string} [args.url] - (Added in 1.1.0) The URL of the remote repository. 
The default is the value set in the git config for that remote. + * @param {string} [args.remote] - (Added in 1.1.0) If URL is not specified, determines which remote to use. + * @param {string} [args.remoteRef] - (Added in 1.1.0) The name of the branch on the remote to fetch. By default this is the configured remote tracking branch. + * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. + * @param {boolean} [args.singleBranch = false] - Instead of the default behavior of fetching all the branches, only fetch a single branch. + * @param {boolean} [args.fastForwardOnly = false] - Only perform simple fast-forward merges. (Don't create merge commits.) + * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config + * @param {Object} [args.author] - The details about the author. + * @param {string} [args.author.name] - Default is `user.name` config. + * @param {string} [args.author.email] - Default is `user.email` config. + * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {Object} [args.committer = author] - The details about the commit committer, in the same format as the author parameter. If not specified, the author details are used. + * @param {string} [args.committer.name] - Default is `user.name` config. + * @param {string} [args.committer.email] - Default is `user.email` config. + * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. 
+ * @param {string} [args.signingKey] - passed to [commit](commit.md) when creating a merge commit * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully with a git object description - * @see ReadTagResult - * @see TagObject + * @returns {Promise} Resolves successfully when pull operation completes + * + * @example + * await git.pull({ + * fs, + * http, + * dir: '/tutorial', + * ref: 'main', + * singleBranch: true + * }) + * console.log('done') * */ -async function readTag({ - fs, +async function pull({ + fs: _fs, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, dir, gitdir = join(dir, '.git'), - oid, + ref, + url, + remote, + remoteRef, + fastForwardOnly = false, + corsProxy, + singleBranch, + headers = {}, + author: _author, + committer: _committer, + signingKey, cache = {}, }) { try { - assertParameter('fs', fs); + assertParameter('fs', _fs); assertParameter('gitdir', gitdir); - assertParameter('oid', oid); - return await _readTag({ - fs: new FileSystem(fs), + const fs = new FileSystem(_fs); + + const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); + if (!author) throw new MissingNameError('author') + + const committer = await normalizeCommitterObject({ + fs, + gitdir, + author, + committer: _committer, + }); + if (!committer) throw new MissingNameError('committer') + + return await _pull({ + fs, cache, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, + dir, gitdir, - oid, + ref, + url, + remote, + remoteRef, + fastForwardOnly, + corsProxy, + singleBranch, + headers, + author, + committer, + signingKey, }) } catch (err) { - err.caller = 'git.readTag'; + err.caller = 'git.pull'; throw err } } -// @ts-check - /** - * - * @typedef {Object} ReadTreeResult - The object returned has the following schema: - * @property {string} oid - SHA-1 object id of this tree - * @property {TreeObject} tree - the parsed tree object + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} [args.dir] + * @param {string} args.gitdir + * @param {Iterable} args.start + * @param {Iterable} args.finish + * @returns {Promise>} */ +async function listCommitsAndTags({ + fs, + cache, + dir, + gitdir = join(dir, '.git'), + start, + finish, +}) { + const shallows = await GitShallowManager.read({ fs, gitdir }); + const startingSet = new Set(); + const finishingSet = new Set(); + for (const ref of start) { + startingSet.add(await GitRefManager.resolve({ fs, gitdir, ref })); + } + for (const ref of finish) { + // We may not have these refs locally so we must try/catch + try { + const oid = await GitRefManager.resolve({ fs, gitdir, ref }); + finishingSet.add(oid); + } catch (err) {} + } + const visited = new Set(); + // Because git commits are named by their hash, there is no + // way to construct a cycle. Therefore we won't worry about + // setting a default recursion limit. 
+ async function walk(oid) { + visited.add(oid); + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + // Recursively resolve annotated tags + if (type === 'tag') { + const tag = GitAnnotatedTag.from(object); + const commit = tag.headers().object; + return walk(commit) + } + if (type !== 'commit') { + throw new ObjectTypeError(oid, type, 'commit') + } + if (!shallows.has(oid)) { + const commit = GitCommit.from(object); + const parents = commit.headers().parent; + for (oid of parents) { + if (!finishingSet.has(oid) && !visited.has(oid)) { + await walk(oid); + } + } + } + } + // Let's go walking! + for (const oid of startingSet) { + await walk(oid); + } + return visited +} /** - * Read a tree object directly - * * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.oid - The SHA-1 object id to get. Annotated tags and commits are peeled. - * @param {string} [args.filepath] - Don't return the object with `oid` itself, but resolve `oid` to a tree and then return the tree object at that filepath. - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully with a git tree object - * @see ReadTreeResult - * @see TreeObject - * @see TreeEntry - * + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} [args.dir] + * @param {string} args.gitdir + * @param {Iterable} args.oids + * @returns {Promise>} */ -async function readTree({ +async function listObjects({ fs, + cache, dir, gitdir = join(dir, '.git'), - oid, - filepath = undefined, - cache = {}, + oids, }) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('oid', oid); + const visited = new Set(); + // We don't do the purest simplest recursion, because we can + // avoid reading Blob objects entirely since the Tree objects + // tell us which oids are Blobs and which are Trees. + async function walk(oid) { + if (visited.has(oid)) return + visited.add(oid); + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + if (type === 'tag') { + const tag = GitAnnotatedTag.from(object); + const obj = tag.headers().object; + await walk(obj); + } else if (type === 'commit') { + const commit = GitCommit.from(object); + const tree = commit.headers().tree; + await walk(tree); + } else if (type === 'tree') { + const tree = GitTree.from(object); + for (const entry of tree) { + // add blobs to the set + // skip over submodules whose type is 'commit' + if (entry.type === 'blob') { + visited.add(entry.oid); + } + // recurse for trees + if (entry.type === 'tree') { + await walk(entry.oid); + } + } + } + } + // Let's go walking! 
+ for (const oid of oids) { + await walk(oid); + } + return visited +} - return await _readTree({ - fs: new FileSystem(fs), - cache, - gitdir, - oid, - filepath, - }) - } catch (err) { - err.caller = 'git.readTree'; - throw err +async function parseReceivePackResponse(packfile) { + /** @type PushResult */ + const result = {}; + let response = ''; + const read = GitPktLine.streamReader(packfile); + let line = await read(); + while (line !== true) { + if (line !== null) response += line.toString('utf8') + '\n'; + line = await read(); + } + + const lines = response.toString('utf8').split('\n'); + // We're expecting "unpack {unpack-result}" + line = lines.shift(); + if (!line.startsWith('unpack ')) { + throw new ParseError('unpack ok" or "unpack [error message]', line) + } + result.ok = line === 'unpack ok'; + if (!result.ok) { + result.error = line.slice('unpack '.length); + } + result.refs = {}; + for (const line of lines) { + if (line.trim() === '') continue + const status = line.slice(0, 2); + const refAndMessage = line.slice(3); + let space = refAndMessage.indexOf(' '); + if (space === -1) space = refAndMessage.length; + const ref = refAndMessage.slice(0, space); + const error = refAndMessage.slice(space + 1); + result.refs[ref] = { + ok: status === 'ok', + error, + }; } + return result } -// @ts-check - -/** - * Remove a file from the git index (aka staging area) - * - * Note that this does NOT delete the file in the working directory. - * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.filepath - The path to the file to remove from the index - * @param {object} [args.cache] - a [cache](cache.md) object - * - * @returns {Promise} Resolves successfully once the git index has been updated - * - * @example - * await git.remove({ fs, dir: '/tutorial', filepath: 'README.md' }) - * console.log('done') - * - */ -async function remove({ - fs: _fs, - dir, - gitdir = join(dir, '.git'), - filepath, - cache = {}, +async function writeReceivePackRequest({ + capabilities = [], + triplets = [], }) { - try { - assertParameter('fs', _fs); - assertParameter('gitdir', gitdir); - assertParameter('filepath', filepath); - - await GitIndexManager.acquire( - { fs: new FileSystem(_fs), gitdir, cache }, - async function(index) { - index.delete({ filepath }); - } + const packstream = []; + let capsFirstLine = `\x00 ${capabilities.join(' ')}`; + for (const trip of triplets) { + packstream.push( + GitPktLine.encode( + `${trip.oldoid} ${trip.oid} ${trip.fullRef}${capsFirstLine}\n` + ) ); - } catch (err) { - err.caller = 'git.remove'; - throw err + capsFirstLine = ''; } + packstream.push(GitPktLine.flush()); + return packstream } // @ts-check @@ -30088,354 +29248,532 @@ async function remove({ /** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {object} args.cache - * @param {SignCallback} [args.onSign] - * @param {string} [args.dir] - * @param {string} [args.gitdir=join(dir,'.git')] + * @param {any} args.cache + * @param {HttpClient} args.http + * @param {ProgressCallback} [args.onProgress] + * @param {MessageCallback} [args.onMessage] + * @param {AuthCallback} [args.onAuth] + * @param {AuthFailureCallback} [args.onAuthFailure] + * @param {AuthSuccessCallback} [args.onAuthSuccess] + * @param {string} args.gitdir * @param 
{string} [args.ref] - * @param {string} args.oid - * @param {Object} args.author - * @param {string} args.author.name - * @param {string} args.author.email - * @param {number} args.author.timestamp - * @param {number} args.author.timezoneOffset - * @param {Object} args.committer - * @param {string} args.committer.name - * @param {string} args.committer.email - * @param {number} args.committer.timestamp - * @param {number} args.committer.timezoneOffset - * @param {string} [args.signingKey] + * @param {string} [args.remoteRef] + * @param {string} [args.remote] + * @param {boolean} [args.force = false] + * @param {boolean} [args.delete = false] + * @param {string} [args.url] + * @param {string} [args.corsProxy] + * @param {Object} [args.headers] * - * @returns {Promise} + * @returns {Promise} */ +async function _push({ + fs, + cache, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, + gitdir, + ref: _ref, + remoteRef: _remoteRef, + remote, + url: _url, + force = false, + delete: _delete = false, + corsProxy, + headers = {}, +}) { + const ref = _ref || (await _currentBranch({ fs, gitdir })); + if (typeof ref === 'undefined') { + throw new MissingParameterError('ref') + } + const config = await GitConfigManager.get({ fs, gitdir }); + // Figure out what remote to use. + remote = + remote || + (await config.get(`branch.${ref}.pushRemote`)) || + (await config.get('remote.pushDefault')) || + (await config.get(`branch.${ref}.remote`)) || + 'origin'; + // Lookup the URL for the given remote. + const url = + _url || + (await config.get(`remote.${remote}.pushurl`)) || + (await config.get(`remote.${remote}.url`)); + if (typeof url === 'undefined') { + throw new MissingParameterError('remote OR url') + } + // Figure out what remote ref to use. + const remoteRef = _remoteRef || (await config.get(`branch.${ref}.merge`)); + if (typeof url === 'undefined') { + throw new MissingParameterError('remoteRef') + } + + if (corsProxy === undefined) { + corsProxy = await config.get('http.corsProxy'); + } + + const fullRef = await GitRefManager.expand({ fs, gitdir, ref }); + const oid = _delete + ? '0000000000000000000000000000000000000000' + : await GitRefManager.resolve({ fs, gitdir, ref: fullRef }); + + /** @type typeof import("../managers/GitRemoteHTTP").GitRemoteHTTP */ + const GitRemoteHTTP = GitRemoteManager.getRemoteHelperFor({ url }); + const httpRemote = await GitRemoteHTTP.discover({ + http, + onAuth, + onAuthSuccess, + onAuthFailure, + corsProxy, + service: 'git-receive-pack', + url, + headers, + protocolVersion: 1, + }); + const auth = httpRemote.auth; // hack to get new credentials from CredentialManager API + let fullRemoteRef; + if (!remoteRef) { + fullRemoteRef = fullRef; + } else { + try { + fullRemoteRef = await GitRefManager.expandAgainstMap({ + ref: remoteRef, + map: httpRemote.refs, + }); + } catch (err) { + if (err instanceof NotFoundError) { + // The remote reference doesn't exist yet. + // If it is fully specified, use that value. Otherwise, treat it as a branch. + fullRemoteRef = remoteRef.startsWith('refs/') + ? 
remoteRef + : `refs/heads/${remoteRef}`; + } else { + throw err + } + } + } + const oldoid = + httpRemote.refs.get(fullRemoteRef) || + '0000000000000000000000000000000000000000'; + + // Remotes can always accept thin-packs UNLESS they specify the 'no-thin' capability + const thinPack = !httpRemote.capabilities.has('no-thin'); + + let objects = new Set(); + if (!_delete) { + const finish = [...httpRemote.refs.values()]; + let skipObjects = new Set(); + + // If remote branch is present, look for a common merge base. + if (oldoid !== '0000000000000000000000000000000000000000') { + // trick to speed up common force push scenarios + const mergebase = await _findMergeBase({ + fs, + cache, + gitdir, + oids: [oid, oldoid], + }); + for (const oid of mergebase) finish.push(oid); + if (thinPack) { + skipObjects = await listObjects({ fs, cache, gitdir, oids: mergebase }); + } + } + + // If remote does not have the commit, figure out the objects to send + if (!finish.includes(oid)) { + const commits = await listCommitsAndTags({ + fs, + cache, + gitdir, + start: [oid], + finish, + }); + objects = await listObjects({ fs, cache, gitdir, oids: commits }); + } + + if (thinPack) { + // If there's a default branch for the remote lets skip those objects too. + // Since this is an optional optimization, we just catch and continue if there is + // an error (because we can't find a default branch, or can't find a commit, etc) + try { + // Sadly, the discovery phase with 'forPush' doesn't return symrefs, so we have to + // rely on existing ones. + const ref = await GitRefManager.resolve({ + fs, + gitdir, + ref: `refs/remotes/${remote}/HEAD`, + depth: 2, + }); + const { oid } = await GitRefManager.resolveAgainstMap({ + ref: ref.replace(`refs/remotes/${remote}/`, ''), + fullref: ref, + map: httpRemote.refs, + }); + const oids = [oid]; + for (const oid of await listObjects({ fs, cache, gitdir, oids })) { + skipObjects.add(oid); + } + } catch (e) {} + + // Remove objects that we know the remote already has + for (const oid of skipObjects) { + objects.delete(oid); + } + } -async function _removeNote({ - fs, - cache, - onSign, - gitdir, - ref = 'refs/notes/commits', - oid, - author, - committer, - signingKey, -}) { - // Get the current note commit - let parent; - try { - parent = await GitRefManager.resolve({ gitdir, fs, ref }); - } catch (err) { - if (!(err instanceof NotFoundError)) { - throw err + if (!force) { + // Is it a tag that already exists? + if ( + fullRef.startsWith('refs/tags') && + oldoid !== '0000000000000000000000000000000000000000' + ) { + throw new PushRejectedError('tag-exists') + } + // Is it a non-fast-forward commit? + if ( + oid !== '0000000000000000000000000000000000000000' && + oldoid !== '0000000000000000000000000000000000000000' && + !(await _isDescendent({ + fs, + cache, + gitdir, + oid, + ancestor: oldoid, + depth: -1, + })) + ) { + throw new PushRejectedError('not-fast-forward') + } } } - - // I'm using the "empty tree" magic number here for brevity - const result = await _readTree({ - fs, - gitdir, - oid: parent || '4b825dc642cb6eb9a060e54bf8d69288fbee4904', - }); - let tree = result.tree; - - // Remove the note blob entry from the tree - tree = tree.filter(entry => entry.path !== oid); - - // Create the new note tree - const treeOid = await _writeTree({ - fs, - gitdir, - tree, + // We can only safely use capabilities that the server also understands. + // For instance, AWS CodeCommit aborts a push if you include the `agent`!!! 
+ const capabilities = filterCapabilities( + [...httpRemote.capabilities], + ['report-status', 'side-band-64k', `agent=${pkg.agent}`] + ); + const packstream1 = await writeReceivePackRequest({ + capabilities, + triplets: [{ oldoid, oid, fullRef: fullRemoteRef }], }); - - // Create the new note commit - const commitOid = await _commit({ - fs, - cache, - onSign, - gitdir, - ref, - tree: treeOid, - parent: parent && [parent], - message: `Note removed by 'isomorphic-git removeNote'\n`, - author, - committer, - signingKey, + const packstream2 = _delete + ? [] + : await _pack({ + fs, + cache, + gitdir, + oids: [...objects], + }); + const res = await GitRemoteHTTP.connect({ + http, + onProgress, + corsProxy, + service: 'git-receive-pack', + url, + auth, + headers, + body: [...packstream1, ...packstream2], }); + const { packfile, progress } = await GitSideBand.demux(res.body); + if (onMessage) { + const lines = splitLines(progress); + forAwait(lines, async line => { + await onMessage(line); + }); + } + // Parse the response! + const result = await parseReceivePackResponse(packfile); + if (res.headers) { + result.headers = res.headers; + } - return commitOid + // Update the local copy of the remote ref + if (remote && result.ok && result.refs[fullRemoteRef].ok) { + // TODO: I think this should actually be using a refspec transform rather than assuming 'refs/remotes/{remote}' + const ref = `refs/remotes/${remote}/${fullRemoteRef.replace( + 'refs/heads', + '' + )}`; + if (_delete) { + await GitRefManager.deleteRef({ fs, gitdir, ref }); + } else { + await GitRefManager.writeRef({ fs, gitdir, ref, value: oid }); + } + } + if (result.ok && Object.values(result.refs).every(result => result.ok)) { + return result + } else { + const prettyDetails = Object.entries(result.refs) + .filter(([k, v]) => !v.ok) + .map(([k, v]) => `\n - ${k}: ${v.error}`) + .join(''); + throw new GitPushError(prettyDetails, result) + } } // @ts-check /** - * Remove an object note + * Push a branch or tag + * + * The push command returns an object that describes the result of the attempted push operation. + * *Notes:* If there were no errors, then there will be no `errors` property. There can be a mix of `ok` messages and `errors` messages. + * + * | param | type [= default] | description | + * | ------ | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + * | ok | Array\ | The first item is "unpack" if the overall operation was successful. The remaining items are the names of refs that were updated successfully. | + * | errors | Array\ | If the overall operation threw and error, the first item will be "unpack {Overall error message}". The remaining items are individual refs that failed to be updated in the format "{ref name} {error message}". 
| * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {SignCallback} [args.onSign] - a PGP signing implementation + * @param {HttpClient} args.http - an HTTP client + * @param {ProgressCallback} [args.onProgress] - optional progress event callback + * @param {MessageCallback} [args.onMessage] - optional message event callback + * @param {AuthCallback} [args.onAuth] - optional auth fill callback + * @param {AuthFailureCallback} [args.onAuthFailure] - optional auth rejected callback + * @param {AuthSuccessCallback} [args.onAuthSuccess] - optional auth approved callback * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref] - The notes ref to look under - * @param {string} args.oid - The SHA-1 object id of the object to remove the note from. - * @param {Object} [args.author] - The details about the author. - * @param {string} [args.author.name] - Default is `user.name` config. - * @param {string} [args.author.email] - Default is `user.email` config. - * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {Object} [args.committer = author] - The details about the note committer, in the same format as the author parameter. If not specified, the author details are used. - * @param {string} [args.committer.name] - Default is `user.name` config. - * @param {string} [args.committer.email] - Default is `user.email` config. - * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). - * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. - * @param {string} [args.signingKey] - Sign the tag object using this private PGP key. + * @param {string} [args.ref] - Which branch to push. By default this is the currently checked out branch. + * @param {string} [args.url] - The URL of the remote repository. The default is the value set in the git config for that remote. + * @param {string} [args.remote] - If URL is not specified, determines which remote to use. + * @param {string} [args.remoteRef] - The name of the receiving branch on the remote. By default this is the configured remote tracking branch. + * @param {boolean} [args.force = false] - If true, behaves the same as `git push --force` + * @param {boolean} [args.delete = false] - If true, delete the remote ref + * @param {string} [args.corsProxy] - Optional [CORS proxy](https://www.npmjs.com/%40isomorphic-git/cors-proxy). Overrides value in repo config. + * @param {Object} [args.headers] - Additional headers to include in HTTP requests, similar to git's `extraHeader` config * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the commit object for the note removal. 
+ * @returns {Promise} Resolves successfully when push completes with a detailed description of the operation from the server. + * @see PushResult + * @see RefUpdateStatus + * + * @example + * let pushResult = await git.push({ + * fs, + * http, + * dir: '/tutorial', + * remote: 'origin', + * ref: 'main', + * onAuth: () => ({ username: process.env.GITHUB_TOKEN }), + * }) + * console.log(pushResult) + * */ - -async function removeNote({ - fs: _fs, - onSign, +async function push({ + fs, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, dir, gitdir = join(dir, '.git'), - ref = 'refs/notes/commits', - oid, - author: _author, - committer: _committer, - signingKey, + ref, + remoteRef, + remote = 'origin', + url, + force = false, + delete: _delete = false, + corsProxy, + headers = {}, cache = {}, }) { try { - assertParameter('fs', _fs); + assertParameter('fs', fs); + assertParameter('http', http); assertParameter('gitdir', gitdir); - assertParameter('oid', oid); - - const fs = new FileSystem(_fs); - - const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); - if (!author) throw new MissingNameError('author') - - const committer = await normalizeCommitterObject({ - fs, - gitdir, - author, - committer: _committer, - }); - if (!committer) throw new MissingNameError('committer') - return await _removeNote({ - fs, + return await _push({ + fs: new FileSystem(fs), cache, - onSign, + http, + onProgress, + onMessage, + onAuth, + onAuthSuccess, + onAuthFailure, gitdir, ref, - oid, - author, - committer, - signingKey, + remoteRef, + remote, + url, + force, + delete: _delete, + corsProxy, + headers, }) } catch (err) { - err.caller = 'git.removeNote'; + err.caller = 'git.push'; throw err } } +async function resolveBlob({ fs, cache, gitdir, oid }) { + const { type, object } = await _readObject({ fs, cache, gitdir, oid }); + // Resolve annotated tag objects to whatever + if (type === 'tag') { + oid = GitAnnotatedTag.from(object).parse().object; + return resolveBlob({ fs, cache, gitdir, oid }) + } + if (type !== 'blob') { + throw new ObjectTypeError(oid, type, 'blob') + } + return { oid, blob: new Uint8Array(object) } +} + // @ts-check /** - * Rename a branch * + * @typedef {Object} ReadBlobResult - The object returned has the following schema: + * @property {string} oid + * @property {Uint8Array} blob + * + */ + +/** * @param {object} args * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache * @param {string} args.gitdir - * @param {string} args.ref - The name of the new branch - * @param {string} args.oldref - The name of the old branch - * @param {boolean} [args.checkout = false] + * @param {string} args.oid + * @param {string} [args.filepath] * - * @returns {Promise} Resolves successfully when filesystem operations are complete + * @returns {Promise} Resolves successfully with a blob object description + * @see ReadBlobResult */ -async function _renameBranch({ +async function _readBlob({ fs, + cache, gitdir, - oldref, - ref, - checkout = false, + oid, + filepath = undefined, }) { - if (ref !== cleanGitRef.clean(ref)) { - throw new InvalidRefNameError(ref, cleanGitRef.clean(ref)) - } - - if (oldref !== cleanGitRef.clean(oldref)) { - throw new InvalidRefNameError(oldref, cleanGitRef.clean(oldref)) - } - - const fulloldref = `refs/heads/${oldref}`; - const fullnewref = `refs/heads/${ref}`; - - const newexist = await GitRefManager.exists({ fs, gitdir, ref: fullnewref }); - - if (newexist) { - throw new 
AlreadyExistsError('branch', ref, false) + if (filepath !== undefined) { + oid = await resolveFilepath({ fs, cache, gitdir, oid, filepath }); } - - const value = await GitRefManager.resolve({ + const blob = await resolveBlob({ fs, + cache, gitdir, - ref: fulloldref, - depth: 1, + oid, }); - - await GitRefManager.writeRef({ fs, gitdir, ref: fullnewref, value }); - await GitRefManager.deleteRef({ fs, gitdir, ref: fulloldref }); - - if (checkout) { - // Update HEAD - await GitRefManager.writeSymbolicRef({ - fs, - gitdir, - ref: 'HEAD', - value: fullnewref, - }); - } + return blob } // @ts-check /** - * Rename a branch + * + * @typedef {Object} ReadBlobResult - The object returned has the following schema: + * @property {string} oid + * @property {Uint8Array} blob + * + */ + +/** + * Read a blob object directly * * @param {object} args - * @param {FsClient} args.fs - a file system implementation + * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - What to name the branch - * @param {string} args.oldref - What the name of the branch was - * @param {boolean} [args.checkout = false] - Update `HEAD` to point at the newly created branch + * @param {string} args.oid - The SHA-1 object id to get. Annotated tags, commits, and trees are peeled. + * @param {string} [args.filepath] - Don't return the object with `oid` itself, but resolve `oid` to a tree and then return the blob object at that filepath. + * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully when filesystem operations are complete + * @returns {Promise} Resolves successfully with a blob object description + * @see ReadBlobResult * * @example - * await git.renameBranch({ fs, dir: '/tutorial', ref: 'main', oldref: 'master' }) - * console.log('done') + * // Get the contents of 'README.md' in the main branch. + * let commitOid = await git.resolveRef({ fs, dir: '/tutorial', ref: 'main' }) + * console.log(commitOid) + * let { blob } = await git.readBlob({ + * fs, + * dir: '/tutorial', + * oid: commitOid, + * filepath: 'README.md' + * }) + * console.log(Buffer.from(blob).toString('utf8')) * */ -async function renameBranch({ +async function readBlob({ fs, dir, gitdir = join(dir, '.git'), - ref, - oldref, - checkout = false, + oid, + filepath, + cache = {}, }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('ref', ref); - assertParameter('oldref', oldref); - return await _renameBranch({ + assertParameter('oid', oid); + + return await _readBlob({ fs: new FileSystem(fs), + cache, gitdir, - ref, - oldref, - checkout, + oid, + filepath, }) } catch (err) { - err.caller = 'git.renameBranch'; + err.caller = 'git.readBlob'; throw err } } -async function hashObject$1({ gitdir, type, object }) { - return shasum(GitObject.wrap({ type, object })) -} - // @ts-check /** - * Reset a file in the git index (aka staging area) - * - * Note that this does NOT modify the file in the working directory. 
+ * Read a commit object directly * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.filepath - The path to the file to reset in the index - * @param {string} [args.ref = 'HEAD'] - A ref to the commit to use + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.oid - The SHA-1 object id to get. Annotated tags are peeled. * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} Resolves successfully once the git index has been updated + * @returns {Promise} Resolves successfully with a git commit object + * @see ReadCommitResult + * @see CommitObject * * @example - * await git.resetIndex({ fs, dir: '/tutorial', filepath: 'README.md' }) - * console.log('done') + * // Read a commit object + * let sha = await git.resolveRef({ fs, dir: '/tutorial', ref: 'main' }) + * console.log(sha) + * let commit = await git.readCommit({ fs, dir: '/tutorial', oid: sha }) + * console.log(commit) * */ -async function resetIndex({ - fs: _fs, +async function readCommit({ + fs, dir, gitdir = join(dir, '.git'), - filepath, - ref = 'HEAD', + oid, cache = {}, }) { try { - assertParameter('fs', _fs); + assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('filepath', filepath); - assertParameter('ref', ref); + assertParameter('oid', oid); - const fs = new FileSystem(_fs); - // Resolve commit - let oid = await GitRefManager.resolve({ fs, gitdir, ref }); - let workdirOid; - try { - // Resolve blob - oid = await resolveFilepath({ - fs, - cache, - gitdir, - oid, - filepath, - }); - } catch (e) { - // This means we're resetting the file to a "deleted" state - oid = null; - } - // For files that aren't in the workdir use zeros - let stats = { - ctime: new Date(0), - mtime: new Date(0), - dev: 0, - ino: 0, - mode: 0, - uid: 0, - gid: 0, - size: 0, - }; - // If the file exists in the workdir... - const object = dir && (await fs.read(join(dir, filepath))); - if (object) { - // ... and has the same hash as the desired state... - workdirOid = await hashObject$1({ - gitdir, - type: 'blob', - object, - }); - if (oid === workdirOid) { - // ... use the workdir Stats object - stats = await fs.lstat(join(dir, filepath)); - } - } - await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { - index.delete({ filepath }); - if (oid) { - index.insert({ filepath, stats, oid }); - } - }); + return await _readCommit({ + fs: new FileSystem(fs), + cache, + gitdir, + oid, + }) } catch (err) { - err.caller = 'git.reset'; + err.caller = 'git.readCommit'; throw err } } @@ -30443,45 +29781,76 @@ async function resetIndex({ // @ts-check /** - * Get the value of a symbolic ref or resolve a ref to its SHA-1 object id + * Read the contents of a note + * + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} args.gitdir + * @param {string} [args.ref] - The notes ref to look under + * @param {string} args.oid + * + * @returns {Promise} Resolves successfully with note contents as a Buffer. 
+ */ + +async function _readNote({ + fs, + cache, + gitdir, + ref = 'refs/notes/commits', + oid, +}) { + const parent = await GitRefManager.resolve({ gitdir, fs, ref }); + const { blob } = await _readBlob({ + fs, + cache, + gitdir, + oid: parent, + filepath: oid, + }); + + return blob +} + +// @ts-check + +/** + * Read the contents of a note * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - The ref to resolve - * @param {number} [args.depth = undefined] - How many symbolic references to follow before returning - * - * @returns {Promise} Resolves successfully with a SHA-1 object id or the value of a symbolic ref - * - * @example - * let currentCommit = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD' }) - * console.log(currentCommit) - * let currentBranch = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD', depth: 2 }) - * console.log(currentBranch) + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.ref] - The notes ref to look under + * @param {string} args.oid - The SHA-1 object id of the object to get the note for. + * @param {object} [args.cache] - a [cache](cache.md) object * + * @returns {Promise} Resolves successfully with note contents as a Buffer. */ -async function resolveRef({ + +async function readNote({ fs, dir, gitdir = join(dir, '.git'), - ref, - depth, + ref = 'refs/notes/commits', + oid, + cache = {}, }) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); assertParameter('ref', ref); + assertParameter('oid', oid); - const oid = await GitRefManager.resolve({ + return await _readNote({ fs: new FileSystem(fs), + cache, gitdir, ref, - depth, - }); - return oid + oid, + }) } catch (err) { - err.caller = 'git.resolveRef'; + err.caller = 'git.readNote'; throw err } } @@ -30489,550 +29858,605 @@ async function resolveRef({ // @ts-check /** - * Write an entry to the git config files. * - * *Caveats:* - * - Currently only the local `$GIT_DIR/config` file can be read or written. However support for the global `~/.gitconfig` and system `$(prefix)/etc/gitconfig` will be added in the future. - * - The current parser does not support the more exotic features of the git-config file format such as `[include]` and `[includeIf]`. + * @typedef {Object} DeflatedObject + * @property {string} oid + * @property {'deflated'} type + * @property {'deflated'} format + * @property {Uint8Array} object + * @property {string} [source] * - * @param {Object} args - * @param {FsClient} args.fs - a file system implementation - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.path - The key of the git config entry - * @param {string | boolean | number | void} args.value - A value to store at that path. (Use `undefined` as the value to delete a config entry.) - * @param {boolean} [args.append = false] - If true, will append rather than replace when setting (use with multi-valued config options). 
+ */ + +/** * - * @returns {Promise} Resolves successfully when operation completed + * @typedef {Object} WrappedObject + * @property {string} oid + * @property {'wrapped'} type + * @property {'wrapped'} format + * @property {Uint8Array} object + * @property {string} [source] * - * @example - * // Write config value - * await git.setConfig({ - * fs, - * dir: '/tutorial', - * path: 'user.name', - * value: 'Mr. Test' - * }) + */ + +/** * - * // Print out config file - * let file = await fs.promises.readFile('/tutorial/.git/config', 'utf8') - * console.log(file) + * @typedef {Object} RawObject + * @property {string} oid + * @property {'blob'|'commit'|'tree'|'tag'} type + * @property {'content'} format + * @property {Uint8Array} object + * @property {string} [source] * - * // Delete a config entry - * await git.setConfig({ - * fs, - * dir: '/tutorial', - * path: 'user.name', - * value: undefined - * }) + */ + +/** + * + * @typedef {Object} ParsedBlobObject + * @property {string} oid + * @property {'blob'} type + * @property {'parsed'} format + * @property {string} object + * @property {string} [source] * - * // Print out config file - * file = await fs.promises.readFile('/tutorial/.git/config', 'utf8') - * console.log(file) */ -async function setConfig({ - fs: _fs, - dir, - gitdir = join(dir, '.git'), - path, - value, - append = false, -}) { - try { - assertParameter('fs', _fs); - assertParameter('gitdir', gitdir); - assertParameter('path', path); - // assertParameter('value', value) // We actually allow 'undefined' as a value to unset/delete - const fs = new FileSystem(_fs); - const config = await GitConfigManager.get({ fs, gitdir }); - if (append) { - await config.append(path, value); - } else { - await config.set(path, value); - } - await GitConfigManager.save({ fs, gitdir, config }); - } catch (err) { - err.caller = 'git.setConfig'; - throw err - } -} +/** + * + * @typedef {Object} ParsedCommitObject + * @property {string} oid + * @property {'commit'} type + * @property {'parsed'} format + * @property {CommitObject} object + * @property {string} [source] + * + */ -// @ts-check +/** + * + * @typedef {Object} ParsedTreeObject + * @property {string} oid + * @property {'tree'} type + * @property {'parsed'} format + * @property {TreeObject} object + * @property {string} [source] + * + */ /** - * Tell whether a file has been changed * - * The possible resolve values are: + * @typedef {Object} ParsedTagObject + * @property {string} oid + * @property {'tag'} type + * @property {'parsed'} format + * @property {TagObject} object + * @property {string} [source] * - * | status | description | - * | --------------------- | ------------------------------------------------------------------------------------- | - * | `"ignored"` | file ignored by a .gitignore rule | - * | `"unmodified"` | file unchanged from HEAD commit | - * | `"*modified"` | file has modifications, not yet staged | - * | `"*deleted"` | file has been removed, but the removal is not yet staged | - * | `"*added"` | file is untracked, not yet staged | - * | `"absent"` | file not present in HEAD commit, staging area, or working dir | - * | `"modified"` | file has modifications, staged | - * | `"deleted"` | file has been removed, staged | - * | `"added"` | previously untracked file, staged | - * | `"*unmodified"` | working dir and HEAD commit match, but index differs | - * | `"*absent"` | file not present in working dir or HEAD commit, but present in the index | - * | `"*undeleted"` | file was deleted from the index, but is still in the 
working dir | - * | `"*undeletemodified"` | file was deleted from the index, but is present with modifications in the working dir | + */ + +/** + * + * @typedef {ParsedBlobObject | ParsedCommitObject | ParsedTreeObject | ParsedTagObject} ParsedObject + */ + +/** + * + * @typedef {DeflatedObject | WrappedObject | RawObject | ParsedObject } ReadObjectResult + */ + +/** + * Read a git object directly by its SHA-1 object id + * + * Regarding `ReadObjectResult`: + * + * - `oid` will be the same as the `oid` argument unless the `filepath` argument is provided, in which case it will be the oid of the tree or blob being returned. + * - `type` of deflated objects is `'deflated'`, and `type` of wrapped objects is `'wrapped'` + * - `format` is usually, but not always, the format you requested. Packfiles do not store each object individually compressed so if you end up reading the object from a packfile it will be returned in format 'content' even if you requested 'deflated' or 'wrapped'. + * - `object` will be an actual Object if format is 'parsed' and the object is a commit, tree, or annotated tag. Blobs are still formatted as Buffers unless an encoding is provided in which case they'll be strings. If format is anything other than 'parsed', object will be a Buffer. + * - `source` is the name of the packfile or loose object file where the object was found. + * + * The `format` parameter can have the following values: + * + * | param | description | + * | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + * | 'deflated' | Return the raw deflate-compressed buffer for an object if possible. Useful for efficiently shuffling around loose objects when you don't care about the contents and can save time by not inflating them. | + * | 'wrapped' | Return the inflated object buffer wrapped in the git object header if possible. This is the raw data used when calculating the SHA-1 object id of a git object. | + * | 'content' | Return the object buffer without the git header. | + * | 'parsed' | Returns a parsed representation of the object. | + * + * The result will be in one of the following schemas: + * + * ## `'deflated'` format + * + * {@link DeflatedObject typedef} + * + * ## `'wrapped'` format + * + * {@link WrappedObject typedef} + * + * ## `'content'` format + * + * {@link RawObject typedef} + * + * ## `'parsed'` format + * + * ### parsed `'blob'` type + * + * {@link ParsedBlobObject typedef} + * + * ### parsed `'commit'` type + * + * {@link ParsedCommitObject typedef} + * {@link CommitObject typedef} + * + * ### parsed `'tree'` type + * + * {@link ParsedTreeObject typedef} + * {@link TreeObject typedef} + * {@link TreeEntry typedef} + * + * ### parsed `'tag'` type + * + * {@link ParsedTagObject typedef} + * {@link TagObject typedef} + * + * @deprecated + * > This command is overly complicated. + * > + * > If you know the type of object you are reading, use [`readBlob`](./readBlob.md), [`readCommit`](./readCommit.md), [`readTag`](./readTag.md), or [`readTree`](./readTree.md). 
* * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.filepath - The path to the file to query + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.oid - The SHA-1 object id to get + * @param {'deflated' | 'wrapped' | 'content' | 'parsed'} [args.format = 'parsed'] - What format to return the object in. The choices are described in more detail below. + * @param {string} [args.filepath] - Don't return the object with `oid` itself, but resolve `oid` to a tree and then return the object at that filepath. To return the root directory of a tree set filepath to `''` + * @param {string} [args.encoding] - A convenience argument that only affects blobs. Instead of returning `object` as a buffer, it returns a string parsed using the given encoding. * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise<'ignored'|'unmodified'|'*modified'|'*deleted'|'*added'|'absent'|'modified'|'deleted'|'added'|'*unmodified'|'*absent'|'*undeleted'|'*undeletemodified'>} Resolves successfully with the file's git status + * @returns {Promise} Resolves successfully with a git object description + * @see ReadObjectResult * * @example - * let status = await git.status({ fs, dir: '/tutorial', filepath: 'README.md' }) - * console.log(status) + * // Given a ransom SHA-1 object id, figure out what it is + * let { type, object } = await git.readObject({ + * fs, + * dir: '/tutorial', + * oid: '0698a781a02264a6f37ba3ff41d78067eaf0f075' + * }) + * switch (type) { + * case 'commit': { + * console.log(object) + * break + * } + * case 'tree': { + * console.log(object) + * break + * } + * case 'blob': { + * console.log(object) + * break + * } + * case 'tag': { + * console.log(object) + * break + * } + * } * */ -async function status({ +async function readObject({ fs: _fs, dir, gitdir = join(dir, '.git'), - filepath, + oid, + format = 'parsed', + filepath = undefined, + encoding = undefined, cache = {}, }) { try { assertParameter('fs', _fs); assertParameter('gitdir', gitdir); - assertParameter('filepath', filepath); + assertParameter('oid', oid); const fs = new FileSystem(_fs); - const ignored = await GitIgnoreManager.isIgnored({ - fs, - gitdir, - dir, - filepath, - }); - if (ignored) { - return 'ignored' + if (filepath !== undefined) { + oid = await resolveFilepath({ + fs, + cache, + gitdir, + oid, + filepath, + }); } - const headTree = await getHeadTree({ fs, cache, gitdir }); - const treeOid = await getOidAtPath({ + // GitObjectManager does not know how to parse content, so we tweak that parameter before passing it. + const _format = format === 'parsed' ? 
'content' : format; + const result = await _readObject({ fs, cache, gitdir, - tree: headTree, - path: filepath, + oid, + format: _format, }); - const indexEntry = await GitIndexManager.acquire( - { fs, gitdir, cache }, - async function(index) { - for (const entry of index) { - if (entry.path === filepath) return entry - } - return null - } - ); - const stats = await fs.lstat(join(dir, filepath)); - - const H = treeOid !== null; // head - const I = indexEntry !== null; // index - const W = stats !== null; // working dir - - const getWorkdirOid = async () => { - if (I && !compareStats(indexEntry, stats)) { - return indexEntry.oid - } else { - const object = await fs.read(join(dir, filepath)); - const workdirOid = await hashObject$1({ - gitdir, - type: 'blob', - object, - }); - // If the oid in the index === working dir oid but stats differed update cache - if (I && indexEntry.oid === workdirOid) { - // and as long as our fs.stats aren't bad. - // size of -1 happens over a BrowserFS HTTP Backend that doesn't serve Content-Length headers - // (like the Karma webserver) because BrowserFS HTTP Backend uses HTTP HEAD requests to do fs.stat - if (stats.size !== -1) { - // We don't await this so we can return faster for one-off cases. - GitIndexManager.acquire({ fs, gitdir, cache }, async function( - index - ) { - index.insert({ filepath, stats, oid: workdirOid }); - }); + result.oid = oid; + if (format === 'parsed') { + result.format = 'parsed'; + switch (result.type) { + case 'commit': + result.object = GitCommit.from(result.object).parse(); + break + case 'tree': + result.object = GitTree.from(result.object).entries(); + break + case 'blob': + // Here we consider returning a raw Buffer as the 'content' format + // and returning a string as the 'parsed' format + if (encoding) { + result.object = result.object.toString(encoding); + } else { + result.object = new Uint8Array(result.object); + result.format = 'content'; } - } - return workdirOid - } - }; - - if (!H && !W && !I) return 'absent' // --- - if (!H && !W && I) return '*absent' // -A- - if (!H && W && !I) return '*added' // --A - if (!H && W && I) { - const workdirOid = await getWorkdirOid(); - // @ts-ignore - return workdirOid === indexEntry.oid ? 'added' : '*added' // -AA : -AB - } - if (H && !W && !I) return 'deleted' // A-- - if (H && !W && I) { - // @ts-ignore - return treeOid === indexEntry.oid ? '*deleted' : '*deleted' // AA- : AB- - } - if (H && W && !I) { - const workdirOid = await getWorkdirOid(); - return workdirOid === treeOid ? '*undeleted' : '*undeletemodified' // A-A : A-B - } - if (H && W && I) { - const workdirOid = await getWorkdirOid(); - if (workdirOid === treeOid) { - // @ts-ignore - return workdirOid === indexEntry.oid ? 'unmodified' : '*unmodified' // AAA : ABA - } else { - // @ts-ignore - return workdirOid === indexEntry.oid ? 
'modified' : '*modified' // ABB : AAB + break + case 'tag': + result.object = GitAnnotatedTag.from(result.object).parse(); + break + default: + throw new ObjectTypeError( + result.oid, + result.type, + 'blob|commit|tag|tree' + ) } + } else if (result.format === 'deflated' || result.format === 'wrapped') { + result.type = result.format; } - /* - --- - -A- - --A - -AA - -AB - A-- - AA- - AB- - A-A - A-B - AAA - ABA - ABB - AAB - */ + return result } catch (err) { - err.caller = 'git.status'; + err.caller = 'git.readObject'; throw err } } -async function getOidAtPath({ fs, cache, gitdir, tree, path }) { - if (typeof path === 'string') path = path.split('/'); - const dirname = path.shift(); - for (const entry of tree) { - if (entry.path === dirname) { - if (path.length === 0) { - return entry.oid - } - const { type, object } = await _readObject({ - fs, - cache, - gitdir, - oid: entry.oid, - }); - if (type === 'tree') { - const tree = GitTree.from(object); - return getOidAtPath({ fs, cache, gitdir, tree, path }) - } - if (type === 'blob') { - throw new ObjectTypeError(entry.oid, type, 'blob', path.join('/')) - } - } +// @ts-check + +/** + * + * @typedef {Object} ReadTagResult - The object returned has the following schema: + * @property {string} oid - SHA-1 object id of this tag + * @property {TagObject} tag - the parsed tag object + * @property {string} payload - PGP signing payload + */ + +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {any} args.cache + * @param {string} args.gitdir + * @param {string} args.oid + * + * @returns {Promise} + */ +async function _readTag({ fs, cache, gitdir, oid }) { + const { type, object } = await _readObject({ + fs, + cache, + gitdir, + oid, + format: 'content', + }); + if (type !== 'tag') { + throw new ObjectTypeError(oid, type, 'tag') } - return null + const tag = GitAnnotatedTag.from(object); + const result = { + oid, + tag: tag.parse(), + payload: tag.payload(), + }; + // @ts-ignore + return result } -async function getHeadTree({ fs, cache, gitdir }) { - // Get the tree from the HEAD commit. 
- let oid; +/** + * + * @typedef {Object} ReadTagResult - The object returned has the following schema: + * @property {string} oid - SHA-1 object id of this tag + * @property {TagObject} tag - the parsed tag object + * @property {string} payload - PGP signing payload + */ + +/** + * Read an annotated tag object directly + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.oid - The SHA-1 object id to get + * @param {object} [args.cache] - a [cache](cache.md) object + * + * @returns {Promise} Resolves successfully with a git object description + * @see ReadTagResult + * @see TagObject + * + */ +async function readTag({ + fs, + dir, + gitdir = join(dir, '.git'), + oid, + cache = {}, +}) { try { - oid = await GitRefManager.resolve({ fs, gitdir, ref: 'HEAD' }); - } catch (e) { - // Handle fresh branches with no commits - if (e instanceof NotFoundError) { - return [] - } + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('oid', oid); + + return await _readTag({ + fs: new FileSystem(fs), + cache, + gitdir, + oid, + }) + } catch (err) { + err.caller = 'git.readTag'; + throw err } - const { tree } = await _readTree({ fs, cache, gitdir, oid }); - return tree } // @ts-check /** - * Efficiently get the status of multiple files at once. - * - * The returned `StatusMatrix` is admittedly not the easiest format to read. - * However it conveys a large amount of information in dense format that should make it easy to create reports about the current state of the repository; - * without having to do multiple, time-consuming isomorphic-git calls. - * My hope is that the speed and flexibility of the function will make up for the learning curve of interpreting the return value. - * - * ```js live - * // get the status of all the files in 'src' - * let status = await git.statusMatrix({ - * fs, - * dir: '/tutorial', - * filter: f => f.startsWith('src/') - * }) - * console.log(status) - * ``` - * - * ```js live - * // get the status of all the JSON and Markdown files - * let status = await git.statusMatrix({ - * fs, - * dir: '/tutorial', - * filter: f => f.endsWith('.json') || f.endsWith('.md') - * }) - * console.log(status) - * ``` - * - * The result is returned as a 2D array. - * The outer array represents the files and/or blobs in the repo, in alphabetical order. - * The inner arrays describe the status of the file: - * the first value is the filepath, and the next three are integers - * representing the HEAD status, WORKDIR status, and STAGE status of the entry. - * - * ```js - * // example StatusMatrix - * [ - * ["a.txt", 0, 2, 0], // new, untracked - * ["b.txt", 0, 2, 2], // added, staged - * ["c.txt", 0, 2, 3], // added, staged, with unstaged changes - * ["d.txt", 1, 1, 1], // unmodified - * ["e.txt", 1, 2, 1], // modified, unstaged - * ["f.txt", 1, 2, 2], // modified, staged - * ["g.txt", 1, 2, 3], // modified, staged, with unstaged changes - * ["h.txt", 1, 0, 1], // deleted, unstaged - * ["i.txt", 1, 0, 0], // deleted, staged - * ] - * ``` - * - * - The HEAD status is either absent (0) or present (1). - * - The WORKDIR status is either absent (0), identical to HEAD (1), or different from HEAD (2). - * - The STAGE status is either absent (0), identical to HEAD (1), identical to WORKDIR (2), or different from WORKDIR (3). 
- * - * ```ts - * type Filename = string - * type HeadStatus = 0 | 1 - * type WorkdirStatus = 0 | 1 | 2 - * type StageStatus = 0 | 1 | 2 | 3 - * - * type StatusRow = [Filename, HeadStatus, WorkdirStatus, StageStatus] - * - * type StatusMatrix = StatusRow[] - * ``` * - * > Think of the natural progression of file modifications as being from HEAD (previous) -> WORKDIR (current) -> STAGE (next). - * > Then HEAD is "version 1", WORKDIR is "version 2", and STAGE is "version 3". - * > Then, imagine a "version 0" which is before the file was created. - * > Then the status value in each column corresponds to the oldest version of the file it is identical to. - * > (For a file to be identical to "version 0" means the file is deleted.) - * - * Here are some examples of queries you can answer using the result: - * - * #### Q: What files have been deleted? - * ```js - * const FILE = 0, WORKDIR = 2 - * - * const filenames = (await statusMatrix({ dir })) - * .filter(row => row[WORKDIR] === 0) - * .map(row => row[FILE]) - * ``` - * - * #### Q: What files have unstaged changes? - * ```js - * const FILE = 0, WORKDIR = 2, STAGE = 3 - * - * const filenames = (await statusMatrix({ dir })) - * .filter(row => row[WORKDIR] !== row[STAGE]) - * .map(row => row[FILE]) - * ``` - * - * #### Q: What files have been modified since the last commit? - * ```js - * const FILE = 0, HEAD = 1, WORKDIR = 2 - * - * const filenames = (await statusMatrix({ dir })) - * .filter(row => row[HEAD] !== row[WORKDIR]) - * .map(row => row[FILE]) - * ``` + * @typedef {Object} ReadTreeResult - The object returned has the following schema: + * @property {string} oid - SHA-1 object id of this tree + * @property {TreeObject} tree - the parsed tree object + */ + +/** + * Read a tree object directly * - * #### Q: What files will NOT be changed if I commit right now? - * ```js - * const FILE = 0, HEAD = 1, STAGE = 3 + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.oid - The SHA-1 object id to get. Annotated tags and commits are peeled. + * @param {string} [args.filepath] - Don't return the object with `oid` itself, but resolve `oid` to a tree and then return the tree object at that filepath. 
+ * @param {object} [args.cache] - a [cache](cache.md) object * - * const filenames = (await statusMatrix({ dir })) - * .filter(row => row[HEAD] === row[STAGE]) - * .map(row => row[FILE]) - * ``` + * @returns {Promise} Resolves successfully with a git tree object + * @see ReadTreeResult + * @see TreeObject + * @see TreeEntry * - * For reference, here are all possible combinations: + */ +async function readTree({ + fs, + dir, + gitdir = join(dir, '.git'), + oid, + filepath = undefined, + cache = {}, +}) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('oid', oid); + + return await _readTree({ + fs: new FileSystem(fs), + cache, + gitdir, + oid, + filepath, + }) + } catch (err) { + err.caller = 'git.readTree'; + throw err + } +} + +// @ts-check + +/** + * Remove a file from the git index (aka staging area) * - * | HEAD | WORKDIR | STAGE | `git status --short` equivalent | - * | ---- | ------- | ----- | ------------------------------- | - * | 0 | 0 | 0 | `` | - * | 0 | 0 | 3 | `AD` | - * | 0 | 2 | 0 | `??` | - * | 0 | 2 | 2 | `A ` | - * | 0 | 2 | 3 | `AM` | - * | 1 | 0 | 0 | `D ` | - * | 1 | 0 | 1 | ` D` | - * | 1 | 0 | 3 | `MD` | - * | 1 | 1 | 0 | `D ` + `??` | - * | 1 | 1 | 1 | `` | - * | 1 | 1 | 3 | `MM` | - * | 1 | 2 | 0 | `D ` + `??` | - * | 1 | 2 | 1 | ` M` | - * | 1 | 2 | 2 | `M ` | - * | 1 | 2 | 3 | `MM` | + * Note that this does NOT delete the file in the working directory. * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} [args.ref = 'HEAD'] - Optionally specify a different commit to compare against the workdir and stage instead of the HEAD - * @param {string[]} [args.filepaths = ['.']] - Limit the query to the given files and directories - * @param {function(string): boolean} [args.filter] - Filter the results to only those whose filepath matches a function. + * @param {string} args.filepath - The path to the file to remove from the index * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise>} Resolves with a status matrix, described below. 
- * @see StatusRow + * @returns {Promise} Resolves successfully once the git index has been updated + * + * @example + * await git.remove({ fs, dir: '/tutorial', filepath: 'README.md' }) + * console.log('done') + * + */ +async function remove({ + fs: _fs, + dir, + gitdir = join(dir, '.git'), + filepath, + cache = {}, +}) { + try { + assertParameter('fs', _fs); + assertParameter('gitdir', gitdir); + assertParameter('filepath', filepath); + + await GitIndexManager.acquire( + { fs: new FileSystem(_fs), gitdir, cache }, + async function(index) { + index.delete({ filepath }); + } + ); + } catch (err) { + err.caller = 'git.remove'; + throw err + } +} + +// @ts-check + +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {object} args.cache + * @param {SignCallback} [args.onSign] + * @param {string} [args.dir] + * @param {string} [args.gitdir=join(dir,'.git')] + * @param {string} [args.ref] + * @param {string} args.oid + * @param {Object} args.author + * @param {string} args.author.name + * @param {string} args.author.email + * @param {number} args.author.timestamp + * @param {number} args.author.timezoneOffset + * @param {Object} args.committer + * @param {string} args.committer.name + * @param {string} args.committer.email + * @param {number} args.committer.timestamp + * @param {number} args.committer.timezoneOffset + * @param {string} [args.signingKey] + * + * @returns {Promise} */ -async function statusMatrix({ - fs: _fs, - dir, - gitdir = join(dir, '.git'), - ref = 'HEAD', - filepaths = ['.'], - filter, - cache = {}, + +async function _removeNote({ + fs, + cache, + onSign, + gitdir, + ref = 'refs/notes/commits', + oid, + author, + committer, + signingKey, }) { + // Get the current note commit + let parent; try { - assertParameter('fs', _fs); - assertParameter('gitdir', gitdir); - assertParameter('ref', ref); + parent = await GitRefManager.resolve({ gitdir, fs, ref }); + } catch (err) { + if (!(err instanceof NotFoundError)) { + throw err + } + } - const fs = new FileSystem(_fs); - return await _walk({ - fs, - cache, - dir, - gitdir, - trees: [TREE({ ref }), WORKDIR(), STAGE()], - map: async function(filepath, [head, workdir, stage]) { - // Ignore ignored files, but only if they are not already tracked. 
- if (!head && !stage && workdir) { - if ( - await GitIgnoreManager.isIgnored({ - fs, - dir, - filepath, - }) - ) { - return null - } - } - // match against base paths - if (!filepaths.some(base => worthWalking(filepath, base))) { - return null - } - // Late filter against file names - if (filter) { - if (!filter(filepath)) return - } + // I'm using the "empty tree" magic number here for brevity + const result = await _readTree({ + fs, + gitdir, + oid: parent || '4b825dc642cb6eb9a060e54bf8d69288fbee4904', + }); + let tree = result.tree; - // For now, just bail on directories - const headType = head && (await head.type()); - if (headType === 'tree' || headType === 'special') return - if (headType === 'commit') return null + // Remove the note blob entry from the tree + tree = tree.filter(entry => entry.path !== oid); - const workdirType = workdir && (await workdir.type()); - if (workdirType === 'tree' || workdirType === 'special') return + // Create the new note tree + const treeOid = await _writeTree({ + fs, + gitdir, + tree, + }); - const stageType = stage && (await stage.type()); - if (stageType === 'commit') return null - if (stageType === 'tree' || stageType === 'special') return + // Create the new note commit + const commitOid = await _commit({ + fs, + cache, + onSign, + gitdir, + ref, + tree: treeOid, + parent: parent && [parent], + message: `Note removed by 'isomorphic-git removeNote'\n`, + author, + committer, + signingKey, + }); - // Figure out the oids, using the staged oid for the working dir oid if the stats match. - const headOid = head ? await head.oid() : undefined; - const stageOid = stage ? await stage.oid() : undefined; - let workdirOid; - if (!head && workdir && !stage) { - // We don't actually NEED the sha. Any sha will do - // TODO: update this logic to handle N trees instead of just 3. - workdirOid = '42'; - } else if (workdir) { - workdirOid = await workdir.oid(); - } - const entry = [undefined, headOid, workdirOid, stageOid]; - const result = entry.map(value => entry.indexOf(value)); - result.shift(); // remove leading undefined entry - return [filepath, ...result] - }, - }) - } catch (err) { - err.caller = 'git.statusMatrix'; - throw err - } + return commitOid } // @ts-check /** - * Create a lightweight tag + * Remove an object note * * @param {object} args * @param {FsClient} args.fs - a file system client + * @param {SignCallback} [args.onSign] - a PGP signing implementation * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - What to name the tag - * @param {string} [args.object = 'HEAD'] - What oid the tag refers to. (Will resolve to oid if value is a ref.) By default, the commit object which is referred by the current `HEAD` is used. - * @param {boolean} [args.force = false] - Instead of throwing an error if a tag named `ref` already exists, overwrite the existing tag. - * - * @returns {Promise} Resolves successfully when filesystem operations are complete - * - * @example - * await git.tag({ fs, dir: '/tutorial', ref: 'test-tag' }) - * console.log('done') + * @param {string} [args.ref] - The notes ref to look under + * @param {string} args.oid - The SHA-1 object id of the object to remove the note from. + * @param {Object} [args.author] - The details about the author. + * @param {string} [args.author.name] - Default is `user.name` config. 
+ * @param {string} [args.author.email] - Default is `user.email` config. + * @param {number} [args.author.timestamp=Math.floor(Date.now()/1000)] - Set the author timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.author.timezoneOffset] - Set the author timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {Object} [args.committer = author] - The details about the note committer, in the same format as the author parameter. If not specified, the author details are used. + * @param {string} [args.committer.name] - Default is `user.name` config. + * @param {string} [args.committer.email] - Default is `user.email` config. + * @param {number} [args.committer.timestamp=Math.floor(Date.now()/1000)] - Set the committer timestamp field. This is the integer number of seconds since the Unix epoch (1970-01-01 00:00:00). + * @param {number} [args.committer.timezoneOffset] - Set the committer timezone offset field. This is the difference, in minutes, from the current timezone to UTC. Default is `(new Date()).getTimezoneOffset()`. + * @param {string} [args.signingKey] - Sign the tag object using this private PGP key. + * @param {object} [args.cache] - a [cache](cache.md) object * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the commit object for the note removal. */ -async function tag({ + +async function removeNote({ fs: _fs, + onSign, dir, gitdir = join(dir, '.git'), - ref, - object, - force = false, + ref = 'refs/notes/commits', + oid, + author: _author, + committer: _committer, + signingKey, + cache = {}, }) { try { assertParameter('fs', _fs); assertParameter('gitdir', gitdir); - assertParameter('ref', ref); + assertParameter('oid', oid); const fs = new FileSystem(_fs); - if (ref === undefined) { - throw new MissingParameterError('ref') - } - - ref = ref.startsWith('refs/tags/') ? ref : `refs/tags/${ref}`; + const author = await normalizeAuthorObject({ fs, gitdir, author: _author }); + if (!author) throw new MissingNameError('author') - // Resolve passed object - const value = await GitRefManager.resolve({ + const committer = await normalizeCommitterObject({ fs, gitdir, - ref: object || 'HEAD', + author, + committer: _committer, }); + if (!committer) throw new MissingNameError('committer') - if (!force && (await GitRefManager.exists({ fs, gitdir, ref }))) { - throw new AlreadyExistsError('tag', ref) - } - - await GitRefManager.writeRef({ fs, gitdir, ref, value }); + return await _removeNote({ + fs, + cache, + onSign, + gitdir, + ref, + oid, + author, + committer, + signingKey, + }) } catch (err) { - err.caller = 'git.tag'; + err.caller = 'git.removeNote'; throw err } } @@ -31040,298 +30464,198 @@ async function tag({ // @ts-check /** - * Return the version number of isomorphic-git - * - * I don't know why you might need this. I added it just so I could check that I was getting - * the correct version of the library and not a cached version. 
- * - * @returns {string} the version string taken from package.json at publication time + * Rename a branch * - * @example - * console.log(git.version()) + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {string} args.ref - The name of the new branch + * @param {string} args.oldref - The name of the old branch + * @param {boolean} [args.checkout = false] * + * @returns {Promise} Resolves successfully when filesystem operations are complete */ -function version() { - try { - return pkg.version - } catch (err) { - err.caller = 'git.version'; - throw err +async function _renameBranch({ + fs, + gitdir, + oldref, + ref, + checkout = false, +}) { + if (ref !== cleanGitRef.clean(ref)) { + throw new InvalidRefNameError(ref, cleanGitRef.clean(ref)) + } + + if (oldref !== cleanGitRef.clean(oldref)) { + throw new InvalidRefNameError(oldref, cleanGitRef.clean(oldref)) + } + + const fulloldref = `refs/heads/${oldref}`; + const fullnewref = `refs/heads/${ref}`; + + const newexist = await GitRefManager.exists({ fs, gitdir, ref: fullnewref }); + + if (newexist) { + throw new AlreadyExistsError('branch', ref, false) + } + + const value = await GitRefManager.resolve({ + fs, + gitdir, + ref: fulloldref, + depth: 1, + }); + + await GitRefManager.writeRef({ fs, gitdir, ref: fullnewref, value }); + await GitRefManager.deleteRef({ fs, gitdir, ref: fulloldref }); + + if (checkout) { + // Update HEAD + await GitRefManager.writeSymbolicRef({ + fs, + gitdir, + ref: 'HEAD', + value: fullnewref, + }); } } // @ts-check /** - * @callback WalkerMap - * @param {string} filename - * @param {Array} entries - * @returns {Promise} - */ - -/** - * @callback WalkerReduce - * @param {any} parent - * @param {any[]} children - * @returns {Promise} - */ - -/** - * @callback WalkerIterateCallback - * @param {WalkerEntry[]} entries - * @returns {Promise} - */ - -/** - * @callback WalkerIterate - * @param {WalkerIterateCallback} walk - * @param {IterableIterator} children - * @returns {Promise} - */ - -/** - * A powerful recursive tree-walking utility. - * - * The `walk` API simplifies gathering detailed information about a tree or comparing all the filepaths in two or more trees. - * Trees can be git commits, the working directory, or the or git index (staging area). - * As long as a file or directory is present in at least one of the trees, it will be traversed. - * Entries are traversed in alphabetical order. - * - * The arguments to `walk` are the `trees` you want to traverse, and 3 optional transform functions: - * `map`, `reduce`, and `iterate`. - * - * ## `TREE`, `WORKDIR`, and `STAGE` - * - * Tree walkers are represented by three separate functions that can be imported: - * - * ```js - * import { TREE, WORKDIR, STAGE } from 'isomorphic-git' - * ``` - * - * These functions return opaque handles called `Walker`s. - * The only thing that `Walker` objects are good for is passing into `walk`. - * Here are the three `Walker`s passed into `walk` by the `statusMatrix` command for example: - * - * ```js - * let ref = 'HEAD' - * - * let trees = [TREE({ ref }), WORKDIR(), STAGE()] - * ``` - * - * For the arguments, see the doc pages for [TREE](./TREE.md), [WORKDIR](./WORKDIR.md), and [STAGE](./STAGE.md). - * - * `map`, `reduce`, and `iterate` allow you control the recursive walk by pruning and transforming `WalkerEntry`s into the desired result. 
- * - * ## WalkerEntry - * - * {@link WalkerEntry typedef} - * - * `map` receives an array of `WalkerEntry[]` as its main argument, one `WalkerEntry` for each `Walker` in the `trees` argument. - * The methods are memoized per `WalkerEntry` so calling them multiple times in a `map` function does not adversely impact performance. - * By only computing these values if needed, you build can build lean, mean, efficient walking machines. - * - * ### WalkerEntry#type() - * - * Returns the kind as a string. This is normally either `tree` or `blob`. - * - * `TREE`, `STAGE`, and `WORKDIR` walkers all return a string. - * - * Possible values: - * - * - `'tree'` directory - * - `'blob'` file - * - `'special'` used by `WORKDIR` to represent irregular files like sockets and FIFOs - * - `'commit'` used by `TREE` to represent submodules - * - * ```js - * await entry.type() - * ``` - * - * ### WalkerEntry#mode() - * - * Returns the file mode as a number. Use this to distinguish between regular files, symlinks, and executable files. - * - * `TREE`, `STAGE`, and `WORKDIR` walkers all return a number for all `type`s of entries. - * - * It has been normalized to one of the 4 values that are allowed in git commits: - * - * - `0o40000` directory - * - `0o100644` file - * - `0o100755` file (executable) - * - `0o120000` symlink - * - * Tip: to make modes more readable, you can print them to octal using `.toString(8)`. - * - * ```js - * await entry.mode() - * ``` - * - * ### WalkerEntry#oid() - * - * Returns the SHA-1 object id for blobs and trees. - * - * `TREE` walkers return a string for `blob` and `tree` entries. - * - * `STAGE` and `WORKDIR` walkers return a string for `blob` entries and `undefined` for `tree` entries. - * - * ```js - * await entry.oid() - * ``` - * - * ### WalkerEntry#content() - * - * Returns the file contents as a Buffer. - * - * `TREE` and `WORKDIR` walkers return a Buffer for `blob` entries and `undefined` for `tree` entries. - * - * `STAGE` walkers always return `undefined` since the file contents are never stored in the stage. - * - * ```js - * await entry.content() - * ``` - * - * ### WalkerEntry#stat() - * - * Returns a normalized subset of filesystem Stat data. - * - * `WORKDIR` walkers return a `Stat` for `blob` and `tree` entries. - * - * `STAGE` walkers return a `Stat` for `blob` entries and `undefined` for `tree` entries. - * - * `TREE` walkers return `undefined` for all entry types. - * - * ```js - * await entry.stat() - * ``` - * - * {@link Stat typedef} - * - * ## map(string, Array) => Promise - * - * {@link WalkerMap typedef} - * - * This is the function that is called once per entry BEFORE visiting the children of that node. - * - * If you return `null` for a `tree` entry, then none of the children of that `tree` entry will be walked. - * - * This is a good place for query logic, such as examining the contents of a file. - * Ultimately, compare all the entries and return any values you are interested in. - * If you do not return a value (or return undefined) that entry will be filtered from the results. - * - * Example 1: Find all the files containing the word 'foo'. 
- * ```js - * async function map(filepath, [head, workdir]) { - * let content = (await workdir.content()).toString('utf8') - * if (content.contains('foo')) { - * return { - * filepath, - * content - * } - * } - * } - * ``` - * - * Example 2: Return the difference between the working directory and the HEAD commit - * ```js - * const diff = require('diff-lines') - * async function map(filepath, [head, workdir]) { - * return { - * filepath, - * oid: await head.oid(), - * diff: diff((await head.content()).toString('utf8'), (await workdir.content()).toString('utf8')) - * } - * } - * ``` - * - * Example 3: - * ```js - * let path = require('path') - * // Only examine files in the directory `cwd` - * let cwd = 'src/app' - * async function map (filepath, [head, workdir, stage]) { - * if ( - * // don't skip the root directory - * head.fullpath !== '.' && - * // return true for 'src' and 'src/app' - * !cwd.startsWith(filepath) && - * // return true for 'src/app/*' - * path.dirname(filepath) !== cwd - * ) { - * return null - * } else { - * return filepath - * } - * } - * ``` - * - * ## reduce(parent, children) - * - * {@link WalkerReduce typedef} - * - * This is the function that is called once per entry AFTER visiting the children of that node. - * - * Default: `async (parent, children) => parent === undefined ? children.flat() : [parent, children].flat()` - * - * The default implementation of this function returns all directories and children in a giant flat array. - * You can define a different accumulation method though. - * - * Example: Return a hierarchical structure - * ```js - * async function reduce (parent, children) { - * return Object.assign(parent, { children }) - * } - * ``` + * Rename a branch * - * ## iterate(walk, children) + * @param {object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.ref - What to name the branch + * @param {string} args.oldref - What the name of the branch was + * @param {boolean} [args.checkout = false] - Update `HEAD` to point at the newly created branch * - * {@link WalkerIterate typedef} + * @returns {Promise} Resolves successfully when filesystem operations are complete * - * {@link WalkerIterateCallback typedef} + * @example + * await git.renameBranch({ fs, dir: '/tutorial', ref: 'main', oldref: 'master' }) + * console.log('done') * - * Default: `(walk, children) => Promise.all([...children].map(walk))` + */ +async function renameBranch({ + fs, + dir, + gitdir = join(dir, '.git'), + ref, + oldref, + checkout = false, +}) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('ref', ref); + assertParameter('oldref', oldref); + return await _renameBranch({ + fs: new FileSystem(fs), + gitdir, + ref, + oldref, + checkout, + }) + } catch (err) { + err.caller = 'git.renameBranch'; + throw err + } +} + +async function hashObject$1({ gitdir, type, object }) { + return shasum(GitObject.wrap({ type, object })) +} + +// @ts-check + +/** + * Reset a file in the git index (aka staging area) * - * The default implementation recurses all children concurrently using Promise.all. - * However you could use a custom function to traverse children serially or use a global queue to throttle recursion. + * Note that this does NOT modify the file in the working directory. 
* * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {Walker[]} args.trees - The trees you want to traverse - * @param {WalkerMap} [args.map] - Transform `WalkerEntry`s into a result form - * @param {WalkerReduce} [args.reduce] - Control how mapped entries are combined with their parent result - * @param {WalkerIterate} [args.iterate] - Fine-tune how entries within a tree are iterated over + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.filepath - The path to the file to reset in the index + * @param {string} [args.ref = 'HEAD'] - A ref to the commit to use * @param {object} [args.cache] - a [cache](cache.md) object * - * @returns {Promise} The finished tree-walking result + * @returns {Promise} Resolves successfully once the git index has been updated + * + * @example + * await git.resetIndex({ fs, dir: '/tutorial', filepath: 'README.md' }) + * console.log('done') + * */ -async function walk({ - fs, +async function resetIndex({ + fs: _fs, dir, gitdir = join(dir, '.git'), - trees, - map, - reduce, - iterate, + filepath, + ref = 'HEAD', cache = {}, }) { try { - assertParameter('fs', fs); + assertParameter('fs', _fs); assertParameter('gitdir', gitdir); - assertParameter('trees', trees); + assertParameter('filepath', filepath); + assertParameter('ref', ref); - return await _walk({ - fs: new FileSystem(fs), - cache, - dir, - gitdir, - trees, - map, - reduce, - iterate, - }) + const fs = new FileSystem(_fs); + // Resolve commit + let oid = await GitRefManager.resolve({ fs, gitdir, ref }); + let workdirOid; + try { + // Resolve blob + oid = await resolveFilepath({ + fs, + cache, + gitdir, + oid, + filepath, + }); + } catch (e) { + // This means we're resetting the file to a "deleted" state + oid = null; + } + // For files that aren't in the workdir use zeros + let stats = { + ctime: new Date(0), + mtime: new Date(0), + dev: 0, + ino: 0, + mode: 0, + uid: 0, + gid: 0, + size: 0, + }; + // If the file exists in the workdir... + const object = dir && (await fs.read(join(dir, filepath))); + if (object) { + // ... and has the same hash as the desired state... + workdirOid = await hashObject$1({ + gitdir, + type: 'blob', + object, + }); + if (oid === workdirOid) { + // ... 
use the workdir Stats object + stats = await fs.lstat(join(dir, filepath)); + } + } + await GitIndexManager.acquire({ fs, gitdir, cache }, async function(index) { + index.delete({ filepath }); + if (oid) { + index.insert({ filepath, stats, oid }); + } + }); } catch (err) { - err.caller = 'git.walk'; + err.caller = 'git.reset'; throw err } } @@ -31339,42 +30663,45 @@ async function walk({ // @ts-check /** - * Write a blob object directly + * Get the value of a symbolic ref or resolve a ref to its SHA-1 object id * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {Uint8Array} args.blob - The blob object to write + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.ref - The ref to resolve + * @param {number} [args.depth = undefined] - How many symbolic references to follow before returning * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object + * @returns {Promise} Resolves successfully with a SHA-1 object id or the value of a symbolic ref * * @example - * // Manually create a blob. - * let oid = await git.writeBlob({ - * fs, - * dir: '/tutorial', - * blob: new Uint8Array([]) - * }) - * - * console.log('oid', oid) // should be 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' + * let currentCommit = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD' }) + * console.log(currentCommit) + * let currentBranch = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD', depth: 2 }) + * console.log(currentBranch) * */ -async function writeBlob({ fs, dir, gitdir = join(dir, '.git'), blob }) { +async function resolveRef({ + fs, + dir, + gitdir = join(dir, '.git'), + ref, + depth, +}) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('blob', blob); + assertParameter('ref', ref); - return await _writeObject({ + const oid = await GitRefManager.resolve({ fs: new FileSystem(fs), gitdir, - type: 'blob', - object: blob, - format: 'content', - }) + ref, + depth, + }); + return oid } catch (err) { - err.caller = 'git.writeBlob'; + err.caller = 'git.resolveRef'; throw err } } @@ -31382,176 +30709,491 @@ async function writeBlob({ fs, dir, gitdir = join(dir, '.git'), blob }) { // @ts-check /** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {CommitObject} args.commit + * Write an entry to the git config files. * - * @returns {Promise} - * @see CommitObject + * *Caveats:* + * - Currently only the local `$GIT_DIR/config` file can be read or written. However support for the global `~/.gitconfig` and system `$(prefix)/etc/gitconfig` will be added in the future. + * - The current parser does not support the more exotic features of the git-config file format such as `[include]` and `[includeIf]`. + * + * @param {Object} args + * @param {FsClient} args.fs - a file system implementation + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.path - The key of the git config entry + * @param {string | boolean | number | void} args.value - A value to store at that path. 
(Use `undefined` as the value to delete a config entry.) + * @param {boolean} [args.append = false] - If true, will append rather than replace when setting (use with multi-valued config options). + * + * @returns {Promise} Resolves successfully when operation completed + * + * @example + * // Write config value + * await git.setConfig({ + * fs, + * dir: '/tutorial', + * path: 'user.name', + * value: 'Mr. Test' + * }) + * + * // Print out config file + * let file = await fs.promises.readFile('/tutorial/.git/config', 'utf8') + * console.log(file) + * + * // Delete a config entry + * await git.setConfig({ + * fs, + * dir: '/tutorial', + * path: 'user.name', + * value: undefined + * }) * + * // Print out config file + * file = await fs.promises.readFile('/tutorial/.git/config', 'utf8') + * console.log(file) */ -async function _writeCommit({ fs, gitdir, commit }) { - // Convert object to buffer - const object = GitCommit.from(commit).toObject(); - const oid = await _writeObject({ - fs, - gitdir, - type: 'commit', - object, - format: 'content', - }); - return oid +async function setConfig({ + fs: _fs, + dir, + gitdir = join(dir, '.git'), + path, + value, + append = false, +}) { + try { + assertParameter('fs', _fs); + assertParameter('gitdir', gitdir); + assertParameter('path', path); + // assertParameter('value', value) // We actually allow 'undefined' as a value to unset/delete + + const fs = new FileSystem(_fs); + const config = await GitConfigManager.get({ fs, gitdir }); + if (append) { + await config.append(path, value); + } else { + await config.set(path, value); + } + await GitConfigManager.save({ fs, gitdir, config }); + } catch (err) { + err.caller = 'git.setConfig'; + throw err + } } // @ts-check /** - * Write a commit object directly + * Tell whether a file has been changed + * + * The possible resolve values are: + * + * | status | description | + * | --------------------- | ------------------------------------------------------------------------------------- | + * | `"ignored"` | file ignored by a .gitignore rule | + * | `"unmodified"` | file unchanged from HEAD commit | + * | `"*modified"` | file has modifications, not yet staged | + * | `"*deleted"` | file has been removed, but the removal is not yet staged | + * | `"*added"` | file is untracked, not yet staged | + * | `"absent"` | file not present in HEAD commit, staging area, or working dir | + * | `"modified"` | file has modifications, staged | + * | `"deleted"` | file has been removed, staged | + * | `"added"` | previously untracked file, staged | + * | `"*unmodified"` | working dir and HEAD commit match, but index differs | + * | `"*absent"` | file not present in working dir or HEAD commit, but present in the index | + * | `"*undeleted"` | file was deleted from the index, but is still in the working dir | + * | `"*undeletemodified"` | file was deleted from the index, but is present with modifications in the working dir | * * @param {object} args * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {CommitObject} args.commit - The object to write + * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.filepath - The path to the file to query + * @param {object} [args.cache] 
- a [cache](cache.md) object * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object - * @see CommitObject + * @returns {Promise<'ignored'|'unmodified'|'*modified'|'*deleted'|'*added'|'absent'|'modified'|'deleted'|'added'|'*unmodified'|'*absent'|'*undeleted'|'*undeletemodified'>} Resolves successfully with the file's git status + * + * @example + * let status = await git.status({ fs, dir: '/tutorial', filepath: 'README.md' }) + * console.log(status) * */ -async function writeCommit({ - fs, +async function status({ + fs: _fs, dir, gitdir = join(dir, '.git'), - commit, + filepath, + cache = {}, }) { try { - assertParameter('fs', fs); + assertParameter('fs', _fs); assertParameter('gitdir', gitdir); - assertParameter('commit', commit); + assertParameter('filepath', filepath); - return await _writeCommit({ - fs: new FileSystem(fs), + const fs = new FileSystem(_fs); + const ignored = await GitIgnoreManager.isIgnored({ + fs, gitdir, - commit, - }) + dir, + filepath, + }); + if (ignored) { + return 'ignored' + } + const headTree = await getHeadTree({ fs, cache, gitdir }); + const treeOid = await getOidAtPath({ + fs, + cache, + gitdir, + tree: headTree, + path: filepath, + }); + const indexEntry = await GitIndexManager.acquire( + { fs, gitdir, cache }, + async function(index) { + for (const entry of index) { + if (entry.path === filepath) return entry + } + return null + } + ); + const stats = await fs.lstat(join(dir, filepath)); + + const H = treeOid !== null; // head + const I = indexEntry !== null; // index + const W = stats !== null; // working dir + + const getWorkdirOid = async () => { + if (I && !compareStats(indexEntry, stats)) { + return indexEntry.oid + } else { + const object = await fs.read(join(dir, filepath)); + const workdirOid = await hashObject$1({ + gitdir, + type: 'blob', + object, + }); + // If the oid in the index === working dir oid but stats differed update cache + if (I && indexEntry.oid === workdirOid) { + // and as long as our fs.stats aren't bad. + // size of -1 happens over a BrowserFS HTTP Backend that doesn't serve Content-Length headers + // (like the Karma webserver) because BrowserFS HTTP Backend uses HTTP HEAD requests to do fs.stat + if (stats.size !== -1) { + // We don't await this so we can return faster for one-off cases. + GitIndexManager.acquire({ fs, gitdir, cache }, async function( + index + ) { + index.insert({ filepath, stats, oid: workdirOid }); + }); + } + } + return workdirOid + } + }; + + if (!H && !W && !I) return 'absent' // --- + if (!H && !W && I) return '*absent' // -A- + if (!H && W && !I) return '*added' // --A + if (!H && W && I) { + const workdirOid = await getWorkdirOid(); + // @ts-ignore + return workdirOid === indexEntry.oid ? 'added' : '*added' // -AA : -AB + } + if (H && !W && !I) return 'deleted' // A-- + if (H && !W && I) { + // @ts-ignore + return treeOid === indexEntry.oid ? '*deleted' : '*deleted' // AA- : AB- + } + if (H && W && !I) { + const workdirOid = await getWorkdirOid(); + return workdirOid === treeOid ? '*undeleted' : '*undeletemodified' // A-A : A-B + } + if (H && W && I) { + const workdirOid = await getWorkdirOid(); + if (workdirOid === treeOid) { + // @ts-ignore + return workdirOid === indexEntry.oid ? 'unmodified' : '*unmodified' // AAA : ABA + } else { + // @ts-ignore + return workdirOid === indexEntry.oid ? 
'modified' : '*modified' // ABB : AAB + } + } + /* + --- + -A- + --A + -AA + -AB + A-- + AA- + AB- + A-A + A-B + AAA + ABA + ABB + AAB + */ } catch (err) { - err.caller = 'git.writeCommit'; + err.caller = 'git.status'; throw err } } +async function getOidAtPath({ fs, cache, gitdir, tree, path }) { + if (typeof path === 'string') path = path.split('/'); + const dirname = path.shift(); + for (const entry of tree) { + if (entry.path === dirname) { + if (path.length === 0) { + return entry.oid + } + const { type, object } = await _readObject({ + fs, + cache, + gitdir, + oid: entry.oid, + }); + if (type === 'tree') { + const tree = GitTree.from(object); + return getOidAtPath({ fs, cache, gitdir, tree, path }) + } + if (type === 'blob') { + throw new ObjectTypeError(entry.oid, type, 'blob', path.join('/')) + } + } + } + return null +} + +async function getHeadTree({ fs, cache, gitdir }) { + // Get the tree from the HEAD commit. + let oid; + try { + oid = await GitRefManager.resolve({ fs, gitdir, ref: 'HEAD' }); + } catch (e) { + // Handle fresh branches with no commits + if (e instanceof NotFoundError) { + return [] + } + } + const { tree } = await _readTree({ fs, cache, gitdir, oid }); + return tree +} + // @ts-check /** - * Write a git object directly + * Efficiently get the status of multiple files at once. * - * `format` can have the following values: + * The returned `StatusMatrix` is admittedly not the easiest format to read. + * However it conveys a large amount of information in dense format that should make it easy to create reports about the current state of the repository; + * without having to do multiple, time-consuming isomorphic-git calls. + * My hope is that the speed and flexibility of the function will make up for the learning curve of interpreting the return value. * - * | param | description | - * | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | - * | 'deflated' | Treat `object` as the raw deflate-compressed buffer for an object, meaning can be written to `.git/objects/**` as-is. | - * | 'wrapped' | Treat `object` as the inflated object buffer wrapped in the git object header. This is the raw buffer used when calculating the SHA-1 object id of a git object. | - * | 'content' | Treat `object` as the object buffer without the git header. | - * | 'parsed' | Treat `object` as a parsed representation of the object. | + * ```js live + * // get the status of all the files in 'src' + * let status = await git.statusMatrix({ + * fs, + * dir: '/tutorial', + * filter: f => f.startsWith('src/') + * }) + * console.log(status) + * ``` + * + * ```js live + * // get the status of all the JSON and Markdown files + * let status = await git.statusMatrix({ + * fs, + * dir: '/tutorial', + * filter: f => f.endsWith('.json') || f.endsWith('.md') + * }) + * console.log(status) + * ``` + * + * The result is returned as a 2D array. + * The outer array represents the files and/or blobs in the repo, in alphabetical order. + * The inner arrays describe the status of the file: + * the first value is the filepath, and the next three are integers + * representing the HEAD status, WORKDIR status, and STAGE status of the entry. 
+ * + * ```js + * // example StatusMatrix + * [ + * ["a.txt", 0, 2, 0], // new, untracked + * ["b.txt", 0, 2, 2], // added, staged + * ["c.txt", 0, 2, 3], // added, staged, with unstaged changes + * ["d.txt", 1, 1, 1], // unmodified + * ["e.txt", 1, 2, 1], // modified, unstaged + * ["f.txt", 1, 2, 2], // modified, staged + * ["g.txt", 1, 2, 3], // modified, staged, with unstaged changes + * ["h.txt", 1, 0, 1], // deleted, unstaged + * ["i.txt", 1, 0, 0], // deleted, staged + * ] + * ``` + * + * - The HEAD status is either absent (0) or present (1). + * - The WORKDIR status is either absent (0), identical to HEAD (1), or different from HEAD (2). + * - The STAGE status is either absent (0), identical to HEAD (1), identical to WORKDIR (2), or different from WORKDIR (3). + * + * ```ts + * type Filename = string + * type HeadStatus = 0 | 1 + * type WorkdirStatus = 0 | 1 | 2 + * type StageStatus = 0 | 1 | 2 | 3 + * + * type StatusRow = [Filename, HeadStatus, WorkdirStatus, StageStatus] + * + * type StatusMatrix = StatusRow[] + * ``` + * + * > Think of the natural progression of file modifications as being from HEAD (previous) -> WORKDIR (current) -> STAGE (next). + * > Then HEAD is "version 1", WORKDIR is "version 2", and STAGE is "version 3". + * > Then, imagine a "version 0" which is before the file was created. + * > Then the status value in each column corresponds to the oldest version of the file it is identical to. + * > (For a file to be identical to "version 0" means the file is deleted.) + * + * Here are some examples of queries you can answer using the result: * - * If `format` is `'parsed'`, then `object` must match one of the schemas for `CommitObject`, `TreeObject`, `TagObject`, or a `string` (for blobs). + * #### Q: What files have been deleted? + * ```js + * const FILE = 0, WORKDIR = 2 * - * {@link CommitObject typedef} + * const filenames = (await statusMatrix({ dir })) + * .filter(row => row[WORKDIR] === 0) + * .map(row => row[FILE]) + * ``` * - * {@link TreeObject typedef} + * #### Q: What files have unstaged changes? + * ```js + * const FILE = 0, WORKDIR = 2, STAGE = 3 * - * {@link TagObject typedef} + * const filenames = (await statusMatrix({ dir })) + * .filter(row => row[WORKDIR] !== row[STAGE]) + * .map(row => row[FILE]) + * ``` * - * If `format` is `'content'`, `'wrapped'`, or `'deflated'`, `object` should be a `Uint8Array`. + * #### Q: What files have been modified since the last commit? + * ```js + * const FILE = 0, HEAD = 1, WORKDIR = 2 * - * @deprecated - * > This command is overly complicated. - * > - * > If you know the type of object you are writing, use [`writeBlob`](./writeBlob.md), [`writeCommit`](./writeCommit.md), [`writeTag`](./writeTag.md), or [`writeTree`](./writeTree.md). + * const filenames = (await statusMatrix({ dir })) + * .filter(row => row[HEAD] !== row[WORKDIR]) + * .map(row => row[FILE]) + * ``` * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string | Uint8Array | CommitObject | TreeObject | TagObject} args.object - The object to write. - * @param {'blob'|'tree'|'commit'|'tag'} [args.type] - The kind of object to write. - * @param {'deflated' | 'wrapped' | 'content' | 'parsed'} [args.format = 'parsed'] - What format the object is in. The possible choices are listed below. 
- * @param {string} [args.oid] - If `format` is `'deflated'` then this param is required. Otherwise it is calculated. - * @param {string} [args.encoding] - If `type` is `'blob'` then `object` will be converted to a Uint8Array using `encoding`. + * #### Q: What files will NOT be changed if I commit right now? + * ```js + * const FILE = 0, HEAD = 1, STAGE = 3 * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object. + * const filenames = (await statusMatrix({ dir })) + * .filter(row => row[HEAD] === row[STAGE]) + * .map(row => row[FILE]) + * ``` * - * @example - * // Manually create an annotated tag. - * let sha = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD' }) - * console.log('commit', sha) + * For reference, here are all possible combinations: * - * let oid = await git.writeObject({ - * fs, - * dir: '/tutorial', - * type: 'tag', - * object: { - * object: sha, - * type: 'commit', - * tag: 'my-tag', - * tagger: { - * name: 'your name', - * email: 'email@example.com', - * timestamp: Math.floor(Date.now()/1000), - * timezoneOffset: new Date().getTimezoneOffset() - * }, - * message: 'Optional message' - * } - * }) + * | HEAD | WORKDIR | STAGE | `git status --short` equivalent | + * | ---- | ------- | ----- | ------------------------------- | + * | 0 | 0 | 0 | `` | + * | 0 | 0 | 3 | `AD` | + * | 0 | 2 | 0 | `??` | + * | 0 | 2 | 2 | `A ` | + * | 0 | 2 | 3 | `AM` | + * | 1 | 0 | 0 | `D ` | + * | 1 | 0 | 1 | ` D` | + * | 1 | 0 | 3 | `MD` | + * | 1 | 1 | 0 | `D ` + `??` | + * | 1 | 1 | 1 | `` | + * | 1 | 1 | 3 | `MM` | + * | 1 | 2 | 0 | `D ` + `??` | + * | 1 | 2 | 1 | ` M` | + * | 1 | 2 | 2 | `M ` | + * | 1 | 2 | 3 | `MM` | * - * console.log('tag', oid) + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} args.dir - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir, '.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} [args.ref = 'HEAD'] - Optionally specify a different commit to compare against the workdir and stage instead of the HEAD + * @param {string[]} [args.filepaths = ['.']] - Limit the query to the given files and directories + * @param {function(string): boolean} [args.filter] - Filter the results to only those whose filepath matches a function. + * @param {object} [args.cache] - a [cache](cache.md) object * + * @returns {Promise>} Resolves with a status matrix, described below. + * @see StatusRow */ -async function writeObject({ +async function statusMatrix({ fs: _fs, dir, gitdir = join(dir, '.git'), - type, - object, - format = 'parsed', - oid, - encoding = undefined, + ref = 'HEAD', + filepaths = ['.'], + filter, + cache = {}, }) { try { + assertParameter('fs', _fs); + assertParameter('gitdir', gitdir); + assertParameter('ref', ref); + const fs = new FileSystem(_fs); - // Convert object to buffer - if (format === 'parsed') { - switch (type) { - case 'commit': - object = GitCommit.from(object).toObject(); - break - case 'tree': - object = GitTree.from(object).toObject(); - break - case 'blob': - object = Buffer.from(object, encoding); - break - case 'tag': - object = GitAnnotatedTag.from(object).toObject(); - break - default: - throw new ObjectTypeError(oid || '', type, 'blob|commit|tag|tree') - } - // GitObjectManager does not know how to serialize content, so we tweak that parameter before passing it. 
- format = 'content'; - } - oid = await _writeObject({ + return await _walk({ fs, + cache, + dir, gitdir, - type, - object, - oid, - format, - }); - return oid + trees: [TREE({ ref }), WORKDIR(), STAGE()], + map: async function(filepath, [head, workdir, stage]) { + // Ignore ignored files, but only if they are not already tracked. + if (!head && !stage && workdir) { + if ( + await GitIgnoreManager.isIgnored({ + fs, + dir, + filepath, + }) + ) { + return null + } + } + // match against base paths + if (!filepaths.some(base => worthWalking(filepath, base))) { + return null + } + // Late filter against file names + if (filter) { + if (!filter(filepath)) return + } + + // For now, just bail on directories + const headType = head && (await head.type()); + if (headType === 'tree' || headType === 'special') return + if (headType === 'commit') return null + + const workdirType = workdir && (await workdir.type()); + if (workdirType === 'tree' || workdirType === 'special') return + + const stageType = stage && (await stage.type()); + if (stageType === 'commit') return null + if (stageType === 'tree' || stageType === 'special') return + + // Figure out the oids, using the staged oid for the working dir oid if the stats match. + const headOid = head ? await head.oid() : undefined; + const stageOid = stage ? await stage.oid() : undefined; + let workdirOid; + if (!head && workdir && !stage) { + // We don't actually NEED the sha. Any sha will do + // TODO: update this logic to handle N trees instead of just 3. + workdirOid = '42'; + } else if (workdir) { + workdirOid = await workdir.oid(); + } + const entry = [undefined, headOid, workdirOid, stageOid]; + const result = entry.map(value => entry.indexOf(value)); + result.shift(); // remove leading undefined entry + return [filepath, ...result] + }, + }) } catch (err) { - err.caller = 'git.writeObject'; + err.caller = 'git.statusMatrix'; throw err } } @@ -31559,84 +31201,58 @@ async function writeObject({ // @ts-check /** - * Write a ref which refers to the specified SHA-1 object id, or a symbolic ref which refers to the specified ref. + * Create a lightweight tag * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {string} args.ref - The name of the ref to write - * @param {string} args.value - When `symbolic` is false, a ref or an SHA-1 object id. When true, a ref starting with `refs/`. - * @param {boolean} [args.force = false] - Instead of throwing an error if a ref named `ref` already exists, overwrite the existing ref. - * @param {boolean} [args.symbolic = false] - Whether the ref is symbolic or not. + * @param {string} args.ref - What to name the tag + * @param {string} [args.object = 'HEAD'] - What oid the tag refers to. (Will resolve to oid if value is a ref.) By default, the commit object which is referred by the current `HEAD` is used. + * @param {boolean} [args.force = false] - Instead of throwing an error if a tag named `ref` already exists, overwrite the existing tag. 
* * @returns {Promise} Resolves successfully when filesystem operations are complete * * @example - * await git.writeRef({ - * fs, - * dir: '/tutorial', - * ref: 'refs/heads/another-branch', - * value: 'HEAD' - * }) - * await git.writeRef({ - * fs, - * dir: '/tutorial', - * ref: 'HEAD', - * value: 'refs/heads/another-branch', - * force: true, - * symbolic: true - * }) + * await git.tag({ fs, dir: '/tutorial', ref: 'test-tag' }) * console.log('done') * */ -async function writeRef({ +async function tag({ fs: _fs, dir, gitdir = join(dir, '.git'), ref, - value, + object, force = false, - symbolic = false, }) { try { assertParameter('fs', _fs); assertParameter('gitdir', gitdir); assertParameter('ref', ref); - assertParameter('value', value); const fs = new FileSystem(_fs); - if (ref !== cleanGitRef.clean(ref)) { - throw new InvalidRefNameError(ref, cleanGitRef.clean(ref)) - } - - if (!force && (await GitRefManager.exists({ fs, gitdir, ref }))) { - throw new AlreadyExistsError('ref', ref) - } - - if (symbolic) { - await GitRefManager.writeSymbolicRef({ - fs, - gitdir, - ref, - value, - }); - } else { - value = await GitRefManager.resolve({ - fs, - gitdir, - ref: value, - }); - await GitRefManager.writeRef({ - fs, - gitdir, - ref, - value, - }); + if (ref === undefined) { + throw new MissingParameterError('ref') + } + + ref = ref.startsWith('refs/tags/') ? ref : `refs/tags/${ref}`; + + // Resolve passed object + const value = await GitRefManager.resolve({ + fs, + gitdir, + ref: object || 'HEAD', + }); + + if (!force && (await GitRefManager.exists({ fs, gitdir, ref }))) { + throw new AlreadyExistsError('tag', ref) } + + await GitRefManager.writeRef({ fs, gitdir, ref, value }); } catch (err) { - err.caller = 'git.writeRef'; + err.caller = 'git.tag'; throw err } } @@ -31644,532 +31260,917 @@ async function writeRef({ // @ts-check /** - * @param {object} args - * @param {import('../models/FileSystem.js').FileSystem} args.fs - * @param {string} args.gitdir - * @param {TagObject} args.tag + * Return the version number of isomorphic-git + * + * I don't know why you might need this. I added it just so I could check that I was getting + * the correct version of the library and not a cached version. + * + * @returns {string} the version string taken from package.json at publication time + * + * @example + * console.log(git.version()) * - * @returns {Promise} */ -async function _writeTag({ fs, gitdir, tag }) { - // Convert object to buffer - const object = GitAnnotatedTag.from(tag).toObject(); - const oid = await _writeObject({ - fs, - gitdir, - type: 'tag', - object, - format: 'content', - }); - return oid +function version() { + try { + return pkg.version + } catch (err) { + err.caller = 'git.version'; + throw err + } } // @ts-check /** - * Write an annotated tag object directly + * @callback WalkerMap + * @param {string} filename + * @param {Array} entries + * @returns {Promise} + */ + +/** + * @callback WalkerReduce + * @param {any} parent + * @param {any[]} children + * @returns {Promise} + */ + +/** + * @callback WalkerIterateCallback + * @param {WalkerEntry[]} entries + * @returns {Promise} + */ + +/** + * @callback WalkerIterate + * @param {WalkerIterateCallback} walk + * @param {IterableIterator} children + * @returns {Promise} + */ + +/** + * A powerful recursive tree-walking utility. + * + * The `walk` API simplifies gathering detailed information about a tree or comparing all the filepaths in two or more trees. 
+ * Trees can be git commits, the working directory, or the or git index (staging area). + * As long as a file or directory is present in at least one of the trees, it will be traversed. + * Entries are traversed in alphabetical order. + * + * The arguments to `walk` are the `trees` you want to traverse, and 3 optional transform functions: + * `map`, `reduce`, and `iterate`. + * + * ## `TREE`, `WORKDIR`, and `STAGE` + * + * Tree walkers are represented by three separate functions that can be imported: + * + * ```js + * import { TREE, WORKDIR, STAGE } from 'isomorphic-git' + * ``` + * + * These functions return opaque handles called `Walker`s. + * The only thing that `Walker` objects are good for is passing into `walk`. + * Here are the three `Walker`s passed into `walk` by the `statusMatrix` command for example: + * + * ```js + * let ref = 'HEAD' + * + * let trees = [TREE({ ref }), WORKDIR(), STAGE()] + * ``` + * + * For the arguments, see the doc pages for [TREE](./TREE.md), [WORKDIR](./WORKDIR.md), and [STAGE](./STAGE.md). + * + * `map`, `reduce`, and `iterate` allow you control the recursive walk by pruning and transforming `WalkerEntry`s into the desired result. + * + * ## WalkerEntry + * + * {@link WalkerEntry typedef} + * + * `map` receives an array of `WalkerEntry[]` as its main argument, one `WalkerEntry` for each `Walker` in the `trees` argument. + * The methods are memoized per `WalkerEntry` so calling them multiple times in a `map` function does not adversely impact performance. + * By only computing these values if needed, you build can build lean, mean, efficient walking machines. + * + * ### WalkerEntry#type() + * + * Returns the kind as a string. This is normally either `tree` or `blob`. + * + * `TREE`, `STAGE`, and `WORKDIR` walkers all return a string. + * + * Possible values: + * + * - `'tree'` directory + * - `'blob'` file + * - `'special'` used by `WORKDIR` to represent irregular files like sockets and FIFOs + * - `'commit'` used by `TREE` to represent submodules + * + * ```js + * await entry.type() + * ``` + * + * ### WalkerEntry#mode() + * + * Returns the file mode as a number. Use this to distinguish between regular files, symlinks, and executable files. + * + * `TREE`, `STAGE`, and `WORKDIR` walkers all return a number for all `type`s of entries. + * + * It has been normalized to one of the 4 values that are allowed in git commits: + * + * - `0o40000` directory + * - `0o100644` file + * - `0o100755` file (executable) + * - `0o120000` symlink + * + * Tip: to make modes more readable, you can print them to octal using `.toString(8)`. + * + * ```js + * await entry.mode() + * ``` + * + * ### WalkerEntry#oid() + * + * Returns the SHA-1 object id for blobs and trees. + * + * `TREE` walkers return a string for `blob` and `tree` entries. + * + * `STAGE` and `WORKDIR` walkers return a string for `blob` entries and `undefined` for `tree` entries. + * + * ```js + * await entry.oid() + * ``` + * + * ### WalkerEntry#content() + * + * Returns the file contents as a Buffer. + * + * `TREE` and `WORKDIR` walkers return a Buffer for `blob` entries and `undefined` for `tree` entries. + * + * `STAGE` walkers always return `undefined` since the file contents are never stored in the stage. + * + * ```js + * await entry.content() + * ``` + * + * ### WalkerEntry#stat() + * + * Returns a normalized subset of filesystem Stat data. + * + * `WORKDIR` walkers return a `Stat` for `blob` and `tree` entries. 
+ * + * `STAGE` walkers return a `Stat` for `blob` entries and `undefined` for `tree` entries. + * + * `TREE` walkers return `undefined` for all entry types. + * + * ```js + * await entry.stat() + * ``` + * + * {@link Stat typedef} + * + * ## map(string, Array) => Promise + * + * {@link WalkerMap typedef} + * + * This is the function that is called once per entry BEFORE visiting the children of that node. + * + * If you return `null` for a `tree` entry, then none of the children of that `tree` entry will be walked. + * + * This is a good place for query logic, such as examining the contents of a file. + * Ultimately, compare all the entries and return any values you are interested in. + * If you do not return a value (or return undefined) that entry will be filtered from the results. + * + * Example 1: Find all the files containing the word 'foo'. + * ```js + * async function map(filepath, [head, workdir]) { + * let content = (await workdir.content()).toString('utf8') + * if (content.contains('foo')) { + * return { + * filepath, + * content + * } + * } + * } + * ``` + * + * Example 2: Return the difference between the working directory and the HEAD commit + * ```js + * const diff = require('diff-lines') + * async function map(filepath, [head, workdir]) { + * return { + * filepath, + * oid: await head.oid(), + * diff: diff((await head.content()).toString('utf8'), (await workdir.content()).toString('utf8')) + * } + * } + * ``` + * + * Example 3: + * ```js + * let path = require('path') + * // Only examine files in the directory `cwd` + * let cwd = 'src/app' + * async function map (filepath, [head, workdir, stage]) { + * if ( + * // don't skip the root directory + * head.fullpath !== '.' && + * // return true for 'src' and 'src/app' + * !cwd.startsWith(filepath) && + * // return true for 'src/app/*' + * path.dirname(filepath) !== cwd + * ) { + * return null + * } else { + * return filepath + * } + * } + * ``` + * + * ## reduce(parent, children) + * + * {@link WalkerReduce typedef} + * + * This is the function that is called once per entry AFTER visiting the children of that node. * - * @param {object} args - * @param {FsClient} args.fs - a file system client - * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path - * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {TagObject} args.tag - The object to write + * Default: `async (parent, children) => parent === undefined ? children.flat() : [parent, children].flat()` * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object - * @see TagObject + * The default implementation of this function returns all directories and children in a giant flat array. + * You can define a different accumulation method though. * - * @example - * // Manually create an annotated tag. 
- * let sha = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD' }) - * console.log('commit', sha) + * Example: Return a hierarchical structure + * ```js + * async function reduce (parent, children) { + * return Object.assign(parent, { children }) + * } + * ``` * - * let oid = await git.writeTag({ - * fs, - * dir: '/tutorial', - * tag: { - * object: sha, - * type: 'commit', - * tag: 'my-tag', - * tagger: { - * name: 'your name', - * email: 'email@example.com', - * timestamp: Math.floor(Date.now()/1000), - * timezoneOffset: new Date().getTimezoneOffset() - * }, - * message: 'Optional message' - * } - * }) + * ## iterate(walk, children) * - * console.log('tag', oid) + * {@link WalkerIterate typedef} * - */ -async function writeTag({ fs, dir, gitdir = join(dir, '.git'), tag }) { - try { - assertParameter('fs', fs); - assertParameter('gitdir', gitdir); - assertParameter('tag', tag); - - return await _writeTag({ - fs: new FileSystem(fs), - gitdir, - tag, - }) - } catch (err) { - err.caller = 'git.writeTag'; - throw err - } -} - -// @ts-check - -/** - * Write a tree object directly + * {@link WalkerIterateCallback typedef} + * + * Default: `(walk, children) => Promise.all([...children].map(walk))` + * + * The default implementation recurses all children concurrently using Promise.all. + * However you could use a custom function to traverse children serially or use a global queue to throttle recursion. * * @param {object} args * @param {FsClient} args.fs - a file system client * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path - * @param {TreeObject} args.tree - The object to write - * - * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object. 
- * @see TreeObject - * @see TreeEntry + * @param {Walker[]} args.trees - The trees you want to traverse + * @param {WalkerMap} [args.map] - Transform `WalkerEntry`s into a result form + * @param {WalkerReduce} [args.reduce] - Control how mapped entries are combined with their parent result + * @param {WalkerIterate} [args.iterate] - Fine-tune how entries within a tree are iterated over + * @param {object} [args.cache] - a [cache](cache.md) object * + * @returns {Promise} The finished tree-walking result */ -async function writeTree({ fs, dir, gitdir = join(dir, '.git'), tree }) { +async function walk({ + fs, + dir, + gitdir = join(dir, '.git'), + trees, + map, + reduce, + iterate, + cache = {}, +}) { try { assertParameter('fs', fs); assertParameter('gitdir', gitdir); - assertParameter('tree', tree); + assertParameter('trees', trees); - return await _writeTree({ + return await _walk({ fs: new FileSystem(fs), + cache, + dir, gitdir, - tree, + trees, + map, + reduce, + iterate, }) } catch (err) { - err.caller = 'git.writeTree'; - throw err - } -} - -// default export -var index = { - Errors, - STAGE, - TREE, - WORKDIR, - add, - addNote, - addRemote, - annotatedTag, - branch, - checkout, - clone, - commit, - getConfig, - getConfigAll, - setConfig, - currentBranch, - deleteBranch, - deleteRef, - deleteRemote, - deleteTag, - expandOid, - expandRef, - fastForward, - fetch, - findMergeBase, - findRoot, - getRemoteInfo, - getRemoteInfo2, - hashBlob, - indexPack, - init, - isDescendent, - isIgnored, - listBranches, - listFiles, - listNotes, - listRemotes, - listServerRefs, - listTags, - log, - merge, - packObjects, - pull, - push, - readBlob, - readCommit, - readNote, - readObject, - readTag, - readTree, - remove, - removeNote, - renameBranch, - resetIndex, - resolveRef, - status, - statusMatrix, - tag, - version, - walk, - writeBlob, - writeCommit, - writeObject, - writeRef, - writeTag, - writeTree, -}; - -exports.Errors = Errors; -exports.STAGE = STAGE; -exports.TREE = TREE; -exports.WORKDIR = WORKDIR; -exports.add = add; -exports.addNote = addNote; -exports.addRemote = addRemote; -exports.annotatedTag = annotatedTag; -exports.branch = branch; -exports.checkout = checkout; -exports.clone = clone; -exports.commit = commit; -exports.currentBranch = currentBranch; -exports.default = index; -exports.deleteBranch = deleteBranch; -exports.deleteRef = deleteRef; -exports.deleteRemote = deleteRemote; -exports.deleteTag = deleteTag; -exports.expandOid = expandOid; -exports.expandRef = expandRef; -exports.fastForward = fastForward; -exports.fetch = fetch; -exports.findMergeBase = findMergeBase; -exports.findRoot = findRoot; -exports.getConfig = getConfig; -exports.getConfigAll = getConfigAll; -exports.getRemoteInfo = getRemoteInfo; -exports.getRemoteInfo2 = getRemoteInfo2; -exports.hashBlob = hashBlob; -exports.indexPack = indexPack; -exports.init = init; -exports.isDescendent = isDescendent; -exports.isIgnored = isIgnored; -exports.listBranches = listBranches; -exports.listFiles = listFiles; -exports.listNotes = listNotes; -exports.listRemotes = listRemotes; -exports.listServerRefs = listServerRefs; -exports.listTags = listTags; -exports.log = log; -exports.merge = merge; -exports.packObjects = packObjects; -exports.pull = pull; -exports.push = push; -exports.readBlob = readBlob; -exports.readCommit = readCommit; -exports.readNote = readNote; -exports.readObject = readObject; -exports.readTag = readTag; -exports.readTree = readTree; -exports.remove = remove; -exports.removeNote = removeNote; 
-exports.renameBranch = renameBranch; -exports.resetIndex = resetIndex; -exports.resolveRef = resolveRef; -exports.setConfig = setConfig; -exports.status = status; -exports.statusMatrix = statusMatrix; -exports.tag = tag; -exports.version = version; -exports.walk = walk; -exports.writeBlob = writeBlob; -exports.writeCommit = writeCommit; -exports.writeObject = writeObject; -exports.writeRef = writeRef; -exports.writeTag = writeTag; -exports.writeTree = writeTree; - - -/***/ }), - -/***/ 962: -/***/ (function(__unusedmodule, exports, __webpack_require__) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -const common = __webpack_require__(617); -class Reader { - constructor(_root, _settings) { - this._root = _root; - this._settings = _settings; - this._root = common.replacePathSegmentSeparator(_root, _settings.pathSegmentSeparator); - } + err.caller = 'git.walk'; + throw err + } } -exports.default = Reader; - - -/***/ }), - -/***/ 969: -/***/ (function(module) { - -"use strict"; +// @ts-check -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. +/** + * Write a blob object directly + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {Uint8Array} args.blob - The blob object to write + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object + * + * @example + * // Manually create a blob. 
+ * let oid = await git.writeBlob({ + * fs, + * dir: '/tutorial', + * blob: new Uint8Array([]) + * }) + * + * console.log('oid', oid) // should be 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' + * + */ +async function writeBlob({ fs, dir, gitdir = join(dir, '.git'), blob }) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('blob', blob); -function GZheader() { - /* true if compressed data believed to be text */ - this.text = 0; - /* modification time */ - this.time = 0; - /* extra flags (not used when writing a gzip file) */ - this.xflags = 0; - /* operating system */ - this.os = 0; - /* pointer to extra field or Z_NULL if none */ - this.extra = null; - /* extra field length (valid if extra != Z_NULL) */ - this.extra_len = 0; // Actually, we don't need it in JS, - // but leave for few code modifications + return await _writeObject({ + fs: new FileSystem(fs), + gitdir, + type: 'blob', + object: blob, + format: 'content', + }) + } catch (err) { + err.caller = 'git.writeBlob'; + throw err + } +} - // - // Setup limits is not necessary because in js we should not preallocate memory - // for inflate use constant limit in 65536 bytes - // +// @ts-check - /* space at extra (only when reading header) */ - // this.extra_max = 0; - /* pointer to zero-terminated file name or Z_NULL */ - this.name = ''; - /* space at name (only when reading header) */ - // this.name_max = 0; - /* pointer to zero-terminated comment or Z_NULL */ - this.comment = ''; - /* space at comment (only when reading header) */ - // this.comm_max = 0; - /* true if there was or will be a header crc */ - this.hcrc = 0; - /* true when done reading gzip header (not used when writing a gzip file) */ - this.done = false; +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {CommitObject} args.commit + * + * @returns {Promise} + * @see CommitObject + * + */ +async function _writeCommit({ fs, gitdir, commit }) { + // Convert object to buffer + const object = GitCommit.from(commit).toObject(); + const oid = await _writeObject({ + fs, + gitdir, + type: 'commit', + object, + format: 'content', + }); + return oid } -module.exports = GZheader; - +// @ts-check -/***/ }), +/** + * Write a commit object directly + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {CommitObject} args.commit - The object to write + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object + * @see CommitObject + * + */ +async function writeCommit({ + fs, + dir, + gitdir = join(dir, '.git'), + commit, +}) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('commit', commit); -/***/ 984: -/***/ (function(__unusedmodule, exports, __webpack_require__) { + return await _writeCommit({ + fs: new FileSystem(fs), + gitdir, + commit, + }) + } catch (err) { + err.caller = 'git.writeCommit'; + throw err + } +} -"use strict"; +// @ts-check -Object.defineProperty(exports, "__esModule", { value: true }); -exports.createFileSystemAdapter = exports.FILE_SYSTEM_ADAPTER = void 0; -const fs = __webpack_require__(747); -exports.FILE_SYSTEM_ADAPTER = { - lstat: fs.lstat, - stat: fs.stat, - lstatSync: fs.lstatSync, - statSync: fs.statSync -}; -function 
createFileSystemAdapter(fsMethods) { - if (fsMethods === undefined) { - return exports.FILE_SYSTEM_ADAPTER; +/** + * Write a git object directly + * + * `format` can have the following values: + * + * | param | description | + * | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | + * | 'deflated' | Treat `object` as the raw deflate-compressed buffer for an object, meaning can be written to `.git/objects/**` as-is. | + * | 'wrapped' | Treat `object` as the inflated object buffer wrapped in the git object header. This is the raw buffer used when calculating the SHA-1 object id of a git object. | + * | 'content' | Treat `object` as the object buffer without the git header. | + * | 'parsed' | Treat `object` as a parsed representation of the object. | + * + * If `format` is `'parsed'`, then `object` must match one of the schemas for `CommitObject`, `TreeObject`, `TagObject`, or a `string` (for blobs). + * + * {@link CommitObject typedef} + * + * {@link TreeObject typedef} + * + * {@link TagObject typedef} + * + * If `format` is `'content'`, `'wrapped'`, or `'deflated'`, `object` should be a `Uint8Array`. + * + * @deprecated + * > This command is overly complicated. + * > + * > If you know the type of object you are writing, use [`writeBlob`](./writeBlob.md), [`writeCommit`](./writeCommit.md), [`writeTag`](./writeTag.md), or [`writeTree`](./writeTree.md). + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string | Uint8Array | CommitObject | TreeObject | TagObject} args.object - The object to write. + * @param {'blob'|'tree'|'commit'|'tag'} [args.type] - The kind of object to write. + * @param {'deflated' | 'wrapped' | 'content' | 'parsed'} [args.format = 'parsed'] - What format the object is in. The possible choices are listed below. + * @param {string} [args.oid] - If `format` is `'deflated'` then this param is required. Otherwise it is calculated. + * @param {string} [args.encoding] - If `type` is `'blob'` then `object` will be converted to a Uint8Array using `encoding`. + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object. + * + * @example + * // Manually create an annotated tag. 
+ * let sha = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD' }) + * console.log('commit', sha) + * + * let oid = await git.writeObject({ + * fs, + * dir: '/tutorial', + * type: 'tag', + * object: { + * object: sha, + * type: 'commit', + * tag: 'my-tag', + * tagger: { + * name: 'your name', + * email: 'email@example.com', + * timestamp: Math.floor(Date.now()/1000), + * timezoneOffset: new Date().getTimezoneOffset() + * }, + * message: 'Optional message' + * } + * }) + * + * console.log('tag', oid) + * + */ +async function writeObject({ + fs: _fs, + dir, + gitdir = join(dir, '.git'), + type, + object, + format = 'parsed', + oid, + encoding = undefined, +}) { + try { + const fs = new FileSystem(_fs); + // Convert object to buffer + if (format === 'parsed') { + switch (type) { + case 'commit': + object = GitCommit.from(object).toObject(); + break + case 'tree': + object = GitTree.from(object).toObject(); + break + case 'blob': + object = Buffer.from(object, encoding); + break + case 'tag': + object = GitAnnotatedTag.from(object).toObject(); + break + default: + throw new ObjectTypeError(oid || '', type, 'blob|commit|tag|tree') + } + // GitObjectManager does not know how to serialize content, so we tweak that parameter before passing it. + format = 'content'; } - return Object.assign(Object.assign({}, exports.FILE_SYSTEM_ADAPTER), fsMethods); -} -exports.createFileSystemAdapter = createFileSystemAdapter; - - -/***/ }), - -/***/ 991: -/***/ (function(module) { - -"use strict"; - - -// (C) 1995-2013 Jean-loup Gailly and Mark Adler -// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
- -function ZStream() { - /* next input byte */ - this.input = null; // JS specific, because we have no pointers - this.next_in = 0; - /* number of bytes available at input */ - this.avail_in = 0; - /* total number of input bytes read so far */ - this.total_in = 0; - /* next output byte should be put there */ - this.output = null; // JS specific, because we have no pointers - this.next_out = 0; - /* remaining free space at output */ - this.avail_out = 0; - /* total number of bytes output so far */ - this.total_out = 0; - /* last error message, NULL if no error */ - this.msg = ''/*Z_NULL*/; - /* not visible by applications */ - this.state = null; - /* best guess about the data type: binary or text */ - this.data_type = 2/*Z_UNKNOWN*/; - /* adler32 value of the uncompressed data */ - this.adler = 0; + oid = await _writeObject({ + fs, + gitdir, + type, + object, + oid, + format, + }); + return oid + } catch (err) { + err.caller = 'git.writeObject'; + throw err + } } -module.exports = ZStream; - - -/***/ }), - -/***/ 999: -/***/ (function(__unusedmodule, exports) { - -"use strict"; - - +// @ts-check -var TYPED_OK = (typeof Uint8Array !== 'undefined') && - (typeof Uint16Array !== 'undefined') && - (typeof Int32Array !== 'undefined'); +/** + * Write a ref which refers to the specified SHA-1 object id, or a symbolic ref which refers to the specified ref. + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {string} args.ref - The name of the ref to write + * @param {string} args.value - When `symbolic` is false, a ref or an SHA-1 object id. When true, a ref starting with `refs/`. + * @param {boolean} [args.force = false] - Instead of throwing an error if a ref named `ref` already exists, overwrite the existing ref. + * @param {boolean} [args.symbolic = false] - Whether the ref is symbolic or not. 
+ * + * @returns {Promise} Resolves successfully when filesystem operations are complete + * + * @example + * await git.writeRef({ + * fs, + * dir: '/tutorial', + * ref: 'refs/heads/another-branch', + * value: 'HEAD' + * }) + * await git.writeRef({ + * fs, + * dir: '/tutorial', + * ref: 'HEAD', + * value: 'refs/heads/another-branch', + * force: true, + * symbolic: true + * }) + * console.log('done') + * + */ +async function writeRef({ + fs: _fs, + dir, + gitdir = join(dir, '.git'), + ref, + value, + force = false, + symbolic = false, +}) { + try { + assertParameter('fs', _fs); + assertParameter('gitdir', gitdir); + assertParameter('ref', ref); + assertParameter('value', value); -function _has(obj, key) { - return Object.prototype.hasOwnProperty.call(obj, key); -} + const fs = new FileSystem(_fs); -exports.assign = function (obj /*from1, from2, from3, ...*/) { - var sources = Array.prototype.slice.call(arguments, 1); - while (sources.length) { - var source = sources.shift(); - if (!source) { continue; } + if (ref !== cleanGitRef.clean(ref)) { + throw new InvalidRefNameError(ref, cleanGitRef.clean(ref)) + } - if (typeof source !== 'object') { - throw new TypeError(source + 'must be non-object'); + if (!force && (await GitRefManager.exists({ fs, gitdir, ref }))) { + throw new AlreadyExistsError('ref', ref) } - for (var p in source) { - if (_has(source, p)) { - obj[p] = source[p]; - } + if (symbolic) { + await GitRefManager.writeSymbolicRef({ + fs, + gitdir, + ref, + value, + }); + } else { + value = await GitRefManager.resolve({ + fs, + gitdir, + ref: value, + }); + await GitRefManager.writeRef({ + fs, + gitdir, + ref, + value, + }); } + } catch (err) { + err.caller = 'git.writeRef'; + throw err } +} - return obj; -}; - - -// reduce buffer size, avoiding mem copy -exports.shrinkBuf = function (buf, size) { - if (buf.length === size) { return buf; } - if (buf.subarray) { return buf.subarray(0, size); } - buf.length = size; - return buf; -}; - +// @ts-check -var fnTyped = { - arraySet: function (dest, src, src_offs, len, dest_offs) { - if (src.subarray && dest.subarray) { - dest.set(src.subarray(src_offs, src_offs + len), dest_offs); - return; - } - // Fallback to ordinary array - for (var i = 0; i < len; i++) { - dest[dest_offs + i] = src[src_offs + i]; - } - }, - // Join array of chunks to single array. 
- flattenChunks: function (chunks) { - var i, l, len, pos, chunk, result; +/** + * @param {object} args + * @param {import('../models/FileSystem.js').FileSystem} args.fs + * @param {string} args.gitdir + * @param {TagObject} args.tag + * + * @returns {Promise} + */ +async function _writeTag({ fs, gitdir, tag }) { + // Convert object to buffer + const object = GitAnnotatedTag.from(tag).toObject(); + const oid = await _writeObject({ + fs, + gitdir, + type: 'tag', + object, + format: 'content', + }); + return oid +} - // calculate data length - len = 0; - for (i = 0, l = chunks.length; i < l; i++) { - len += chunks[i].length; - } +// @ts-check - // join chunks - result = new Uint8Array(len); - pos = 0; - for (i = 0, l = chunks.length; i < l; i++) { - chunk = chunks[i]; - result.set(chunk, pos); - pos += chunk.length; - } +/** + * Write an annotated tag object directly + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {TagObject} args.tag - The object to write + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object + * @see TagObject + * + * @example + * // Manually create an annotated tag. + * let sha = await git.resolveRef({ fs, dir: '/tutorial', ref: 'HEAD' }) + * console.log('commit', sha) + * + * let oid = await git.writeTag({ + * fs, + * dir: '/tutorial', + * tag: { + * object: sha, + * type: 'commit', + * tag: 'my-tag', + * tagger: { + * name: 'your name', + * email: 'email@example.com', + * timestamp: Math.floor(Date.now()/1000), + * timezoneOffset: new Date().getTimezoneOffset() + * }, + * message: 'Optional message' + * } + * }) + * + * console.log('tag', oid) + * + */ +async function writeTag({ fs, dir, gitdir = join(dir, '.git'), tag }) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('tag', tag); - return result; + return await _writeTag({ + fs: new FileSystem(fs), + gitdir, + tag, + }) + } catch (err) { + err.caller = 'git.writeTag'; + throw err } -}; +} -var fnUntyped = { - arraySet: function (dest, src, src_offs, len, dest_offs) { - for (var i = 0; i < len; i++) { - dest[dest_offs + i] = src[src_offs + i]; - } - }, - // Join array of chunks to single array. - flattenChunks: function (chunks) { - return [].concat.apply([], chunks); - } -}; +// @ts-check +/** + * Write a tree object directly + * + * @param {object} args + * @param {FsClient} args.fs - a file system client + * @param {string} [args.dir] - The [working tree](dir-vs-gitdir.md) directory path + * @param {string} [args.gitdir=join(dir,'.git')] - [required] The [git directory](dir-vs-gitdir.md) path + * @param {TreeObject} args.tree - The object to write + * + * @returns {Promise} Resolves successfully with the SHA-1 object id of the newly written object. 
+ * @see TreeObject + * @see TreeEntry + * + */ +async function writeTree({ fs, dir, gitdir = join(dir, '.git'), tree }) { + try { + assertParameter('fs', fs); + assertParameter('gitdir', gitdir); + assertParameter('tree', tree); -// Enable/Disable typed arrays use, for testing -// -exports.setTyped = function (on) { - if (on) { - exports.Buf8 = Uint8Array; - exports.Buf16 = Uint16Array; - exports.Buf32 = Int32Array; - exports.assign(exports, fnTyped); - } else { - exports.Buf8 = Array; - exports.Buf16 = Array; - exports.Buf32 = Array; - exports.assign(exports, fnUntyped); + return await _writeTree({ + fs: new FileSystem(fs), + gitdir, + tree, + }) + } catch (err) { + err.caller = 'git.writeTree'; + throw err } +} + +// default export +var index = { + Errors, + STAGE, + TREE, + WORKDIR, + add, + addNote, + addRemote, + annotatedTag, + branch, + checkout, + clone, + commit, + getConfig, + getConfigAll, + setConfig, + currentBranch, + deleteBranch, + deleteRef, + deleteRemote, + deleteTag, + expandOid, + expandRef, + fastForward, + fetch, + findMergeBase, + findRoot, + getRemoteInfo, + getRemoteInfo2, + hashBlob, + indexPack, + init, + isDescendent, + isIgnored, + listBranches, + listFiles, + listNotes, + listRemotes, + listServerRefs, + listTags, + log, + merge, + packObjects, + pull, + push, + readBlob, + readCommit, + readNote, + readObject, + readTag, + readTree, + remove, + removeNote, + renameBranch, + resetIndex, + resolveRef, + status, + statusMatrix, + tag, + version, + walk, + writeBlob, + writeCommit, + writeObject, + writeRef, + writeTag, + writeTree, }; -exports.setTyped(TYPED_OK); +exports.Errors = Errors; +exports.STAGE = STAGE; +exports.TREE = TREE; +exports.WORKDIR = WORKDIR; +exports.add = add; +exports.addNote = addNote; +exports.addRemote = addRemote; +exports.annotatedTag = annotatedTag; +exports.branch = branch; +exports.checkout = checkout; +exports.clone = clone; +exports.commit = commit; +exports.currentBranch = currentBranch; +exports["default"] = index; +exports.deleteBranch = deleteBranch; +exports.deleteRef = deleteRef; +exports.deleteRemote = deleteRemote; +exports.deleteTag = deleteTag; +exports.expandOid = expandOid; +exports.expandRef = expandRef; +exports.fastForward = fastForward; +exports.fetch = fetch; +exports.findMergeBase = findMergeBase; +exports.findRoot = findRoot; +exports.getConfig = getConfig; +exports.getConfigAll = getConfigAll; +exports.getRemoteInfo = getRemoteInfo; +exports.getRemoteInfo2 = getRemoteInfo2; +exports.hashBlob = hashBlob; +exports.indexPack = indexPack; +exports.init = init; +exports.isDescendent = isDescendent; +exports.isIgnored = isIgnored; +exports.listBranches = listBranches; +exports.listFiles = listFiles; +exports.listNotes = listNotes; +exports.listRemotes = listRemotes; +exports.listServerRefs = listServerRefs; +exports.listTags = listTags; +exports.log = log; +exports.merge = merge; +exports.packObjects = packObjects; +exports.pull = pull; +exports.push = push; +exports.readBlob = readBlob; +exports.readCommit = readCommit; +exports.readNote = readNote; +exports.readObject = readObject; +exports.readTag = readTag; +exports.readTree = readTree; +exports.remove = remove; +exports.removeNote = removeNote; +exports.renameBranch = renameBranch; +exports.resetIndex = resetIndex; +exports.resolveRef = resolveRef; +exports.setConfig = setConfig; +exports.status = status; +exports.statusMatrix = statusMatrix; +exports.tag = tag; +exports.version = version; +exports.walk = walk; +exports.writeBlob = writeBlob; 
+exports.writeCommit = writeCommit; +exports.writeObject = writeObject; +exports.writeRef = writeRef; +exports.writeTag = writeTag; +exports.writeTree = writeTree; /***/ }) -/******/ }); \ No newline at end of file +/******/ }); +/************************************************************************/ +/******/ // The module cache +/******/ var __webpack_module_cache__ = {}; +/******/ +/******/ // The require function +/******/ function __nccwpck_require__(moduleId) { +/******/ // Check if module is in cache +/******/ var cachedModule = __webpack_module_cache__[moduleId]; +/******/ if (cachedModule !== undefined) { +/******/ return cachedModule.exports; +/******/ } +/******/ // Create a new module (and put it into the cache) +/******/ var module = __webpack_module_cache__[moduleId] = { +/******/ // no module.id needed +/******/ // no module.loaded needed +/******/ exports: {} +/******/ }; +/******/ +/******/ // Execute the module function +/******/ var threw = true; +/******/ try { +/******/ __webpack_modules__[moduleId].call(module.exports, module, module.exports, __nccwpck_require__); +/******/ threw = false; +/******/ } finally { +/******/ if(threw) delete __webpack_module_cache__[moduleId]; +/******/ } +/******/ +/******/ // Return the exports of the module +/******/ return module.exports; +/******/ } +/******/ +/************************************************************************/ +/******/ /* webpack/runtime/compat */ +/******/ +/******/ if (typeof __nccwpck_require__ !== 'undefined') __nccwpck_require__.ab = __dirname + "/"; +/******/ +/************************************************************************/ +var __webpack_exports__ = {}; +// This entry need to be wrapped in an IIFE because it need to be in strict mode. +(() => { +"use strict"; +var exports = __webpack_exports__; + +/* istanbul ignore file - this file is used purely as an entry-point */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +const _1 = __nccwpck_require__(9726); +(0, _1.main)({ + log: console, + env: process.env, +}).catch((err) => { + console.error(err); + process.exit(1); +}); + +})(); + +module.exports = __webpack_exports__; +/******/ })() +; \ No newline at end of file diff --git a/action/package-lock.json b/action/package-lock.json index 8964feef..2aeca6e1 100644 --- a/action/package-lock.json +++ b/action/package-lock.json @@ -14,7 +14,7 @@ "@types/git-url-parse": "^9.0.1", "@types/jest": "^27.4.0", "@types/node": "^17.0.16", - "@zeit/ncc": "^0.22.3", + "@vercel/ncc": "^0.38.1", "dotenv": "^16.0.0", "git-url-parse": "^13.1.0", "jest": "^27.5.1", @@ -1100,12 +1100,12 @@ "integrity": "sha512-7tFImggNeNBVMsn0vLrpn1H1uPrUBdnARPTpZoitY37ZrdJREzf7I16tMrlK3hen349gr1NYh8CmZQa7CTG6Aw==", "dev": true }, - "node_modules/@zeit/ncc": { - "version": "0.22.3", - "resolved": "https://registry.npmjs.org/@zeit/ncc/-/ncc-0.22.3.tgz", - "integrity": "sha512-jnCLpLXWuw/PAiJiVbLjA8WBC0IJQbFeUwF4I9M+23MvIxTxk5pD4Q8byQBSPmHQjz5aBoA7AKAElQxMpjrCLQ==", - "deprecated": "@zeit/ncc is no longer maintained. 
Please use @vercel/ncc instead.", + "node_modules/@vercel/ncc": { + "version": "0.38.1", + "resolved": "https://registry.npmjs.org/@vercel/ncc/-/ncc-0.38.1.tgz", + "integrity": "sha512-IBBb+iI2NLu4VQn3Vwldyi2QwaXt5+hTyh58ggAMoCGE6DJmPvwL3KPBWcJl1m9LYPChBLE980Jw+CS4Wokqxw==", "dev": true, + "license": "MIT", "bin": { "ncc": "dist/ncc/cli.js" } @@ -5567,10 +5567,10 @@ "integrity": "sha512-7tFImggNeNBVMsn0vLrpn1H1uPrUBdnARPTpZoitY37ZrdJREzf7I16tMrlK3hen349gr1NYh8CmZQa7CTG6Aw==", "dev": true }, - "@zeit/ncc": { - "version": "0.22.3", - "resolved": "https://registry.npmjs.org/@zeit/ncc/-/ncc-0.22.3.tgz", - "integrity": "sha512-jnCLpLXWuw/PAiJiVbLjA8WBC0IJQbFeUwF4I9M+23MvIxTxk5pD4Q8byQBSPmHQjz5aBoA7AKAElQxMpjrCLQ==", + "@vercel/ncc": { + "version": "0.38.1", + "resolved": "https://registry.npmjs.org/@vercel/ncc/-/ncc-0.38.1.tgz", + "integrity": "sha512-IBBb+iI2NLu4VQn3Vwldyi2QwaXt5+hTyh58ggAMoCGE6DJmPvwL3KPBWcJl1m9LYPChBLE980Jw+CS4Wokqxw==", "dev": true }, "abab": { diff --git a/action/package.json b/action/package.json index bdc1c98c..853731b1 100644 --- a/action/package.json +++ b/action/package.json @@ -14,7 +14,7 @@ "@types/git-url-parse": "^9.0.1", "@types/jest": "^27.4.0", "@types/node": "^17.0.16", - "@zeit/ncc": "^0.22.3", + "@vercel/ncc": "^0.38.1", "dotenv": "^16.0.0", "git-url-parse": "^13.1.0", "jest": "^27.5.1", From 30860fc98f35db6db39d46ac88a9b60413b5525b Mon Sep 17 00:00:00 2001 From: Sam Lanning Date: Sat, 7 Sep 2024 14:24:28 +0100 Subject: [PATCH 2/2] Fix CI Workflows & required checks in forked PRs --- .github/workflows/ci-pr.yml | 2 ++ .github/workflows/ci.yml | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml index a20e433c..bc5ba613 100644 --- a/.github/workflows/ci-pr.yml +++ b/.github/workflows/ci-pr.yml @@ -22,6 +22,8 @@ jobs: name: Run Unit Tests runs-on: ubuntu-latest steps: + - name: Install docker-compose + run: sudo apt-get update && sudo apt-get install -y docker-compose - uses: actions/checkout@master - name: Use Node.js uses: actions/setup-node@main diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 89f999db..b7945a0c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,7 @@ name: Test branch -on: push +on: + - push + - pull_request jobs: deploy-ssh-no-branch: