From 17feeb410fcdecc1b3be5b724540985f7a707e2f Mon Sep 17 00:00:00 2001
From: Mathias Wulff
Date: Sun, 8 Dec 2024 12:13:39 +1100
Subject: [PATCH] Update dependencies

---
 .gitattributes  |  1 +
 .prettierignore |  3 ++-
 README.md       | 10 ++--------
 src/engine.ts   |  9 +++------
 4 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/.gitattributes b/.gitattributes
index 3e89ce48..dc13274d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +1,2 @@
 *.min.js binary
+yarn.lock binary
\ No newline at end of file
diff --git a/.prettierignore b/.prettierignore
index 0a9d1614..08d8e289 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1,4 +1,5 @@
 legacy/
 dist/
 bin/
-*.md
\ No newline at end of file
+*.md
+*.html
\ No newline at end of file
diff --git a/README.md b/README.md
index 42e6a720..10c9f47d 100644
--- a/README.md
+++ b/README.md
@@ -264,7 +264,7 @@ Now, what is relevant to notice is how `sed` only takes 4.9 seconds longer for t
 ```
 Speed relative to fastest tool for each file size
 ---------------------------------------------------
-Bytes    sed    rr    Time it took longer (seconds)
+Bytes    sed    rr    Diff (seconds)
 1        1      39    0,5   <= sed is 39x faster
 5        1      32    0,4
 10       1      27    0,4
@@ -282,19 +282,13 @@ So even though the speed evolves very differently, there is only little practica
 
 Please note that speeds might look very different when files get as large as the memory available.
 
-Sure, I can help sharpen up the text for better clarity and impact. While I don't have specific knowledge about the rexreplace project, I understand the general idea conveyed in the text. Here's a revised version:
-
----
-
-Here's an improved version of the text for your README:
-
 ### Tips and Tricks for Performance
 
 Reading many files multiple times is far more time-consuming than creating a complex regex and reading each file once.
 
 > **Anecdote time**
 >
-> Imagine you need to duplicate a set of files, but each duplicate must have unique keys. To achieve this, you can append `_v2` to each key in the duplicated files. Running a separate `rexreplace` command for each key is a reasoable approach.
+> Imagine you need to duplicate a set of files, but within the content are references (let's call them keys) that must be unique across both old and new files. Since the complete list of keys is known, a reasonable approach is to append `_v2` to each old key in the new files by running a separate `rexreplace` command for each key.
 >
 > However, in a real-world scenario with 5,000 keys across 10,000 files, this approach took **57 minutes**. The bottleneck was identifying the 10,000 files 5,000 times, open + read each file 5000 times and the startup time of node x 5,000.
 >
diff --git a/src/engine.ts b/src/engine.ts
index 0cd93903..cd002300 100644
--- a/src/engine.ts
+++ b/src/engine.ts
@@ -87,15 +87,12 @@ function openFile(file, config) {
 function doReplacement(_file_rr: string, _config_rr: any, _data_rr: string) {
     debug('Work on content from: ' + _file_rr);
 
-    // Variables to be accessible from js.
-    if (_config_rr.replacementJs) {
-        _config_rr.replacementDynamic = dynamicReplacement(_file_rr, _config_rr, _data_rr);
-    }
-
     // Main regexp of the whole thing
     const result = _data_rr.replace(
         _config_rr.regex,
-        _config_rr.replacementJs ? _config_rr.replacementDynamic : _config_rr.replacement
+        _config_rr.replacementJs
+            ? dynamicReplacement(_file_rr, _config_rr, _data_rr)
+            : _config_rr.replacement
     );
 
     // The output of matched strings is done from the replacement, so no need to continue
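
A note on the single-pass approach described in the README hunk above: the win comes from reading each file once and matching all keys with one combined alternation, instead of re-reading every file once per key. Below is a minimal TypeScript sketch of that idea, assuming plain Node file APIs rather than the rexreplace CLI; `keys`, `files`, and the `_v2` suffix mirror the anecdote and are hypothetical placeholders.

```ts
import {readFileSync, writeFileSync} from 'fs';

// Hypothetical inputs mirroring the anecdote: 5,000 keys, 10,000 files.
const keys: string[] = ['user_id', 'order_id' /* ... */];
const files: string[] = ['copy/a.conf', 'copy/b.conf' /* ... */];

// Escape regex metacharacters so each key is matched literally,
// then join all keys into a single alternation. One regex stands in
// for 5,000 separate invocations.
const escaped = keys.map((k) => k.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'));
const combined = new RegExp('\\b(' + escaped.join('|') + ')\\b', 'g');

for (const file of files) {
    // Each file is opened, read, and written exactly once.
    const data = readFileSync(file, 'utf8');
    writeFileSync(file, data.replace(combined, '$1_v2'));
}
```

The escaping step matters: without it, keys containing characters such as `.` or `$` would be interpreted as regex syntax inside the alternation.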