♻️ refactor test functions
jcaillon committed Jan 15, 2025
1 parent 4c4dc09 commit 4366e25
Showing 57 changed files with 934 additions and 895 deletions.
8 changes: 4 additions & 4 deletions commands.d/self-mock.sh
@@ -77,10 +77,10 @@ function selfMock1() {
io::createTempFile && tmp2="${RETURNED_VALUE}"
io::createTempDirectory && tmp3="${RETURNED_VALUE}"
io::createTempDirectory && tmp4="${RETURNED_VALUE}"
log::info "Created temp file: ${tmp1//${GLOBAL_TEMPORARY_PREFIX}*.valet/\/tmp/valet}."
log::info "Created temp file: ${tmp2//${GLOBAL_TEMPORARY_PREFIX}*.valet/\/tmp/valet}."
log::info "Created temp directory: ${tmp3//${GLOBAL_TEMPORARY_PREFIX}*.valet/\/tmp/valet}."
log::info "Created temp directory: ${tmp4//${GLOBAL_TEMPORARY_PREFIX}*.valet/\/tmp/valet}."
log::info "Created temp file: ${tmp1//${GLOBAL_TEMPORARY_DIRECTORY_PREFIX}/\/tmp/valet}."
log::info "Created temp file: ${tmp2//${GLOBAL_TEMPORARY_DIRECTORY_PREFIX}/\/tmp/valet}."
log::info "Created temp directory: ${tmp3//${GLOBAL_TEMPORARY_DIRECTORY_PREFIX}/\/tmp/valet}."
log::info "Created temp directory: ${tmp4//${GLOBAL_TEMPORARY_DIRECTORY_PREFIX}/\/tmp/valet}."
# activating debug log to see the cleanup
log::setLevel debug
;;
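The path scrubbing above relies on bash's `${var//pattern/replacement}` expansion: every occurrence of the machine-specific temporary prefix is replaced with a stable `/tmp/valet` placeholder, so the logged paths are identical across runs. A minimal sketch of the mechanism (the prefix value is made up):

```bash
#!/usr/bin/env bash
# ${var//pattern/replacement} replaces every occurrence of pattern in var;
# \/ is simply an escaped literal slash inside the replacement text.
GLOBAL_TEMPORARY_DIRECTORY_PREFIX="/tmp/valet.d/1234" # made-up value
tmp1="${GLOBAL_TEMPORARY_DIRECTORY_PREFIX}/file-1"
echo "Created temp file: ${tmp1//${GLOBAL_TEMPORARY_DIRECTORY_PREFIX}/\/tmp\/valet}."
# prints: Created temp file: /tmp/valet/file-1.
```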
67 changes: 34 additions & 33 deletions commands.d/self-test-utils
@@ -30,7 +30,6 @@ function selfTestUtils_setupValetForConsistency() {
# shellcheck disable=SC2086
unset -v ${!VALET_*}


export VALET_CONFIG_ENABLE_COLORS=false
export VALET_CONFIG_ENABLE_NERDFONT_ICONS=false
export VALET_CONFIG_LOG_DISABLE_TIME=true
@@ -72,6 +71,12 @@ function selfTestUtils_setupValetForConsistency() {
export GLOBAL_LOG_LEVEL_INT=1
export GLOBAL_LOG_LEVEL=info

# fix the time to a known value
export TZ=Etc/GMT+0
unset EPOCHSECONDS EPOCHREALTIME
export EPOCHSECONDS=548902800
export EPOCHREALTIME=548902800.000000

log::createPrintFunction
eval "${GLOBAL_LOG_PRINT_FUNCTION}"
}
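Pinning the time this way works because `EPOCHSECONDS` and `EPOCHREALTIME` are dynamic bash variables, recomputed at every expansion; per the bash manual they lose that special behavior once unset, after which they can hold a fixed value. A quick sketch of the effect:

```bash
#!/usr/bin/env bash
echo "${EPOCHSECONDS}" # the current Unix time, recomputed on each expansion
unset -v EPOCHSECONDS  # the variable loses its special dynamic behavior
EPOCHSECONDS=548902800 # ...and can now be pinned for reproducible output
echo "${EPOCHSECONDS}" # always prints 548902800
```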
@@ -101,11 +106,11 @@ function selfTestUtils_compareWithApproved() {
: >"${approvedFile}"
fi

local command="${_TEST_DIFF_COMMAND//%APPROVED_FILE%/"${approvedFile}"}"
local command="${GLOBAL_TEST_DIFF_COMMAND//%APPROVED_FILE%/"${approvedFile}"}"
command="${command//%RECEIVED_FILE%/"${receivedFileToCopy}"}"

if [[ -n ${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
chmod "${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE}" "${receivedFileToCopy}"
if [[ -n ${GLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
chmod "${GLOBAL_TEST_REPORT_FILE_MODE}" "${receivedFileToCopy}"
fi

if ${command} 1>&2; then
@@ -123,78 +128,74 @@ function selfTestUtils_compareWithApproved() {
if [[ ${_TEST_AUTO_APPROVE:-false} == "true" ]]; then
log::info "→ test suite KO but auto-approving."
cp -f "${receivedFileToCopy}" "${approvedFile}"
if [[ -n ${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
chmod "${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE}" "${approvedFile}"
if [[ -n ${GLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
chmod "${GLOBAL_TEST_REPORT_FILE_MODE}" "${approvedFile}"
fi
if [[ -f "${receivedFile}" ]]; then
rm -f "${receivedFile}" 1>/dev/null
fi
else
cp -f "${receivedFileToCopy}" "${receivedFile}"
if [[ -n ${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
chmod "${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE}" "${approvedFile}"
if [[ -n ${GLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
chmod "${GLOBAL_TEST_REPORT_FILE_MODE}" "${approvedFile}"
fi
fi

return 1
}

function selfTestUtils_displayTestLogs() {
if [[ -s "${_TEST_LOG_FILE}" ]]; then
log::errorTrace "Logs for script ⌜${_TEST_SUITE_SCRIPT_NAME}⌝ (>&4):"
log::printFile "${_TEST_LOG_FILE}"
if [[ -s "${GLOBAL_TEST_LOG_FILE}" ]]; then
log::errorTrace "Logs for script ⌜${GLOBAL_TEST_SUITE_SCRIPT_NAME}⌝ (>&4):"
log::printFile "${GLOBAL_TEST_LOG_FILE}"
else
log::errorTrace "Empty logs for script ⌜${_TEST_SUITE_SCRIPT_NAME}⌝."
log::errorTrace "Empty logs for script ⌜${GLOBAL_TEST_SUITE_SCRIPT_NAME}⌝."
fi
}

function selfTestUtils_displayTestSuiteOutputs() {
if [[ -s "${GLOBAL_TEST_REPORT_FILE}" ]]; then
log::errorTrace "Test suite report for ⌜${_TEST_SUITE_NAME}⌝:"
log::errorTrace "Test suite report for ⌜${GLOBAL_TEST_SUITE_NAME}⌝:"
log::printFile "${GLOBAL_TEST_REPORT_FILE}"
else
log::errorTrace "Empty report for the test suite ⌜${_TEST_SUITE_NAME}⌝."
log::errorTrace "Empty report for the test suite ⌜${GLOBAL_TEST_SUITE_NAME}⌝."
fi

if [[ -s "${GLOBAL_TEST_STANDARD_OUTPUT_FILE}" ]]; then
log::errorTrace "Test suite standard output for ⌜${_TEST_SUITE_NAME}⌝:"
io::readFile "${GLOBAL_TEST_STANDARD_OUTPUT_FILE}"
log::printFileString "${RETURNED_VALUE}"
log::errorTrace "Test suite standard output for ⌜${GLOBAL_TEST_SUITE_NAME}⌝:"
log::printFile "${GLOBAL_TEST_STANDARD_OUTPUT_FILE}"
else
log::errorTrace "Empty standard output for the test suite ⌜${_TEST_SUITE_NAME}⌝."
log::errorTrace "Empty standard output for the test suite ⌜${GLOBAL_TEST_SUITE_NAME}⌝."
fi

if [[ -s "${GLOBAL_TEST_STANDARD_ERROR_FILE}" ]]; then
log::errorTrace "Test suite error output for ⌜${_TEST_SUITE_NAME}⌝:"
io::readFile "${GLOBAL_TEST_STANDARD_ERROR_FILE}"
log::printFileString "${RETURNED_VALUE}"
log::errorTrace "Test suite error output for ⌜${GLOBAL_TEST_SUITE_NAME}⌝:"
log::printFile "${GLOBAL_TEST_STANDARD_ERROR_FILE}"
else
log::errorTrace "Empty error output for the test suite ⌜${_TEST_SUITE_NAME}⌝."
log::errorTrace "Empty error output for the test suite ⌜${GLOBAL_TEST_SUITE_NAME}⌝."
fi
}

function selfTestUtils_setupDiffCommand() {
_TEST_DIFF_COMMAND="${VALET_CONFIG_TEST_DIFF_COMMAND:-}"
if [[ -z ${_TEST_DIFF_COMMAND} ]]; then
GLOBAL_TEST_DIFF_COMMAND="${VALET_CONFIG_TEST_DIFF_COMMAND:-}"
if [[ -z ${GLOBAL_TEST_DIFF_COMMAND} ]]; then
if command -v delta &>/dev/null; then
log::debug "Using delta as diff command."
_TEST_DIFF_COMMAND="delta --paging=never --no-gitconfig --line-numbers --side-by-side %APPROVED_FILE% %RECEIVED_FILE%"
if [[ -z ${VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE:-} ]]; then
# delta compares the file modes, so we need to match them
VALET_CONFIGGLOBAL_TEST_REPORT_FILE_MODE="644"
fi
GLOBAL_TEST_DIFF_COMMAND="delta --paging=never --no-gitconfig --line-numbers --side-by-side %APPROVED_FILE% %RECEIVED_FILE%"
# delta compares the file modes, so we need to match them
GLOBAL_TEST_REPORT_FILE_MODE="${VALET_CONFIG_TEST_REPORT_FILE_MODE:-644}"
elif command -v diff &>/dev/null; then
log::debug "Using diff as diff command."
_TEST_DIFF_COMMAND="diff --color -u %APPROVED_FILE% %RECEIVED_FILE%"
GLOBAL_TEST_DIFF_COMMAND="diff --color -u %APPROVED_FILE% %RECEIVED_FILE%"
elif command -v cmp &>/dev/null; then
log::warning "Using cmp as diff command, consider setting up a diff tool using the VALET_CONFIG_TEST_DIFF_COMMAND config variable."
_TEST_DIFF_COMMAND="cmp %APPROVED_FILE% %RECEIVED_FILE%"
GLOBAL_TEST_DIFF_COMMAND="cmp %APPROVED_FILE% %RECEIVED_FILE%"
else
log::warning "Using internal comparison function, consider setting up a diff tool using the VALET_CONFIG_TEST_DIFF_COMMAND config variable."
_TEST_DIFF_COMMAND="selfTestUtils_internalCompare %APPROVED_FILE% %RECEIVED_FILE%"
GLOBAL_TEST_DIFF_COMMAND="selfTestUtils_internalCompare %APPROVED_FILE% %RECEIVED_FILE%"
fi
else
string::cutField "${_TEST_DIFF_COMMAND}" 0 " "
string::cutField "${GLOBAL_TEST_DIFF_COMMAND}" 0 " "
local diffExecutable="${RETURNED_VALUE}"
if ! command -v "${diffExecutable}" &>/dev/null; then
log::warning "The user diff command ⌜${diffExecutable}⌝ set with VALET_CONFIG_TEST_DIFF_COMMAND is not available, using the internal comparison function."
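The diff command is held as a template in which `%APPROVED_FILE%` and `%RECEIVED_FILE%` are substituted with the actual file paths, so any comparison tool can be plugged in. A user config could thus look like this (a hypothetical value, not part of this commit):

```bash
# in the user's Valet config (hypothetical example):
export VALET_CONFIG_TEST_DIFF_COMMAND="git diff --no-index %APPROVED_FILE% %RECEIVED_FILE%"
```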
70 changes: 37 additions & 33 deletions commands.d/self-test.sh
@@ -309,10 +309,13 @@ function selfTest_parallelCallback() {
#
function selfTest_runSingleTestSuite() {
local testSuiteDirectory="${1}"
local testsDotDirectory="${testSuiteDirectory%/*}"
log::debug "Running test suite ⌜${testSuiteDirectory}⌝."

core::setShellOptions
main::unregisterTraps

GLOBAL_TEST_SUITE_NAME="${testSuiteDirectory##*/}"
local testsDotDirectory="${testSuiteDirectory%/*}"
log::debug "Running test suite ⌜${testSuiteDirectory}⌝."

# make a list of all test scripts
local -a testScripts=()
@@ -330,43 +333,43 @@ function selfTest_runSingleTestSuite() {
return 0
fi

_TEST_SUITE_NAME="${testSuiteDirectory##*/}"
GLOBAL_TEST_STANDARD_OUTPUT_FILE="${GLOBAL_TEMPORARY_IN_MEM_PREFIX}${BASHPID}.valet-test-stdout-${_TEST_SUITE_NAME}"
GLOBAL_TEST_STANDARD_ERROR_FILE="${GLOBAL_TEMPORARY_IN_MEM_PREFIX}${BASHPID}.valet-test-stderr-${_TEST_SUITE_NAME}"
GLOBAL_TEST_REPORT_FILE="${GLOBAL_TEMPORARY_IN_MEM_PREFIX}${BASHPID}.valet-test-report-${_TEST_SUITE_NAME}"
_TEST_STACK_FILE="${GLOBAL_TEMPORARY_IN_MEM_PREFIX}${BASHPID}.valet-test-stack-${_TEST_SUITE_NAME}"
_TEST_LOG_FILE="${GLOBAL_TEMPORARY_IN_MEM_PREFIX}${BASHPID}.valet-test-log-${_TEST_SUITE_NAME}"

# setup the temp locations for this test suite
_TEST_BASE_TEMPORARY_DIRECTORY="${TMPDIR:-/tmp}/valet-tests-${BASHPID}-${_TEST_SUITE_NAME}"

if [[ -d ${_TEST_BASE_TEMPORARY_DIRECTORY:-} ]]; then
log::debug "Removing existing temporary test files ⌜${_TEST_BASE_TEMPORARY_DIRECTORY}⌝."
rm -Rf "${_TEST_BASE_TEMPORARY_DIRECTORY}" &>/dev/null || :
rm -f "${GLOBAL_TEST_STANDARD_OUTPUT_FILE}" "${GLOBAL_TEST_STANDARD_ERROR_FILE}" \
"${GLOBAL_TEST_REPORT_FILE}" "${_TEST_STACK_FILE}" "${_TEST_LOG_FILE}" &>/dev/null || :
fi
# setup the temp locations for this test suite (cleanup is done at self test command level since
# we create everything in the original temp directory)
GLOBAL_TEST_BASE_TEMPORARY_DIRECTORY="${GLOBAL_TEMPORARY_DIRECTORY}/tmp-${BASHPID}.${GLOBAL_TEST_SUITE_NAME}"
unset -v VALET_CONFIG_WORK_FILES_DIRECTORY
TMPDIR="${GLOBAL_TEST_BASE_TEMPORARY_DIRECTORY}"

GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY="${GLOBAL_TEMPORARY_DIRECTORY}/output-${BASHPID}.${GLOBAL_TEST_SUITE_NAME}"
GLOBAL_TEST_STANDARD_OUTPUT_FILE="${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}/stdout"
GLOBAL_TEST_STANDARD_ERROR_FILE="${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}/stderr"
GLOBAL_TEST_REPORT_FILE="${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}/report"
GLOBAL_TEST_STACK_FILE="${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}/stack"
GLOBAL_TEST_LOG_FILE="${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}/log"
mkdir -p "${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}"

# trap to cleanup the temp files
# shellcheck disable=SC2064
# trap "rm -Rf '${GLOBAL_TEST_BASE_TEMPORARY_DIRECTORY}'; rm -Rf '${GLOBAL_TEST_OUTPUT_TEMPORARY_DIRECTORY}';" EXIT

# run a custom user script before the test suite if it exists
selfTestUtils_runHookScript "${testsDotDirectory}/before-each-test-suite"

# write the test suite title
printf "%s\n\n" "# Test suite ${_TEST_SUITE_NAME}" >"${GLOBAL_TEST_REPORT_FILE}"
printf "%s\n\n" "# Test suite ${GLOBAL_TEST_SUITE_NAME}" >"${GLOBAL_TEST_REPORT_FILE}"

pushd "${testSuiteDirectory}" 1>/dev/null

# run the test scripts
log::printString "█████████████████████"
log::info "${_TEST_SUITE_NAME}⌝:"
log::info "${GLOBAL_TEST_SUITE_NAME}⌝:"
local treeString=" ├─" treePadding=""
local -i nbScriptsDone=0 nbOfScriptsWithErrors=0
for testScript in "${testScripts[@]}"; do
_TEST_SUITE_SCRIPT_NAME="${testScript##*/}"
GLOBAL_TEST_SUITE_SCRIPT_NAME="${testScript##*/}"
if ((nbScriptsDone == ${#testScripts[@]} - 1)); then
treeString=" └─"
treePadding=" "
fi
log::printString "${treeString} ${_TEST_SUITE_SCRIPT_NAME}" "${treePadding}"
log::printString "${treeString} ${GLOBAL_TEST_SUITE_SCRIPT_NAME}" "${treePadding}"

# Run the test script in a subshell.
# This way each test can define any vars or functions without polluting
@@ -375,15 +378,15 @@ function selfTest_runSingleTestSuite() {
core::setShellOptions
trap 'selfTest_onExitTestInternal $? ' EXIT;
selfTest_runSingleTest "${testSuiteDirectory}" "${testScript}" || exit $?
) >"${_TEST_STACK_FILE}" 2>"${_TEST_LOG_FILE}"; then
) >"${GLOBAL_TEST_STACK_FILE}" 2>"${GLOBAL_TEST_LOG_FILE}"; then

# Handle an error that occurred in the test script.
# We trapped the EXIT signal in the subshell that runs the test and we make it output the
# stack trace in a file (selfTest_onExitTestInternal). We can now read this file and display the stack trace.
local exitCode="${PIPESTATUS[0]:-}"

# get the stack trace at script exit
io::readFile "${_TEST_STACK_FILE}"
io::readFile "${GLOBAL_TEST_STACK_FILE}"
eval "${RETURNED_VALUE//declare -a/}"

selfTestUtils_displayTestLogs
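The surrounding logic runs each test script in a subshell whose `EXIT` trap serializes the exit state to a file, letting the parent shell recover both the exit code and a trace. The same pattern in isolation (a sketch with a made-up failing step):

```bash
#!/usr/bin/env bash
stackFile="$(mktemp)"

function failingStep() { return 3; }

if ! (
  set -Eeu -o pipefail
  # on exit, write the code and location to fd 3 for the parent to read
  trap 'echo "exit=$? at ${BASH_SOURCE[0]}:${LINENO}" >&3' EXIT
  failingStep
) 3>"${stackFile}"; then
  # with `if ! cmd`, PIPESTATUS still holds the command's real exit code
  echo "subshell failed with code ${PIPESTATUS[0]}; trace: $(<"${stackFile}")"
fi
```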
@@ -455,21 +458,19 @@ function selfTest_runSingleTest() {

# set a simplified log print function to have consistent results in tests
selfTestUtils_setupValetForConsistency
declare -f log::print >&2

# reset the temporary location (to have consistency when using io::createTempDirectory for example)
if [[ -d ${_TEST_BASE_TEMPORARY_DIRECTORY} ]]; then
rm -Rf "${_TEST_BASE_TEMPORARY_DIRECTORY}"
if [[ -d ${GLOBAL_TEST_BASE_TEMPORARY_DIRECTORY} ]]; then
rm -Rf "${GLOBAL_TEST_BASE_TEMPORARY_DIRECTORY}"
fi
unset -v VALET_CONFIG_WORK_FILES_DIRECTORY
TMPDIR="${_TEST_BASE_TEMPORARY_DIRECTORY}"
io::setupTempFileGlobalVariable

io::cleanupTempFiles
mkdir -p "${_TEST_BASE_TEMPORARY_DIRECTORY}"
mkdir -p "${GLOBAL_TEST_BASE_TEMPORARY_DIRECTORY}"

# The following file can be used by tests during tests.
# shellcheck disable=SC2034
GLOBAL_TEST_TEMP_FILE="${GLOBAL_TEMPORARY_IN_MEM_PREFIX}${BASHPID}.valet-test-tempfile"
GLOBAL_TEST_TEMP_FILE="${GLOBAL_TEMPORARY_FILE_PREFIX}${BASHPID}.valet-test-tempfile"

# redirect the standard output and error output to files
exec 3>&1 1>"${GLOBAL_TEST_STANDARD_OUTPUT_FILE}"
@@ -480,7 +481,7 @@ function selfTest_runSingleTest() {
GLOBAL_TEST_CURRENT_DIRECTORY="${PWD}"

# write the test script name
printf "%s\n\n" "## Test script ${_TEST_SUITE_SCRIPT_NAME%.sh}" >>"${GLOBAL_TEST_REPORT_FILE}"
printf "%s\n\n" "## Test script ${GLOBAL_TEST_SUITE_SCRIPT_NAME%.sh}" >>"${GLOBAL_TEST_REPORT_FILE}"

# run a custom user script before the test if it exists
selfTestUtils_runHookScript "${testDirectory}/before-each-test"
@@ -491,4 +492,7 @@ function selfTest_runSingleTest() {

# run a custom user script after the test if it exists
selfTestUtils_runHookScript "${testDirectory}/after-each-test"

exec 3>&-
exec 4>&-
}
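The `exec 3>&1 1>…` line backs up the original standard output on fd 3 before redirecting it to a capture file, and the new `exec 3>&- 4>&-` lines close the backup descriptors once the test script is done. The stdout half of the pattern in isolation:

```bash
#!/usr/bin/env bash
exec 3>&1 1>"/tmp/captured-stdout" # back up stdout on fd 3, then redirect it
echo "this line goes into /tmp/captured-stdout"
exec 1>&3                          # restore the original stdout from the backup
echo "this line goes to the terminal again"
exec 3>&-                          # close the no-longer-needed backup fd
```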
4 changes: 2 additions & 2 deletions docs/content/docs/020.new-commands/_index.md
@@ -6,7 +6,7 @@ weight: 20
url: /docs/new-commands
---

Once you have created an extension and opened its directory, you can start creating your new commands.
Once you have [created an extension][newExtensionsLink] and opened its directory, you can start creating your new commands.

## 📂 Command files location

@@ -274,7 +274,7 @@ You can activate the debug log level with Valet `-v` option, e.g. `valet -v my c
[command-properties]: ../command-properties
[core-library]: ../libraries/core/
[bash-manual-set]: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html#index-set
[profiler-output-example]: https://github.com/jcaillon/valet/blob/latest/tests.d/1301-profiler/results.approved.md
[profiler-output-example]: https://github.com/jcaillon/valet/blob/latest/tests.d/0901-profiler/results.approved.md
[libraries]: ../libraries
[newExtensionsLink]: ../new-extensions
[newLibraryLink]: ../new-libraries
17 changes: 16 additions & 1 deletion docs/content/docs/030.test-commands/_index.md
@@ -8,7 +8,7 @@ url: /docs/test-commands

Valet comes with a standardized way to implement and run tests for your commands and library functions.

Once you have created an extension and opened its directory, you can start creating your new tests.
Once you have [created an extension][newExtensionsLink] and opened its directory, you can start creating your new tests.

## 📂 Test suites and test files

Expand Down Expand Up @@ -46,6 +46,7 @@ Here is an example of directory structure for your user directory:
{{< filetree/folder name="shared-commands" >}}
{{< filetree/folder name="tests.d" >}}
{{< filetree/folder name="test-suite3" >}}
{{< filetree/file name="before-each-test" >}}
{{< filetree/file name="test.sh" >}}
{{< /filetree/folder >}}
{{< /filetree/folder >}}
Expand Down Expand Up @@ -113,6 +114,8 @@ Find another example for [the showcase here][showcase-tests].
It is very important to note that tests, like commands, are executed with the bash options `set -Eeu -o pipefail`. If you expect a function or a command to return a non-zero code, you must handle it or the test (and thus the whole program) will exit.

E.g. do `myFunctionThatReturnsOne || echo "Failed as expected"`.

And if your function calls `exit` at some point, run it in a subshell: `(myFunctionThatExits) || echo "Exited as expected"`.
{{< /callout >}}
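A test script combining both patterns could look like this (a sketch; the function names are hypothetical):

```bash
#!/usr/bin/env bash
# tests run with set -Eeu -o pipefail, so expected failures must be handled
myFunctionThatReturnsOne || echo "Failed as expected with code $?."

# a function that calls exit must run in a subshell so only the subshell dies
(myFunctionThatExits) || echo "Exited as expected with code $?."
```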

While you can test a command by invoking valet (e.g. `valet my-command argument1`), it is recommended to test the command function itself (e.g. `myCommandFunction argument1`):
Expand Down Expand Up @@ -142,6 +145,15 @@ You can also exclude or include test suite using `-i` and `-e` options (check `v
valet self test -i my-test-suite
```

{{< callout type="info" >}}
Some additional information about the test execution:

- Each test suite is executed in a separate subshell.
- Each test script is executed in a separate subshell (within the subshell of the test suite).

This allows you to modify the shell as you wish in hooks and test implementations without impacting the other tests.
{{< /callout >}}

## 🪝 Test hooks

In addition to the test scripts, you can create other specific scripts which will be source'd at different times during the test execution:
Expand All @@ -152,6 +164,8 @@ In addition to the test scripts, you can create other specific scripts which wil
| `tests.d/after-tests` | Source'd after all the test suites inside the tests.d folder are executed. |
| `tests.d/before-each-test-suite` | Source'd before the execution of each test suite. |
| `tests.d/after-each-test-suite` | Source'd after the execution of each test suite. |
| `tests.d/{test-suite}/before-each-test` | Source'd before the execution of each test script. |
| `tests.d/{test-suite}/after-each-test` | Source'd after the execution of each test script. |
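
For instance, a `before-each-test` hook can prepare a fixture directory for every test script of its suite (a sketch; the fixture logic is made up):

```bash
# tests.d/my-test-suite/before-each-test (hypothetical path)
# source'd before each test script of this test suite
export MY_FIXTURE_DIRECTORY="${PWD}/fixtures"
mkdir -p "${MY_FIXTURE_DIRECTORY}"
```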

{{< cards >}}
{{< card icon="arrow-circle-left" link="../command-properties" title="Command properties" >}}
@@ -163,3 +177,4 @@
[valet-string-lib-tests]: https://github.com/jcaillon/valet/blob/latest/tests.d/1003-lib-string/00.tests.sh
[showcase-tests]: https://github.com/jcaillon/valet/blob/latest/examples.d/showcase/tests.d/001-showcase-test-suite/00.tests.sh
[libraries-tests]: ../libraries/test
[newExtensionsLink]: ../new-extensions
2 changes: 2 additions & 0 deletions docs/content/docs/800.roadmap/_index.md
@@ -8,6 +8,8 @@ url: /docs/roadmap

This page lists the features that I would like to implement in Valet. They come in addition to new features described in the [issues][valet-issues].

- test the new bash:: lib
- refactor the progress bar; use a signal to tell the background job to redraw the progress bar after displaying a log.
- after logging, if a progress bar is active, we need to redraw it immediately.
- for interactive mode, a first iteration is to prompt the user in the scrolling terminal. Then we add an option to instead open a full screen editor.
- might be able to improve the quicksort if we use direct statements instead of functions.