diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 38413705..b1c69fde 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -13,6 +13,7 @@ # git config blame.ignoreRevsFile .git-blame-ignore-revs ## Old changes +0c93c42891ad6b95aee81709398ded7416c9f397 767537d9523253de1615b01450a8b22c8e2cc6a2 ## 1.0.17 - Fix XML indentation diff --git a/.github/workflows/check_clang_format_and_codespell.yml b/.github/workflows/check_clang_format_and_codespell.yml index d3f20353..1c259a47 100644 --- a/.github/workflows/check_clang_format_and_codespell.yml +++ b/.github/workflows/check_clang_format_and_codespell.yml @@ -10,8 +10,7 @@ jobs: clang-format: runs-on: ubuntu-latest container: - # 20230614 - image: khronosgroup/docker-images:openxr-sdk@sha256:fbc5fe29a0787cccc8f66bd9bd03c9dbddf591c7d1aea673108c38c908b280f5 + image: khronosgroup/docker-images:openxr-sdk.20240412@sha256:e477137f8558565ac9173de8713a3e44d6d564ba042db9a20ff560eb4bfdeccc steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/gradle-wrapper-validation.yml b/.github/workflows/gradle-wrapper-validation.yml index f7579a77..1a423a9e 100644 --- a/.github/workflows/gradle-wrapper-validation.yml +++ b/.github/workflows/gradle-wrapper-validation.yml @@ -1,4 +1,5 @@ -# Copyright 2022-2023, Collabora, Ltd. +# Copyright 2022-2024, Collabora, Ltd. +# # SPDX-License-Identifier: CC0-1.0 name: Validate Gradle Wrapper @@ -18,4 +19,4 @@ jobs: - uses: actions/checkout@v4 with: lfs: true - - uses: gradle/wrapper-validation-action@v1 + - uses: gradle/actions/wrapper-validation@v3 diff --git a/.mailmap b/.mailmap index 42a9016c..3482c762 100644 --- a/.mailmap +++ b/.mailmap @@ -14,11 +14,20 @@ Blake Taylor Bryce Hutchings Bryce Hutchings <5100250+brycehutchings@users.noreply.github.com> Bryce Hutchings +Daniel Willmott <39023874+danwillm@users.noreply.github.com> +Ivan Chupakhin +Ivan Chupakhin +Kevin Qin +Kevin Qin Khronos Group Web Services Khronos Group Web Services +Liu Knox +OC Lin +Rachel Huang Rylie Pavlik Rylie Pavlik Rylie Pavlik Yin Li +samuel degrande utzcoz utzcoz <43091780+utzcoz@users.noreply.github.com> diff --git a/.reuse/dep5 b/.reuse/dep5 index ab32d6b8..747b1199 100644 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -4,6 +4,7 @@ Upstream-Contact: Rylie Pavlik Source: https://registry.khronos.org/OpenXR/ Files: changes/conformance/* + changes/major/* changes/registry/* changes/sdk/* changes/specification/* @@ -79,6 +80,11 @@ Files: external/python/markupsafe/* Copyright: 2010 Pallets License: BSD-3-Clause +Files: external/python/pyparsing/* + external/python/pyparsing-3.1.2.dist-info/* +Copyright: 2003-2022, Paul T. McGuire +License: BSD-3-Clause + Files: specification/sources/chapters/extensions/epic/epic_view_configuration_fov.adoc Copyright: 2020, Epic Games, Inc. License: CC-BY-4.0 diff --git a/CHANGELOG.CTS.md b/CHANGELOG.CTS.md index 67036b49..6a354453 100644 --- a/CHANGELOG.CTS.md +++ b/CHANGELOG.CTS.md @@ -17,6 +17,108 @@ particular, since it is primarily software, pull requests may be integrated as they are accepted even between periodic updates. However, versions that are not signed tags on the `approved` branch are not valid for conformance submission. +## OpenXR CTS 1.1.36.0 (2024-04-25) + +This new release supports testing both OpenXR 1.0 and OpenXR 1.1 runtimes, and +defaults to OpenXR 1.1 mode. See the README for more details. + +- Conformance Tests + - Fix: In multithreading test, only verify written portion of string buffer is + UTF-8. 
+    ([internal MR 3232](https://gitlab.khronos.org/openxr/openxr/merge_requests/3232))
+  - Fix: Increase `eps` for hand-tracking conformance tests.
+    ([internal MR 3233](https://gitlab.khronos.org/openxr/openxr/merge_requests/3233))
+  - Fix: Remove invalid interpretation of `XrInstanceProperties::runtimeVersion`.
+    ([internal MR 3275](https://gitlab.khronos.org/openxr/openxr/merge_requests/3275))
+  - Fix: Correct typo in CLI help text.
+    ([internal MR 3302](https://gitlab.khronos.org/openxr/openxr/merge_requests/3302))
+  - Fix: Correct typo in sample command lines in README.
+    ([internal MR 3326](https://gitlab.khronos.org/openxr/openxr/merge_requests/3326))
+  - Improvement: Update Android compile SDK version (to 33), NDK version (to 23.2),
+    and build tools version (to 34.0.0).
+    ([internal MR 2992](https://gitlab.khronos.org/openxr/openxr/merge_requests/2992))
+  - Improvement: Reduce duplication of environment variable getters and setters.
+    ([internal MR 3039](https://gitlab.khronos.org/openxr/openxr/merge_requests/3039))
+  - Improvement: Enhancements to existing test of `XR_EXT_local_floor`.
+    ([internal MR 3154](https://gitlab.khronos.org/openxr/openxr/merge_requests/3154),
+    [internal issue 2150](https://gitlab.khronos.org/openxr/openxr/issues/2150),
+    [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318),
+    [internal MR 3327](https://gitlab.khronos.org/openxr/openxr/merge_requests/3327))
+  - Improvement: Use generated data from the XML in existing action tests rather
+    than hardcoded tables.
+    ([internal MR 3224](https://gitlab.khronos.org/openxr/openxr/merge_requests/3224),
+    [internal issue 2063](https://gitlab.khronos.org/openxr/openxr/issues/2063),
+    [internal MR 3306](https://gitlab.khronos.org/openxr/openxr/merge_requests/3306),
+    [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318),
+    [internal MR 3321](https://gitlab.khronos.org/openxr/openxr/merge_requests/3321))
+  - Improvement: Automatically enable extension(s) needed for the interaction
+    profile specified on the command line.
+    ([internal MR 3224](https://gitlab.khronos.org/openxr/openxr/merge_requests/3224),
+    [internal issue 2063](https://gitlab.khronos.org/openxr/openxr/issues/2063),
+    [internal MR 3306](https://gitlab.khronos.org/openxr/openxr/merge_requests/3306),
+    [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318),
+    [internal MR 3321](https://gitlab.khronos.org/openxr/openxr/merge_requests/3321))
+  - Improvement: Code cleanup.
+    ([internal MR 3257](https://gitlab.khronos.org/openxr/openxr/merge_requests/3257),
+    [internal MR 3273](https://gitlab.khronos.org/openxr/openxr/merge_requests/3273),
+    [internal MR 3208](https://gitlab.khronos.org/openxr/openxr/merge_requests/3208),
+    [internal MR 3241](https://gitlab.khronos.org/openxr/openxr/merge_requests/3241))
+  - Improvement: Allow `VK_FORMAT_R8G8_SRGB` in swapchains test.
+    ([internal MR 3258](https://gitlab.khronos.org/openxr/openxr/merge_requests/3258))
+  - Improvement: Support specifying API version (1.0 or 1.1) - defaults to 1.1.
+ ([internal MR 3274](https://gitlab.khronos.org/openxr/openxr/merge_requests/3274), + [internal issue 2205](https://gitlab.khronos.org/openxr/openxr/issues/2205), + [internal MR 3296](https://gitlab.khronos.org/openxr/openxr/merge_requests/3296), + [internal MR 3297](https://gitlab.khronos.org/openxr/openxr/merge_requests/3297), + [internal issue 2236](https://gitlab.khronos.org/openxr/openxr/issues/2236), + [internal MR 3298](https://gitlab.khronos.org/openxr/openxr/merge_requests/3298), + [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318)) + - Improvement: Use spec-provided constants for inspecting enums for core vs + extension origination. + ([internal MR 3275](https://gitlab.khronos.org/openxr/openxr/merge_requests/3275)) + - New test: Automated test of core OpenXR 1.1 feature `LOCAL_FLOOR` reference + space. + ([internal MR 3154](https://gitlab.khronos.org/openxr/openxr/merge_requests/3154), + [internal issue 2150](https://gitlab.khronos.org/openxr/openxr/issues/2150), + [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318), + [internal MR 3327](https://gitlab.khronos.org/openxr/openxr/merge_requests/3327)) + - New test: Interactive test of `LOCAL_FLOOR` reference space (in both extension + and promoted to core). + ([internal MR 3154](https://gitlab.khronos.org/openxr/openxr/merge_requests/3154), + [internal issue 2150](https://gitlab.khronos.org/openxr/openxr/issues/2150), + [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318), + [internal MR 3327](https://gitlab.khronos.org/openxr/openxr/merge_requests/3327)) + - New test: Test for `xrLocateSpacesKHR` (from `XR_KHR_locate_spaces`) and + `xrLocateSpaces` (promoted to core OpenXR 1.1). + ([internal MR 3208](https://gitlab.khronos.org/openxr/openxr/merge_requests/3208), + [internal issue 2149](https://gitlab.khronos.org/openxr/openxr/issues/2149)) + - New test: Verify correct handling of all interaction profile paths and their + input component paths (accept vs reject suggested binding), in the "default" + configuration of the instance, using generated data from the XML. + ([internal MR 3224](https://gitlab.khronos.org/openxr/openxr/merge_requests/3224), + [internal issue 2063](https://gitlab.khronos.org/openxr/openxr/issues/2063), + [internal MR 3306](https://gitlab.khronos.org/openxr/openxr/merge_requests/3306), + [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318), + [internal MR 3321](https://gitlab.khronos.org/openxr/openxr/merge_requests/3321)) + - New test: Created ProjectionDepth interactive test to visually verify behavior + of `XR_FB_composition_layer_depth_test` extension. + ([internal MR 3229](https://gitlab.khronos.org/openxr/openxr/merge_requests/3229)) + - New test: Automated and interactive tests for the "stereo with foveated inset" + view configuration type (promoted to core OpenXR 1.1), as well as its extension + predecessor `XR_VARJO_quad_views`. + ([internal MR 3241](https://gitlab.khronos.org/openxr/openxr/merge_requests/3241), + [internal issue 2152](https://gitlab.khronos.org/openxr/openxr/issues/2152), + [internal MR 3310](https://gitlab.khronos.org/openxr/openxr/merge_requests/3310), + [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318)) + - New test: Additional test for grip_surface pose identifier (promoted to core + OpenXR 1.1), as well as extension `XR_EXT_palm_pose`. 
+    ([internal MR 3245](https://gitlab.khronos.org/openxr/openxr/merge_requests/3245),
+    [internal issue 2151](https://gitlab.khronos.org/openxr/openxr/issues/2151),
+    [internal MR 3318](https://gitlab.khronos.org/openxr/openxr/merge_requests/3318),
+    [internal MR 3328](https://gitlab.khronos.org/openxr/openxr/merge_requests/3328))
+  - New test: Created non-interactive test for `XR_FB_space_warp` extension.
+    ([internal MR 3278](https://gitlab.khronos.org/openxr/openxr/merge_requests/3278))
+
 ## OpenXR CTS 1.0.34.0 (2024-02-29)
 
 - Conformance Tests
@@ -647,11 +749,11 @@ Android.
     layer to avoid deeply-nested `if ... else` blocks. (Some compilers have
     limits we were nearing or hitting.)
     ([internal MR 2050](https://gitlab.khronos.org/openxr/openxr/merge_requests/2050))
-  - Improvement: Add device re-use test to `XR_KHR_D3D11_enable` test.
+  - Improvement: Add device reuse test to `XR_KHR_D3D11_enable` test.
     ([internal MR 2054](https://gitlab.khronos.org/openxr/openxr/merge_requests/2054))
-  - Improvement: Add device re-use test to `XR_KHR_D3D12_enable` test.
+  - Improvement: Add device reuse test to `XR_KHR_D3D12_enable` test.
     ([internal MR 2054](https://gitlab.khronos.org/openxr/openxr/merge_requests/2054))
-  - Improvement: Add device re-use test to `XR_KHR_opengl_enable` test.
+  - Improvement: Add device reuse test to `XR_KHR_opengl_enable` test.
     ([internal MR 2054](https://gitlab.khronos.org/openxr/openxr/merge_requests/2054))
   - Improvement: Add support for `XR_KHR_vulkan_enable2` to conformance test suite.
     ([internal MR 2073](https://gitlab.khronos.org/openxr/openxr/merge_requests/2073))
diff --git a/README.md b/README.md
index cf00a543..45dfcbd0 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,16 @@ presence of an appropriately-named tag: it does not check for a signature on
 the tag, so if you have added tags to your repo it may not warn if you are not
 on a release.)
 
+When you submit for OpenXR 1.1 conformance and also want to claim conformance
+for earlier OpenXR minor versions that your runtime supports, you must cover
+each of those versions in the same submission. For a runtime claiming both
+OpenXR 1.0 and OpenXR 1.1 support, this means executing each test run twice:
+once with `--apiVersion 1.0` and once with `--apiVersion 1.1`.
+
+If you don't supply the `--apiVersion` argument, it defaults to the latest OpenXR
+version as defined in the CTS's own OpenXR header. When running the CTS for a
+conformance submission, you should always supply an `--apiVersion` argument.
+
 Running CTS
 -----------
 
@@ -48,13 +58,21 @@ as it is important for interpreting the results.
 
 1. Run the automated tests (non-interactive tests) for every graphics API that is
    supported.
- Example: + Example for OpenXR 1.0: + + conformance_cli "exclude:[interactive]" -G d3d11 --apiVersion 1.0 --reporter ctsxml::out=automated_d3d11_1_0.xml + conformance_cli "exclude:[interactive]" -G d3d12 --apiVersion 1.0 --reporter ctsxml::out=automated_d3d12_1_0.xml + conformance_cli "exclude:[interactive]" -G vulkan --apiVersion 1.0 --reporter ctsxml::out=automated_vulkan_1_0.xml + conformance_cli "exclude:[interactive]" -G vulkan2 --apiVersion 1.0 --reporter ctsxml::out=automated_vulkan2_1_0.xml + conformance_cli "exclude:[interactive]" -G opengl --apiVersion 1.0 --reporter ctsxml::out=automated_opengl_1_0.xml + + Example for OpenXR 1.1: - conformance_cli "exclude:[interactive]" -G d3d11 --reporter ctsxml::out=automated_d3d11.xml - conformance_cli "exclude:[interactive]" -G d3d12 --reporter ctsxml::out=automated_d3d12.xml - conformance_cli "exclude:[interactive]" -G vulkan --reporter ctsxml::out=automated_vulkan.xml - conformance_cli "exclude:[interactive]" -G vulkan2 --reporter ctsxml::out=automated_vulkan2.xml - conformance_cli "exclude:[interactive]" -G opengl --reporter ctsxml::out=automated_opengl.xml + conformance_cli "exclude:[interactive]" -G d3d11 --apiVersion 1.1 --reporter ctsxml::out=automated_d3d11_1_1.xml + conformance_cli "exclude:[interactive]" -G d3d12 --apiVersion 1.1 --reporter ctsxml::out=automated_d3d12_1_1.xml + conformance_cli "exclude:[interactive]" -G vulkan --apiVersion 1.1 --reporter ctsxml::out=automated_vulkan_1_1.xml + conformance_cli "exclude:[interactive]" -G vulkan2 --apiVersion 1.1 --reporter ctsxml::out=automated_vulkan2_1_1.xml + conformance_cli "exclude:[interactive]" -G opengl --apiVersion 1.1 --reporter ctsxml::out=automated_opengl_1_1.xml Notes: * Some tests require that a begun session progresses to `XR_SESSION_STATE_FOCUSED`. @@ -65,13 +83,21 @@ as it is important for interpreting the results. 2. Run the interactive composition tests for every graphics API that is supported. 
- Example: + Example for OpenXR 1.0: - conformance_cli "[composition][interactive]" -G d3d11 --reporter ctsxml::out=interactive_composition_d3d11.xml - conformance_cli "[composition][interactive]" -G d3d12 --reporter ctsxml::out=interactive_composition_d3d12.xml - conformance_cli "[composition][interactive]" -G vulkan --reporter ctsxml::out=interactive_composition_vulkan.xml - conformance_cli "[composition][interactive]" -G vulkan2 --reporter ctsxml::out=interactive_composition_vulkan2.xml - conformance_cli "[composition][interactive]" -G opengl --reporter ctsxml::out=interactive_composition_opengl.xml + conformance_cli "[composition][interactive]" -G d3d11 --apiVersion 1.0 --reporter ctsxml::out=interactive_composition_d3d11_1_0.xml + conformance_cli "[composition][interactive]" -G d3d12 --apiVersion 1.0 --reporter ctsxml::out=interactive_composition_d3d12_1_0.xml + conformance_cli "[composition][interactive]" -G vulkan --apiVersion 1.0 --reporter ctsxml::out=interactive_composition_vulkan_1_0.xml + conformance_cli "[composition][interactive]" -G vulkan2 --apiVersion 1.0 --reporter ctsxml::out=interactive_composition_vulkan2_1_0.xml + conformance_cli "[composition][interactive]" -G opengl --apiVersion 1.0 --reporter ctsxml::out=interactive_composition_opengl_1_0.xml + + Example for OpenXR 1.1: + + conformance_cli "[composition][interactive]" -G d3d11 --apiVersion 1.1 --reporter ctsxml::out=interactive_composition_d3d11_1_1.xml + conformance_cli "[composition][interactive]" -G d3d12 --apiVersion 1.1 --reporter ctsxml::out=interactive_composition_d3d12_1_1.xml + conformance_cli "[composition][interactive]" -G vulkan --apiVersion 1.1 --reporter ctsxml::out=interactive_composition_vulkan_1_1.xml + conformance_cli "[composition][interactive]" -G vulkan2 --apiVersion 1.1 --reporter ctsxml::out=interactive_composition_vulkan2_1_1.xml + conformance_cli "[composition][interactive]" -G opengl --apiVersion 1.1 --reporter ctsxml::out=interactive_composition_opengl_1_1.xml Notes: * The runtime must support `khr/simple_controller` to manually pass or fail @@ -84,9 +110,13 @@ as it is important for interpreting the results. 3. Run the interactive scenario tests. Run the tests for at least one graphics API. - Example: + Example for OpenXR 1.0: + + conformance_cli "[scenario][interactive]" -G opengl --apiVersion 1.0 --reporter ctsxml::out=interactive_scenarios_1_0.xml - conformance_cli "[scenario][interactive]" -G opengl --reporter ctsxml::out=interactive_scenarios.xml + Example for OpenXR 1.1: + + conformance_cli "[scenario][interactive]" -G opengl --apiVersion 1.1 --reporter ctsxml::out=interactive_scenarios_1_1.xml Notes: * The runtime must support `khr/simple_controller`. If it cannot be included @@ -98,15 +128,30 @@ as it is important for interpreting the results. 
     Example:
 
-    conformance_cli "[actions][interactive]" -G d3d11 -I "khr/simple_controller" --reporter ctsxml::out=interactive_action_simple_controller.xml
-    conformance_cli "[actions][interactive]" -G d3d11 -I "microsoft/motion_controller" --reporter ctsxml::out=interactive_action_microsoft_motion_controller.xml
-    conformance_cli "[actions][interactive]" -G d3d11 -I "oculus/touch_controller" --reporter ctsxml::out=interactive_action_oculus_touch_controller.xml
-    conformance_cli "[actions][interactive]" -G d3d11 -I "htc/vive_controller" --reporter ctsxml::out=interactive_action_htc_vive_controller.xml
+    Example for OpenXR 1.0:
+
+    conformance_cli "[actions][interactive]" -G d3d11 -I "khr/simple_controller" --apiVersion 1.0 --reporter ctsxml::out=interactive_action_simple_controller_1_0.xml
+    conformance_cli "[actions][interactive]" -G d3d11 -I "microsoft/motion_controller" --apiVersion 1.0 --reporter ctsxml::out=interactive_action_microsoft_motion_controller_1_0.xml
+    conformance_cli "[actions][interactive]" -G d3d11 -I "oculus/touch_controller" --apiVersion 1.0 --reporter ctsxml::out=interactive_action_oculus_touch_controller_1_0.xml
+    conformance_cli "[actions][interactive]" -G d3d11 -I "htc/vive_controller" --apiVersion 1.0 --reporter ctsxml::out=interactive_action_htc_vive_controller_1_0.xml
+
+    Example for OpenXR 1.1:
+
+    conformance_cli "[actions][interactive]" -G d3d11 -I "khr/simple_controller" --apiVersion 1.1 --reporter ctsxml::out=interactive_action_simple_controller_1_1.xml
+    conformance_cli "[actions][interactive]" -G d3d11 -I "microsoft/motion_controller" --apiVersion 1.1 --reporter ctsxml::out=interactive_action_microsoft_motion_controller_1_1.xml
+    conformance_cli "[actions][interactive]" -G d3d11 -I "oculus/touch_controller" --apiVersion 1.1 --reporter ctsxml::out=interactive_action_oculus_touch_controller_1_1.xml
+    conformance_cli "[actions][interactive]" -G d3d11 -I "htc/vive_controller" --apiVersion 1.1 --reporter ctsxml::out=interactive_action_htc_vive_controller_1_1.xml
 
     Note that the `microsoft/xbox_controller` interaction profile only needs to
     run against the `[gamepad]` tests:
 
-    conformance_cli "[gamepad]" -G d3d11 -I "microsoft/xbox_controller" --reporter ctsxml::out=interactive_action_microsoft_xbox_controller.xml
+    Example for OpenXR 1.0:
+
+    conformance_cli "[gamepad]" -G d3d11 -I "microsoft/xbox_controller" --apiVersion 1.0 --reporter ctsxml::out=interactive_action_microsoft_xbox_controller_1_0.xml
+
+    Example for OpenXR 1.1:
+
+    conformance_cli "[gamepad]" -G d3d11 -I "microsoft/xbox_controller" --apiVersion 1.1 --reporter ctsxml::out=interactive_action_microsoft_xbox_controller_1_1.xml
 
     Notes:
     * A person must use the OpenXR action system input by following the displayed
@@ -127,7 +172,13 @@ Conformance Layer.
 
 The activity accepts the equivalent of the command line arguments described
 above using "Intent Extras" instead.
`adb shell` may be used to start the CTS and pass intent extras, for example as follows: - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan -e xmlFilename automated_vulkan.xml +Example for OpenXR 1.0: + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan -e apiVersion 1.0 -e xmlFilename automated_vulkan_1_0.xml + +Example for OpenXR 1.1: + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan -e apiVersion 1.1 -e xmlFilename automated_vulkan_1_1.xml which is the rough equivalent of the Vulkan-related command in item 1 above. @@ -153,22 +204,43 @@ You will need to translate the sample command lines in the preceding section to this format using intent extras, or create a launcher activity that generates those intents. Samples translated include: - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan -e xmlFilename automated_vulkan.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan2 -e xmlFilename automated_vulkan2.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin opengles -e xmlFilename automated_opengles.xml + Example for OpenXR 1.0: + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan -e apiVersion 1.0 -e xmlFilename automated_vulkan_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan2 -e apiVersion 1.0 -e xmlFilename automated_vulkan2_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin opengles -e apiVersion 1.0 -e xmlFilename automated_opengles_1_0.xml + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin vulkan -e apiVersion 1.0 -e xmlFilename interactive_composition_vulkan_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin vulkan2 -e apiVersion 1.0 -e xmlFilename interactive_composition_vulkan2_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin opengles -e apiVersion 1.0 -e xmlFilename interactive_composition_opengles_1_0.xml + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[scenario][interactive]" -e apiVersion 1.0 -e xmlFilename interactive_scenarios_1_0.xml + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,khr/simple_controller" -e apiVersion 1.0 -e xmlFilename interactive_action_simple_controller_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,microsoft/motion_controller" -e apiVersion 1.0 -e xmlFilename 
interactive_action_microsoft_motion_controller_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,oculus/touch_controller" -e apiVersion 1.0 -e xmlFilename interactive_action_oculus_touch_controller_1_0.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,htc/vive_controller" -e apiVersion 1.0 -e xmlFilename interactive_action_htc_vive_controller_1_0.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin vulkan -e xmlFilename interactive_composition_vulkan.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin vulkan2 -e xmlFilename interactive_composition_vulkan2.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin opengles -e xmlFilename interactive_composition_opengles.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[gamepad],-I,microsoft/xbox_controller" -e apiVersion 1.0 -e xmlFilename interactive_action_microsoft_xbox_controller_1_0.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[scenario][interactive]" -e xmlFilename interactive_scenarios.xml + Example for OpenXR 1.1: - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,khr/simple_controller" -e xmlFilename interactive_action_simple_controller.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,microsoft/motion_controller" -e xmlFilename interactive_action_microsoft_motion_controller.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,oculus/touch_controller" -e xmlFilename interactive_action_oculus_touch_controller.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[gamepad],-I,htc/vive_controller" -e xmlFilename interactive_action_htc_vive_controller.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan -e apiVersion 1.1 -e xmlFilename automated_vulkan_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin vulkan2 -e apiVersion 1.1 -e xmlFilename automated_vulkan2_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "exclude:[interactive]" -e graphicsPlugin opengles -e apiVersion 1.1 -e xmlFilename automated_opengles_1_1.xml - adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,microsoft/xbox_controller" -e xmlFilename interactive_action_microsoft_xbox_controller.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin vulkan -e apiVersion 1.1 -e xmlFilename interactive_composition_vulkan_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e 
graphicsPlugin vulkan2 -e apiVersion 1.1 -e xmlFilename interactive_composition_vulkan2_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[composition][interactive]" -e graphicsPlugin opengles -e apiVersion 1.1 -e xmlFilename interactive_composition_opengles_1_1.xml + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[scenario][interactive]" -e apiVersion 1.1 -e xmlFilename interactive_scenarios_1_1.xml + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,khr/simple_controller" -e apiVersion 1.1 -e xmlFilename interactive_action_simple_controller_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,microsoft/motion_controller" -e apiVersion 1.1 -e xmlFilename interactive_action_microsoft_motion_controller_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,oculus/touch_controller" -e apiVersion 1.1 -e xmlFilename interactive_action_oculus_touch_controller_1_1.xml + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[actions][interactive],-I,htc/vive_controller" -e apiVersion 1.1 -e xmlFilename interactive_action_htc_vive_controller_1_1.xml + + adb shell am start-activity -S -n org.khronos.openxr.cts/android.app.NativeActivity --esa args "[gamepad],-I,microsoft/xbox_controller" -e apiVersion 1.1 -e xmlFilename interactive_action_microsoft_xbox_controller_1_1.xml If you need to specify a different environment blend mode than `XR_ENVIRONMENT_BLEND_MODE_OPAQUE`, pass something like the following @@ -194,11 +266,21 @@ specific details of the renderer. 
To run the self-tests, commands similar to the following can be used: - conformance_cli "[self_test][interactive]" -G d3d11 --reporter ctsxml::out=interactive_self_test_d3d11.xml - conformance_cli "[self_test][interactive]" -G d3d12 --reporter ctsxml::out=interactive_self_test_d3d12.xml - conformance_cli "[self_test][interactive]" -G vulkan --reporter ctsxml::out=interactive_self_test_vulkan.xml - conformance_cli "[self_test][interactive]" -G vulkan2 --reporter ctsxml::out=interactive_self_test_vulkan2.xml - conformance_cli "[self_test][interactive]" -G opengl --reporter ctsxml::out=interactive_self_test_opengl.xml + Example for OpenXR 1.0: + + conformance_cli "[self_test][interactive]" -G d3d11 --apiVersion 1.0 --reporter ctsxml::out=interactive_self_test_d3d11_1_0.xml + conformance_cli "[self_test][interactive]" -G d3d12 --apiVersion 1.0 --reporter ctsxml::out=interactive_self_test_d3d12_1_0.xml + conformance_cli "[self_test][interactive]" -G vulkan --apiVersion 1.0 --reporter ctsxml::out=interactive_self_test_vulkan_1_0.xml + conformance_cli "[self_test][interactive]" -G vulkan2 --apiVersion 1.0 --reporter ctsxml::out=interactive_self_test_vulkan2_1_0.xml + conformance_cli "[self_test][interactive]" -G opengl --apiVersion 1.0 --reporter ctsxml::out=interactive_self_test_opengl_1_0.xml + + Example for OpenXR 1.1: + + conformance_cli "[self_test][interactive]" -G d3d11 --apiVersion 1.1 --reporter ctsxml::out=interactive_self_test_d3d11_1_1.xml + conformance_cli "[self_test][interactive]" -G d3d12 --apiVersion 1.1 --reporter ctsxml::out=interactive_self_test_d3d12_1_1.xml + conformance_cli "[self_test][interactive]" -G vulkan --apiVersion 1.1 --reporter ctsxml::out=interactive_self_test_vulkan_1_1.xml + conformance_cli "[self_test][interactive]" -G vulkan2 --apiVersion 1.1 --reporter ctsxml::out=interactive_self_test_vulkan2_1_1.xml + conformance_cli "[self_test][interactive]" -G opengl --apiVersion 1.1 --reporter ctsxml::out=interactive_self_test_opengl_1_1.xml Conformance Submission Package Requirements ------------------------------------------- @@ -217,27 +299,40 @@ Details: One or more automated test result XML files, 1 per graphics API supported, therefore one or more of the following generated output files: - automated_d3d11.xml - automated_d3d12.xml - automated_opengl.xml - automated_gles.xml - automated_vulkan.xml - automated_vulkan2.xml + automated_d3d11_1_0.xml + automated_d3d12_1_0.xml + automated_opengl_1_0.xml + automated_gles_1_0.xml + automated_vulkan_1_0.xml + automated_vulkan2_1_0.xml + automated_d3d11_1_1.xml + automated_d3d12_1_1.xml + automated_opengl_1_1.xml + automated_gles_1_1.xml + automated_vulkan_1_1.xml + automated_vulkan2_1_1.xml The output XML file(s) from running the interactive tests, 1 per supported graphics API, therefore one or more of the following generated output files: - interactive_composition_d3d11.xml - interactive_composition_d3d12.xml - interactive_composition_opengl.xml - interactive_composition_gles.xml - interactive_composition_vulkan.xml - interactive_composition_vulkan2.xml + interactive_composition_d3d11_1_0.xml + interactive_composition_d3d12_1_0.xml + interactive_composition_opengl_1_0.xml + interactive_composition_gles_1_0.xml + interactive_composition_vulkan_1_0.xml + interactive_composition_vulkan2_1_0.xml + interactive_composition_d3d11_1_1.xml + interactive_composition_d3d12_1_1.xml + interactive_composition_opengl_1_1.xml + interactive_composition_gles_1_1.xml + interactive_composition_vulkan_1_1.xml + 
interactive_composition_vulkan2_1_1.xml At least one output file from running the interactive scenario tests on a single graphics API (more is better): - interactive_scenarios.xml + interactive_scenarios_1_0.xml + interactive_scenarios_1_1.xml The output XML file(s) from running the interactive action tests, 1 per supported interaction profile, therefore one or more of the following @@ -245,12 +340,18 @@ Details: have their own controllers though simple_controller is expected to be supported at a minimum. - interactive_action_simple_controller.xml - interactive_action_microsoft_xbox_controller.xml - interactive_action_microsoft_motion_controller.xml - interactive_action_oculus_touch_controller.xml - interactive_action_valve_index_controller.xml - interactive_action_htc_vive_controller.xml + interactive_action_simple_controller_1_0.xml + interactive_action_microsoft_xbox_controller_1_0.xml + interactive_action_microsoft_motion_controller_1_0.xml + interactive_action_oculus_touch_controller_1_0.xml + interactive_action_valve_index_controller_1_0.xml + interactive_action_htc_vive_controller_1_0.xml + interactive_action_simple_controller_1_1.xml + interactive_action_microsoft_xbox_controller_1_1.xml + interactive_action_microsoft_motion_controller_1_1.xml + interactive_action_oculus_touch_controller_1_1.xml + interactive_action_valve_index_controller_1_1.xml + interactive_action_htc_vive_controller_1_1.xml 2. The console output produced by the CTS runs above. @@ -301,8 +402,8 @@ Details: member company, or some recognizable abbreviation. The `<_info>` field is optional. It may be used to uniquely identify a submission by OS, platform, date, or other criteria when making multiple submissions. For example, a - company XYZ may make a submission for an OpenXR 1.0 implementation named - `XR10_XYZ_PRODUCTA_Windows10.tgz`. + company XYZ may make a submission for an OpenXR 1.1 implementation named + `XR11_XYZ_PRODUCTA_Windows10.tgz`. Waivers ------- diff --git a/changes/registry/README.md b/changes/registry/README.md index 58f646d7..ac93dd9c 100644 --- a/changes/registry/README.md +++ b/changes/registry/README.md @@ -35,6 +35,6 @@ For reserving one or more extensions: > Extension reservation: Reserve an extension for VendorName. - Pluralize "an extension" if reserving multiple extensions -- May pre-pend "Register author ID and" if applicable (after the colon). +- May prepend "Register author ID and" if applicable (after the colon). - You may optionally provide information about your plans for those extensions, but this is not required, just permitted. diff --git a/checkCodespell b/checkCodespell index 0a19e7c2..f3bf3935 100755 --- a/checkCodespell +++ b/checkCodespell @@ -45,7 +45,7 @@ CODESPELL_IGNORE=${CODESPELL_IGNORE:-${DEFAULT_CODESPELL_IGNORE}} # ba is from a regex # unknwn is a header file name # Wee, Ser, Nuber, Blok are names -IGNORE_WORDS="lod,nd,ba,unknwn,wee,ser,nuber,blok" +IGNORE_WORDS="lod,nd,ba,unknwn,wee,ser,nuber,blok,rouge,implementor,implementors" # Add to this to exclude individual files or directories (comma-delimited) # - Skipping external code. 
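For context, an ignore list like `IGNORE_WORDS` above is what codespell consumes
through its `-L`/`--ignore-words-list` option. A minimal sketch of such an
invocation follows; the skip pattern and exact flags are illustrative assumptions,
not quoted from the `checkCodespell` script itself:

    # Hypothetical wrapper logic around codespell:
    IGNORE_WORDS="lod,nd,ba,unknwn,wee,ser,nuber,blok,rouge,implementor,implementors"
    # -L / --ignore-words-list suppresses these exact words as "typos";
    # --skip excludes vendored code such as external/.
    codespell --ignore-words-list "$IGNORE_WORDS" --skip "external/*" .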
diff --git a/external/python/pyparsing-3.1.2.dist-info/LICENSE b/external/python/pyparsing-3.1.2.dist-info/LICENSE new file mode 100644 index 00000000..1bf98523 --- /dev/null +++ b/external/python/pyparsing-3.1.2.dist-info/LICENSE @@ -0,0 +1,18 @@ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/external/python/pyparsing-3.1.2.dist-info/METADATA b/external/python/pyparsing-3.1.2.dist-info/METADATA new file mode 100644 index 00000000..cac4d35d --- /dev/null +++ b/external/python/pyparsing-3.1.2.dist-info/METADATA @@ -0,0 +1,127 @@ +Metadata-Version: 2.1 +Name: pyparsing +Version: 3.1.2 +Summary: pyparsing module - Classes and methods to define and execute parsing grammars +Author-email: Paul McGuire +Requires-Python: >=3.6.8 +Description-Content-Type: text/x-rst +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Compilers +Classifier: Topic :: Text Processing +Classifier: Typing :: Typed +Requires-Dist: railroad-diagrams ; extra == "diagrams" +Requires-Dist: jinja2 ; extra == "diagrams" +Project-URL: Homepage, https://github.com/pyparsing/pyparsing/ +Provides-Extra: diagrams + +PyParsing -- A Python Parsing Module +==================================== + +|Version| |Build Status| |Coverage| |License| |Python Versions| |Snyk Score| + +Introduction +============ + +The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. The pyparsing module provides a library of +classes that client code uses to construct the grammar directly in +Python code. 
+
+*[Since first writing this description of pyparsing in late 2003, this
+technique for developing parsers has become more widespread, under the
+name Parsing Expression Grammars - PEGs. See more information on PEGs*
+`here <https://en.wikipedia.org/wiki/Parsing_expression_grammar>`__
+*.]*
+
+Here is a program to parse ``"Hello, World!"`` (or any greeting of the form
+``"<salutation>, <addressee>!"``):
+
+.. code:: python
+
+    from pyparsing import Word, alphas
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+    hello = "Hello, World!"
+    print(hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of '+', '|' and '^' operator
+definitions.
+
+The parsed results returned from ``parseString()`` is a collection of type
+``ParseResults``, which can be accessed as a
+nested list, a dictionary, or an object with named attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+- extra or missing whitespace (the above program will also handle ``"Hello,World!"``, ``"Hello , World !"``, etc.)
+- quoted strings
+- embedded comments
+
+The examples directory includes a simple SQL parser, simple CORBA IDL
+parser, a config file parser, a chemical formula parser, and a four-
+function algebraic notation parser, among many others.
+
+Documentation
+=============
+
+There are many examples in the online docstrings of the classes
+and methods in pyparsing. You can find them compiled into `online docs <https://pyparsing-docs.readthedocs.io/en/latest/>`__. Additional
+documentation resources and project info are listed in the online
+`GitHub wiki <https://github.com/pyparsing/pyparsing/wiki>`__. An
+entire directory of examples can be found `here <https://github.com/pyparsing/pyparsing/tree/master/examples>`__.
+
+License
+=======
+
+MIT License. See header of the `pyparsing __init__.py <https://github.com/pyparsing/pyparsing/blob/master/pyparsing/__init__.py>`__ file.
+
+History
+=======
+
+See `CHANGES <https://github.com/pyparsing/pyparsing/blob/master/CHANGES>`__ file.
+
+.. |Build Status| image:: https://github.com/pyparsing/pyparsing/actions/workflows/ci.yml/badge.svg
+   :target: https://github.com/pyparsing/pyparsing/actions/workflows/ci.yml
+
+.. |Coverage| image:: https://codecov.io/gh/pyparsing/pyparsing/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/pyparsing/pyparsing
+
+.. |Version| image:: https://img.shields.io/pypi/v/pyparsing?style=flat-square
+   :target: https://pypi.org/project/pyparsing/
+   :alt: Version
+
+.. |License| image:: https://img.shields.io/pypi/l/pyparsing.svg?style=flat-square
+   :target: https://pypi.org/project/pyparsing/
+   :alt: License
+
+.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pyparsing.svg?style=flat-square
+   :target: https://pypi.org/project/python-liquid/
+   :alt: Python versions
+
+..
|Snyk Score| image:: https://snyk.io//advisor/python/pyparsing/badge.svg + :target: https://snyk.io//advisor/python/pyparsing + :alt: pyparsing + diff --git a/external/python/pyparsing-3.1.2.dist-info/RECORD b/external/python/pyparsing-3.1.2.dist-info/RECORD new file mode 100644 index 00000000..2c891683 --- /dev/null +++ b/external/python/pyparsing-3.1.2.dist-info/RECORD @@ -0,0 +1,16 @@ +pyparsing/__init__.py,sha256=lGBJ8MMj1BW_WNvsO0HzCN-vL8rUz_Hh2wancJgYKHc,9148 +pyparsing/actions.py,sha256=JERyInPyDIR5nD_fdNW82izgqC3CWldkQe5ocqiEpH4,6590 +pyparsing/common.py,sha256=Jp137zU--CG8_7XXMGHHPJciTmJjLI_YAWioYtBaKg8,13652 +pyparsing/core.py,sha256=4lwoHS9akOkm1uTiio8D9riXAbjouLqyJHLvcApUzdc,225025 +pyparsing/exceptions.py,sha256=fiFbn5KLg9lC1M9k9cQY2EdtBYaZvOjLqqPH6BZVEEA,9503 +pyparsing/helpers.py,sha256=qVn6cjBWLlDa4_26ESLl9zZc98KGkOBSgl4NGQXxbfk,38746 +pyparsing/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pyparsing/results.py,sha256=0febgrqL0IUiPNqtlVDcJ38bkM7kJWfz-ad2IlnLN4U,25667 +pyparsing/testing.py,sha256=76XAx8JRLD8lLfYbrZN4QwOphlYX16R8oyO3_qgyEYA,13802 +pyparsing/unicode.py,sha256=2iz6P6eDK5fZuSBJGiNLWkijrVVW7e62hriP8a2AjrQ,10518 +pyparsing/util.py,sha256=wV3ZlfV7OaMnjxP7UdUHeLPPhcPqbAvXpSiYGDdTEYo,8437 +pyparsing/diagram/__init__.py,sha256=Umap-1h-5r9RbVgL_JjBuCDt0C1MVrqpi90RyHq6EM4,24194 +pyparsing-3.1.2.dist-info/LICENSE,sha256=ENUSChaAWAT_2otojCIL-06POXQbVzIGBNRVowngGXI,1023 +pyparsing-3.1.2.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +pyparsing-3.1.2.dist-info/METADATA,sha256=fmMA9iEWL0Z5dAde98Q0byK-M-JxmGgPg2tjNE3ymGQ,5141 +pyparsing-3.1.2.dist-info/RECORD,, diff --git a/external/python/pyparsing-3.1.2.dist-info/WHEEL b/external/python/pyparsing-3.1.2.dist-info/WHEEL new file mode 100644 index 00000000..3b5e64b5 --- /dev/null +++ b/external/python/pyparsing-3.1.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/external/python/pyparsing/__init__.py b/external/python/pyparsing/__init__.py new file mode 100644 index 00000000..79d8153c --- /dev/null +++ b/external/python/pyparsing/__init__.py @@ -0,0 +1,325 @@ +# module pyparsing.py +# +# Copyright (c) 2003-2022 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+#
+
+__doc__ = """
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions. With pyparsing, you don't need to learn
+a new syntax for defining grammars or matching expressions - the parsing
+module provides a library of classes that you use to construct the
+grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``"<salutation>, <addressee>!"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print(hello, "->", greet.parse_string(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of :class:`'+'<And>`,
+:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parse_string` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+  - extra or missing whitespace (the above program will also handle
+    "Hello,World!", "Hello , World !", etc.)
+  - quoted strings
+  - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from.
Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'`, :class:`'|'`, :class:`'^'`, + and :class:`'&'` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.set_results_name` + - access the parsed data, which is returned as a :class:`ParseResults` + object + - find some helpful expression short-cuts like :class:`DelimitedList` + and :class:`one_of` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class +""" +from typing import NamedTuple + + +class version_info(NamedTuple): + major: int + minor: int + micro: int + releaselevel: str + serial: int + + @property + def __version__(self): + return ( + f"{self.major}.{self.minor}.{self.micro}" + + ( + f"{'r' if self.releaselevel[0] == 'c' else ''}{self.releaselevel[0]}{self.serial}", + "", + )[self.releaselevel == "final"] + ) + + def __str__(self): + return f"{__name__} {self.__version__} / {__version_time__}" + + def __repr__(self): + return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})" + + +__version_info__ = version_info(3, 1, 2, "final", 1) +__version_time__ = "06 Mar 2024 07:08 UTC" +__version__ = __version_info__.__version__ +__versionTime__ = __version_time__ +__author__ = "Paul McGuire " + +from .util import * +from .exceptions import * +from .actions import * +from .core import __diag__, __compat__ +from .results import * +from .core import * # type: ignore[misc, assignment] +from .core import _builtin_exprs as core_builtin_exprs +from .helpers import * # type: ignore[misc, assignment] +from .helpers import _builtin_exprs as helper_builtin_exprs + +from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode +from .testing import pyparsing_test as testing +from .common import ( + pyparsing_common as common, + _builtin_exprs as common_builtin_exprs, +) + +# define backward compat synonyms +if "pyparsing_unicode" not in globals(): + pyparsing_unicode = unicode # type: ignore[misc] +if "pyparsing_common" not in globals(): + pyparsing_common = common # type: ignore[misc] +if "pyparsing_test" not in globals(): + pyparsing_test = testing # type: ignore[misc] + +core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs + + +__all__ = [ + "__version__", + "__version_time__", + "__author__", + "__compat__", + "__diag__", + "And", + "AtLineStart", + "AtStringStart", + "CaselessKeyword", + "CaselessLiteral", + "CharsNotIn", + "CloseMatch", + "Combine", + "DelimitedList", + "Dict", + "Each", + "Empty", + "FollowedBy", + "Forward", + "GoToColumn", + "Group", + "IndentedBlock", + "Keyword", + "LineEnd", + "LineStart", + "Literal", + "Located", + "PrecededBy", + "MatchFirst", + "NoMatch", + "NotAny", + "OneOrMore", + "OnlyOnce", + "OpAssoc", + "Opt", + "Optional", + "Or", + "ParseBaseException", + "ParseElementEnhance", + "ParseException", + "ParseExpression", + "ParseFatalException", + "ParseResults", + "ParseSyntaxException", + "ParserElement", + "PositionToken", + "QuotedString", + "RecursiveGrammarException", + "Regex", + "SkipTo", + "StringEnd", + "StringStart", + "Suppress", + "Token", + "TokenConverter", + "White", + "Word", + "WordEnd", + 
"WordStart", + "ZeroOrMore", + "Char", + "alphanums", + "alphas", + "alphas8bit", + "any_close_tag", + "any_open_tag", + "autoname_elements", + "c_style_comment", + "col", + "common_html_entity", + "condition_as_parse_action", + "counted_array", + "cpp_style_comment", + "dbl_quoted_string", + "dbl_slash_comment", + "delimited_list", + "dict_of", + "empty", + "hexnums", + "html_comment", + "identchars", + "identbodychars", + "infix_notation", + "java_style_comment", + "line", + "line_end", + "line_start", + "lineno", + "make_html_tags", + "make_xml_tags", + "match_only_at_col", + "match_previous_expr", + "match_previous_literal", + "nested_expr", + "null_debug_action", + "nums", + "one_of", + "original_text_for", + "printables", + "punc8bit", + "pyparsing_common", + "pyparsing_test", + "pyparsing_unicode", + "python_style_comment", + "quoted_string", + "remove_quotes", + "replace_with", + "replace_html_entity", + "rest_of_line", + "sgl_quoted_string", + "srange", + "string_end", + "string_start", + "token_map", + "trace_parse_action", + "ungroup", + "unicode_set", + "unicode_string", + "with_attribute", + "with_class", + # pre-PEP8 compatibility names + "__versionTime__", + "anyCloseTag", + "anyOpenTag", + "cStyleComment", + "commonHTMLEntity", + "conditionAsParseAction", + "countedArray", + "cppStyleComment", + "dblQuotedString", + "dblSlashComment", + "delimitedList", + "dictOf", + "htmlComment", + "indentedBlock", + "infixNotation", + "javaStyleComment", + "lineEnd", + "lineStart", + "locatedExpr", + "makeHTMLTags", + "makeXMLTags", + "matchOnlyAtCol", + "matchPreviousExpr", + "matchPreviousLiteral", + "nestedExpr", + "nullDebugAction", + "oneOf", + "opAssoc", + "originalTextFor", + "pythonStyleComment", + "quotedString", + "removeQuotes", + "replaceHTMLEntity", + "replaceWith", + "restOfLine", + "sglQuotedString", + "stringEnd", + "stringStart", + "tokenMap", + "traceParseAction", + "unicodeString", + "withAttribute", + "withClass", + "common", + "unicode", + "testing", +] diff --git a/external/python/pyparsing/actions.py b/external/python/pyparsing/actions.py new file mode 100644 index 00000000..ce51b395 --- /dev/null +++ b/external/python/pyparsing/actions.py @@ -0,0 +1,206 @@ +# actions.py + +from .exceptions import ParseException +from .util import col, replaced_by_pep8 + + +class OnlyOnce: + """ + Wrapper for parse actions, to ensure they are only called once. + """ + + def __init__(self, method_call): + from .core import _trim_arity + + self.callable = _trim_arity(method_call) + self.called = False + + def __call__(self, s, l, t): + if not self.called: + results = self.callable(s, l, t) + self.called = True + return results + raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") + + def reset(self): + """ + Allow the associated parse action to be called once more. + """ + + self.called = False + + +def match_only_at_col(n): + """ + Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + + def verify_col(strg, locn, toks): + if col(locn, strg) != n: + raise ParseException(strg, locn, f"matched token not at column {n}") + + return verify_col + + +def replace_with(repl_str): + """ + Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transform_string` (). 
+
+    Example::
+
+        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
+        term = na | num
+
+        term[1, ...].parse_string("324 234 N/A 234")  # -> [324, 234, nan, 234]
+    """
+    return lambda s, l, t: [repl_str]
+
+
+def remove_quotes(s, l, t):
+    """
+    Helper parse action for removing quotation marks from parsed
+    quoted strings.
+
+    Example::
+
+        # by default, quotation marks are included in parsed results
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use remove_quotes to strip quotation marks from parsed results
+        quoted_string.set_parse_action(remove_quotes)
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+
+def with_attribute(*args, **attr_dict):
+    """
+    Helper to create a validating parse action to be used with start
+    tags created with :class:`make_xml_tags` or
+    :class:`make_html_tags`. Use ``with_attribute`` to qualify
+    a starting tag with a required attribute value, to avoid false
+    matches on common tags such as ``<TD>`` or ``<DIV>``.
+
+    Call ``with_attribute`` with a series of attribute names and
+    values. Specify the list of filter attributes names and values as:
+
+    - keyword arguments, as in ``(align="right")``, or
+    - as an explicit dict with ``**`` operator, when an attribute
+      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
+    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
+
+    For attribute names with a namespace prefix, you must use the second
+    form. Attribute names are matched insensitive to upper/lower case.
+
+    If just testing for ``class`` (with or without a namespace), use
+    :class:`with_class`.
+
+    To verify that the attribute exists, but without specifying a value,
+    pass ``with_attribute.ANY_VALUE`` as the value.
+
+    Example::
+
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+ ''' + div,div_end = make_html_tags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().set_parse_action(with_attribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attr_dict.items() + attrs = [(k, v) for k, v in attrs] + + def pa(s, l, tokens): + for attrName, attrValue in attrs: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException( + s, + l, + f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}", + ) + + return pa + + +with_attribute.ANY_VALUE = object() # type: ignore [attr-defined] + + +def with_class(classname, namespace=""): + """ + Simplified version of :class:`with_attribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Example:: + + html = ''' +
<div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+ + ''' + div,div_end = make_html_tags("div") + div_grid = div().set_parse_action(with_class("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = f"{namespace}:class" if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# pre-PEP8 compatibility symbols +# fmt: off +replaceWith = replaced_by_pep8("replaceWith", replace_with) +removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes) +withAttribute = replaced_by_pep8("withAttribute", with_attribute) +withClass = replaced_by_pep8("withClass", with_class) +matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col) +# fmt: on diff --git a/external/python/pyparsing/common.py b/external/python/pyparsing/common.py new file mode 100644 index 00000000..74faa460 --- /dev/null +++ b/external/python/pyparsing/common.py @@ -0,0 +1,439 @@ +# common.py +from .core import * +from .helpers import DelimitedList, any_open_tag, any_close_tag +from datetime import datetime + + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers`, :class:`reals`, + :class:`scientific notation`) + - common :class:`programming identifiers` + - network addresses (:class:`MAC`, + :class:`IPv4`, :class:`IPv6`) + - ISO8601 :class:`dates` and + :class:`datetime` + - :class:`UUID` + - :class:`comma-separated list` + - :class:`url` + + Parse actions: + + - :class:`convert_to_integer` + - :class:`convert_to_float` + - :class:`convert_to_date` + - :class:`convert_to_datetime` + - :class:`strip_html_tags` + - :class:`upcase_tokens` + - :class:`downcase_tokens` + + Example:: + + pyparsing_common.number.run_tests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.run_tests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.run_tests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.run_tests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.run_tests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID)) + pyparsing_common.uuid.run_tests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + 
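+    # All of the expressions below are class-level attributes, so they can be
+    # used directly off the class without instantiating it; as an illustrative
+    # sketch (not executed by this module)::
+    #
+    #     pyparsing_common.number.parse_string("6.02e23")          # -> [6.02e+23]
+    #     pyparsing_common.ipv4_address.parse_string("127.0.0.1")  # -> ['127.0.0.1']
+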
convert_to_integer = token_map(int) + """ + Parse action for converting parsed integers to Python int + """ + + convert_to_float = token_map(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = ( + Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) + ) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = ( + Regex(r"[+-]?\d+") + .set_name("signed integer") + .set_parse_action(convert_to_integer) + ) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = ( + signed_integer().set_parse_action(convert_to_float) + + "/" + + signed_integer().set_parse_action(convert_to_float) + ).set_name("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) + + mixed_integer = ( + fraction | signed_integer + Opt(Opt("-").suppress() + fraction) + ).set_name("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.add_parse_action(sum) + + real = ( + Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") + .set_name("real number") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number and returns a float""" + + sci_real = ( + Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") + .set_name("real number with scientific notation") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number with optional + scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).set_name("number").streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = ( + Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") + .set_name("fnumber") + .set_parse_action(convert_to_float) + ) + """any int or real number, returned as float""" + + ieee_float = ( + Regex(r"(?i)[+-]?((\d+\.?\d*(e[+-]?\d+)?)|nan|inf(inity)?)") + .set_name("ieee_float") + .set_parse_action(convert_to_float) + ) + """any floating-point literal (int, real number, infinity, or NaN), returned as float""" + + identifier = Word(identchars, identbodychars).set_name("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex( + r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" + ).set_name("IPv4 address") + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" + + _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") + _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( + "full IPv6 address" + ) + _short_ipv6_address = ( + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + + "::" + + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + ).set_name("short IPv6 address") + _short_ipv6_address.add_condition( + lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 + ) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") + ipv6_address = Combine( + (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( + "IPv6 address" + ) + ).set_name("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex( + 
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" + ).set_name("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)" + + @staticmethod + def convert_to_date(fmt: str = "%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.set_parse_action(pyparsing_common.convert_to_date()) + print(date_expr.parse_string("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + + def cvt_fn(ss, ll, tt): + try: + return datetime.strptime(tt[0], fmt).date() + except ValueError as ve: + raise ParseException(ss, ll, str(ve)) + + return cvt_fn + + @staticmethod + def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.set_parse_action(pyparsing_common.convert_to_datetime()) + print(dt_expr.parse_string("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + + return cvt_fn + + iso8601_date = Regex( + r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" + ).set_name("ISO8601 date") + "ISO8601 date (``yyyy-mm-dd``)" + + iso8601_datetime = Regex( + r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" + ).set_name("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() + + @staticmethod + def strip_html_tags(s: str, l: int, tokens: ParseResults): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = 'More info at the pyparsing wiki page' + td, td_end = make_html_tags("TD") + table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end + print(table_text.parse_string(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transform_string(tokens[0]) + + _commasepitem = ( + Combine( + OneOrMore( + ~Literal(",") + + ~LineEnd() + + Word(printables, exclude_chars=",") + + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) + ) + ) + .streamline() + .set_name("commaItem") + ) + comma_separated_list = DelimitedList( + Opt(quoted_string.copy() | _commasepitem, default="") + ).set_name("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcase_tokens = staticmethod(token_map(lambda t: t.upper())) + """Parse action to convert tokens to upper case.""" + + downcase_tokens = staticmethod(token_map(lambda t: t.lower())) + """Parse action to convert tokens to lower case.""" + + # fmt: off + url = Regex( + # https://mathiasbynens.be/demo/url-regex + # https://gist.github.com/dperini/729294 + r"(?P" 
+        # protocol identifier (optional)
+        # short syntax // still required
+        r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
+        # user:pass BasicAuth (optional)
+        r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
+        r"(?P<host>" +
+        # IP address exclusion
+        # private & local networks
+        r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
+        r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
+        r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
+        # IP address dotted notation octets
+        # excludes loopback network 0.0.0.0
+        # excludes reserved space >= 224.0.0.0
+        # excludes network & broadcast addresses
+        # (first & last IP address of each class)
+        r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
+        r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
+        r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
+        r"|" +
+        # host & domain names, may end with dot
+        # can be replaced by a shortest alternative
+        # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
+        r"(?:" +
+        r"(?:" +
+        r"[a-z0-9\u00a1-\uffff]" +
+        r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
+        r")?" +
+        r"[a-z0-9\u00a1-\uffff]\." +
+        r")+" +
+        # TLD identifier name, may end with dot
+        r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
+        r")" +
+        # port number (optional)
+        r"(:(?P<port>\d{2,5}))?" +
+        # resource path (optional)
+        r"(?P<path>\/[^?# ]*)?" +
+        # query string (optional)
+        r"(\?(?P<query>[^#]*))?" +
+        # fragment (optional)
+        r"(#(?P<fragment>\S*))?" +
+        r")"
+    ).set_name("url")
+    """URL (http/https/ftp scheme)"""
+    # fmt: on
+
+    # pre-PEP8 compatibility names
+    convertToInteger = convert_to_integer
+    """Deprecated - use :class:`convert_to_integer`"""
+    convertToFloat = convert_to_float
+    """Deprecated - use :class:`convert_to_float`"""
+    convertToDate = convert_to_date
+    """Deprecated - use :class:`convert_to_date`"""
+    convertToDatetime = convert_to_datetime
+    """Deprecated - use :class:`convert_to_datetime`"""
+    stripHTMLTags = strip_html_tags
+    """Deprecated - use :class:`strip_html_tags`"""
+    upcaseTokens = upcase_tokens
+    """Deprecated - use :class:`upcase_tokens`"""
+    downcaseTokens = downcase_tokens
+    """Deprecated - use :class:`downcase_tokens`"""
+
+
+_builtin_exprs = [
+    v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
+]
diff --git a/external/python/pyparsing/core.py b/external/python/pyparsing/core.py
new file mode 100644
index 00000000..b19d1221
--- /dev/null
+++ b/external/python/pyparsing/core.py
@@ -0,0 +1,6087 @@
+#
+# core.py
+#
+
+from collections import deque
+import os
+import typing
+from typing import (
+    Any,
+    Callable,
+    Generator,
+    List,
+    NamedTuple,
+    Sequence,
+    Set,
+    TextIO,
+    Tuple,
+    Union,
+    cast,
+)
+from abc import ABC, abstractmethod
+from enum import Enum
+import string
+import copy
+import warnings
+import re
+import sys
+from collections.abc import Iterable
+import traceback
+import types
+from operator import itemgetter
+from functools import wraps
+from threading import RLock
+from pathlib import Path
+
+from .util import (
+    _FifoCache,
+    _UnboundedCache,
+    __config_flags,
+    _collapse_string_to_ranges,
+    _escape_regex_range_chars,
+    _bslash,
+    _flatten,
+    LRUMemo as _LRUMemo,
+    UnboundedMemo as _UnboundedMemo,
+    replaced_by_pep8,
+)
+from .exceptions import *
+from .actions import *
+from .results import ParseResults, _ParseResultsWithOffset
+from .unicode import pyparsing_unicode
+
+_MAX_INT = sys.maxsize
+str_type: Tuple[type, ...] = (str, bytes)
+
+#
+# Copyright (c) 2003-2022 Paul T.
McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + + +if sys.version_info >= (3, 8): + from functools import cached_property +else: + + class cached_property: + def __init__(self, func): + self._func = func + + def __get__(self, instance, owner=None): + ret = instance.__dict__[self._func.__name__] = self._func(instance) + return ret + + +class __compat__(__config_flags): + """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; + maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 + behavior + """ + + _type_desc = "compatibility" + + collect_all_And_tokens = True + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _fixed_names = """ + collect_all_And_tokens + """.split() + + +class __diag__(__config_flags): + _type_desc = "diagnostic" + + warn_multiple_tokens_in_named_alternation = False + warn_ungrouped_named_tokens_in_collection = False + warn_name_set_on_empty_Forward = False + warn_on_parse_using_empty_Forward = False + warn_on_assignment_to_Forward = False + warn_on_multiple_string_args_to_oneof = False + warn_on_match_first_with_lshift_operator = False + enable_debug_on_named_expressions = False + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _warning_names = [name for name in _all_names if name.startswith("warn")] + _debug_names = [name for name in _all_names if name.startswith("enable_debug")] + + @classmethod + def enable_all_warnings(cls) -> None: + for name in cls._warning_names: + cls.enable(name) + + +class Diagnostics(Enum): + """ + Diagnostic configuration (all default to disabled) + + - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results + name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions + - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined + with 
a results name, but has no contents defined + - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is + defined in a grammar but has never had an expression attached to it + - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined + but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` + - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is + incorrectly called with multiple str arguments + - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent + calls to :class:`ParserElement.set_name` + + Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. + All warnings can be enabled by calling :class:`enable_all_warnings`. + """ + + warn_multiple_tokens_in_named_alternation = 0 + warn_ungrouped_named_tokens_in_collection = 1 + warn_name_set_on_empty_Forward = 2 + warn_on_parse_using_empty_Forward = 3 + warn_on_assignment_to_Forward = 4 + warn_on_multiple_string_args_to_oneof = 5 + warn_on_match_first_with_lshift_operator = 6 + enable_debug_on_named_expressions = 7 + + +def enable_diag(diag_enum: Diagnostics) -> None: + """ + Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.enable(diag_enum.name) + + +def disable_diag(diag_enum: Diagnostics) -> None: + """ + Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.disable(diag_enum.name) + + +def enable_all_warnings() -> None: + """ + Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). + """ + __diag__.enable_all_warnings() + + +# hide abstract class +del __config_flags + + +def _should_enable_warnings( + cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] +) -> bool: + enable = bool(warn_env_var) + for warn_opt in cmd_line_warn_options: + w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( + ":" + )[:5] + if not w_action.lower().startswith("i") and ( + not (w_message or w_category or w_module) or w_module == "pyparsing" + ): + enable = True + elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): + enable = False + return enable + + +if _should_enable_warnings( + sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") +): + enable_all_warnings() + + +# build list of single arg builtins, that can be used as parse actions +_single_arg_builtins = { + sum, + len, + sorted, + reversed, + list, + tuple, + set, + any, + all, + min, + max, +} + +_generatorType = types.GeneratorType +ParseImplReturnType = Tuple[int, Any] +PostParseReturnType = Union[ParseResults, Sequence[ParseResults]] +ParseAction = Union[ + Callable[[], Any], + Callable[[ParseResults], Any], + Callable[[int, ParseResults], Any], + Callable[[str, int, ParseResults], Any], +] +ParseCondition = Union[ + Callable[[], bool], + Callable[[ParseResults], bool], + Callable[[int, ParseResults], bool], + Callable[[str, int, ParseResults], bool], +] +ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] +DebugStartAction = Callable[[str, int, "ParserElement", bool], None] +DebugSuccessAction = Callable[ + [str, int, int, "ParserElement", ParseResults, bool], None +] +DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] + + +alphas = string.ascii_uppercase + string.ascii_lowercase +identchars = pyparsing_unicode.Latin1.identchars +identbodychars = 
pyparsing_unicode.Latin1.identbodychars +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +printables = "".join([c for c in string.printable if c not in string.whitespace]) + +_trim_arity_call_line: traceback.StackSummary = None # type: ignore[assignment] + + +def _trim_arity(func, max_limit=3): + """decorator to trim function calls to match the arity of the target""" + global _trim_arity_call_line + + if func in _single_arg_builtins: + return lambda s, l, t: func(t) + + limit = 0 + found_arity = False + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + # fmt: off + LINE_DIFF = 7 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! + _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) + pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) + + def wrapper(*args): + nonlocal found_arity, limit + while 1: + try: + ret = func(*args[limit:]) + found_arity = True + return ret + except TypeError as te: + # re-raise TypeErrors if they did not come from our arity testing + if found_arity: + raise + else: + tb = te.__traceback__ + frames = traceback.extract_tb(tb, limit=2) + frame_summary = frames[-1] + trim_arity_type_error = ( + [frame_summary[:2]][-1][:2] == pa_call_line_synth + ) + del tb + + if trim_arity_type_error: + if limit < max_limit: + limit += 1 + continue + + raise + # fmt: on + + # copy func name to wrapper for sensible debug output + # (can't use functools.wraps, since that messes with function signature) + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + wrapper.__name__ = func_name + wrapper.__doc__ = func.__doc__ + + return wrapper + + +def condition_as_parse_action( + fn: ParseCondition, message: typing.Optional[str] = None, fatal: bool = False +) -> ParseAction: + """ + Function to convert a simple predicate function that returns ``True`` or ``False`` + into a parse action. Can be used in places when a parse action is required + and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition + to an operator level in :class:`infix_notation`). 
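+
+    For example (an illustrative sketch; ``under_100`` and ``small_int`` are
+    hypothetical names)::
+
+        under_100 = condition_as_parse_action(
+            lambda toks: int(toks[0]) < 100, message="expected a number < 100"
+        )
+        small_int = Word(nums).add_parse_action(under_100)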
+ + Optional keyword arguments: + + - ``message`` - define a custom message to be used in the raised exception + - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; + otherwise will raise :class:`ParseException` + + """ + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + + +def _default_start_debug_action( + instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False +): + cache_hit_str = "*" if cache_hit else "" + print( + ( + f"{cache_hit_str}Match {expr} at loc {loc}({lineno(loc, instring)},{col(loc, instring)})\n" + f" {line(loc, instring)}\n" + f" {' ' * (col(loc, instring) - 1)}^" + ) + ) + + +def _default_success_debug_action( + instring: str, + startloc: int, + endloc: int, + expr: "ParserElement", + toks: ParseResults, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print(f"{cache_hit_str}Matched {expr} -> {toks.as_list()}") + + +def _default_exception_debug_action( + instring: str, + loc: int, + expr: "ParserElement", + exc: Exception, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print(f"{cache_hit_str}Match {expr} failed, {type(exc).__name__} raised: {exc}") + + +def null_debug_action(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + + +class ParserElement(ABC): + """Abstract base level parser element class.""" + + DEFAULT_WHITE_CHARS: str = " \n\t\r" + verbose_stacktrace: bool = False + _literalStringClass: type = None # type: ignore[assignment] + + @staticmethod + def set_default_whitespace_chars(chars: str) -> None: + r""" + Overrides the default whitespace chars + + Example:: + + # default whitespace chars are space, and newline + Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.set_default_whitespace_chars(" \t") + Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + # update whitespace all parse expressions defined in this module + for expr in _builtin_exprs: + if expr.copyDefaultWhiteChars: + expr.whiteChars = set(chars) + + @staticmethod + def inline_literals_using(cls: type) -> None: + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inline_literals_using(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + @classmethod + def using_each(cls, seq, **class_kwargs): + """ + Yields a sequence of class(obj, **class_kwargs) for obj in seq. 
+ + Example:: + + LPAR, RPAR, LBRACE, RBRACE, SEMI = Suppress.using_each("(){};") + + """ + yield from (cls(obj, **class_kwargs) for obj in seq) + + class DebugActions(NamedTuple): + debug_try: typing.Optional[DebugStartAction] + debug_match: typing.Optional[DebugSuccessAction] + debug_fail: typing.Optional[DebugExceptionAction] + + def __init__(self, savelist: bool = False): + self.parseAction: List[ParseAction] = list() + self.failAction: typing.Optional[ParseFailAction] = None + self.customName: str = None # type: ignore[assignment] + self._defaultName: typing.Optional[str] = None + self.resultsName: str = None # type: ignore[assignment] + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + # used when checking for left-recursion + self.mayReturnEmpty = False + self.keepTabs = False + self.ignoreExprs: List["ParserElement"] = list() + self.debug = False + self.streamlined = False + # optimize exception handling for subclasses that don't advance parse index + self.mayIndexError = True + self.errmsg = "" + # mark results names as modal (report only last) or cumulative (list all) + self.modalResults = True + # custom debug actions + self.debugActions = self.DebugActions(None, None, None) + # avoid redundant calls to preParse + self.callPreparse = True + self.callDuringTry = False + self.suppress_warnings_: List[Diagnostics] = [] + + def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": + """ + Suppress warnings emitted for a particular diagnostic on this expression. + + Example:: + + base = pp.Forward() + base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) + + # statement would normally raise a warning, but is now suppressed + print(base.parse_string("x")) + + """ + self.suppress_warnings_.append(warning_type) + return self + + def visit_all(self): + """General-purpose method to yield all expressions and sub-expressions + in a grammar. Typically just for internal use. + """ + to_visit = deque([self]) + seen = set() + while to_visit: + cur = to_visit.popleft() + + # guard against looping forever through recursive grammars + if cur in seen: + continue + seen.add(cur) + + to_visit.extend(cur.recurse()) + yield cur + + def copy(self) -> "ParserElement": + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. + + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + return cpy + + def set_results_name( + self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False + ) -> "ParserElement": + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. 
+ + Normally, results names are assigned as you would assign keys in a dict: + any existing value is overwritten by later values. If it is necessary to + keep all values captured for a particular results name, call ``set_results_name`` + with ``list_all_matches`` = True. + + NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.set_results_name("name")`` + - see :class:`__call__`. If ``list_all_matches`` is required, use + ``expr("name*")``. + + Example:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + listAllMatches = listAllMatches or list_all_matches + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): + if name is None: + return self + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches = True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def set_break(self, break_flag: bool = True) -> "ParserElement": + """ + Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to + disable. + """ + if break_flag: + _parseMethod = self._parse + + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + + # this call to pdb.set_trace() is intentional, not a checkin error + pdb.set_trace() + return _parseMethod(instring, loc, doActions, callPreParse) + + breaker._originalParseMethod = _parseMethod # type: ignore [attr-defined] + self._parse = breaker # type: ignore [assignment] + elif hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod # type: ignore [attr-defined, assignment] + return self + + def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": + """ + Define one or more actions to perform when successfully matching parse element definition. + + Parse actions can be called to perform data conversions, do extra validation, + update external data structures, or enhance or replace the parsed tokens. + Each parse action ``fn`` is a callable method with 0-3 arguments, called as + ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - ``s`` = the original string being parsed (see note below) + - ``loc`` = the location of the matching substring + - ``toks`` = a list of the matched tokens, packaged as a :class:`ParseResults` object + + The parsed tokens are passed to the parse action as ParseResults. They can be + modified in place using list-style append, extend, and pop operations to update + the parsed list elements; and with dictionary-style item set and del operations + to add, update, or remove any named results. If the tokens are modified in place, + it is not necessary to return them with a return statement. + + Parse actions can also completely replace the given tokens, with another ``ParseResults`` + object, or with some entirely different object (common for parse actions that perform data + conversions). 
A convenient way to build a new parse result is to define the values
+        using a dict, and then create the return value using :class:`ParseResults.from_dict`.
+
+        If None is passed as the ``fn`` parse action, all previously added parse actions for this
+        expression are cleared.
+
+        Optional keyword arguments:
+
+        - ``call_during_try`` = (default= ``False``) indicate if parse action should be run during
+          lookaheads and alternate testing. For parse actions that have side effects, it is
+          important to only call the parse action once it is determined that it is being
+          called as part of a successful parse. For parse actions that perform additional
+          validation, ``call_during_try`` should be passed as True, so that the validation
+          code is included in the preliminary "try" parses.
+
+        Note: the default parsing behavior is to expand tabs in the input string
+        before starting the parsing process. See :class:`parse_string` for more
+        information on parsing strings containing ``<TAB>`` s, and suggested
+        methods to maintain a consistent view of the parsed string, the parse
+        location, and line and column positions within the parsed string.
+
+        Example::
+
+            # parse dates in the form YYYY/MM/DD
+
+            # use parse action to convert toks from str to int at parse time
+            def convert_to_int(toks):
+                return int(toks[0])
+
+            # use a parse action to verify that the date is a valid date
+            def is_valid_date(instring, loc, toks):
+                from datetime import date
+                year, month, day = toks[::2]
+                try:
+                    date(year, month, day)
+                except ValueError:
+                    raise ParseException(instring, loc, "invalid date given")
+
+            integer = Word(nums)
+            date_str = integer + '/' + integer + '/' + integer
+
+            # add parse actions
+            integer.set_parse_action(convert_to_int)
+            date_str.set_parse_action(is_valid_date)
+
+            # note that integer fields are now ints, not strings
+            date_str.run_tests('''
+                # successful parse - note that integer fields were converted to ints
+                1999/12/31
+
+                # fail - invalid date
+                1999/13/31
+                ''')
+        """
+        if list(fns) == [None]:
+            self.parseAction = []
+            return self
+
+        if not all(callable(fn) for fn in fns):
+            raise TypeError("parse actions must be callable")
+        self.parseAction = [_trim_arity(fn) for fn in fns]
+        self.callDuringTry = kwargs.get(
+            "call_during_try", kwargs.get("callDuringTry", False)
+        )
+
+        return self
+
+    def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
+        """
+        Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
+
+        See examples in :class:`copy`.
+        """
+        self.parseAction += [_trim_arity(fn) for fn in fns]
+        self.callDuringTry = self.callDuringTry or kwargs.get(
+            "call_during_try", kwargs.get("callDuringTry", False)
+        )
+        return self
+
+    def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
+        """Add a boolean predicate function to expression's list of parse actions. See
+        :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
+        functions passed to ``add_condition`` need to return boolean success/fail of the condition.
+
+        Optional keyword arguments:
+
+        - ``message`` = define a custom message to be used in the raised exception
+        - ``fatal`` = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
+          ParseException
+        - ``call_during_try`` = boolean to indicate if this method should be called during internal tryParse calls,
+          default=False
+
+        Example::
+
+            integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+            year_int = integer.copy()
+            year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+            date_str = year_int + '/' + integer + '/' + integer
+
+            result = date_str.parse_string("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0),
+                                                          #    (line:1, col:1)
+        """
+        for fn in fns:
+            self.parseAction.append(
+                condition_as_parse_action(
+                    fn,
+                    message=str(kwargs.get("message")),
+                    fatal=bool(kwargs.get("fatal", False)),
+                )
+            )
+
+        self.callDuringTry = self.callDuringTry or kwargs.get(
+            "call_during_try", kwargs.get("callDuringTry", False)
+        )
+        return self
+
+    def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
+        """
+        Define action to perform if parsing fails at this expression.
+        Fail action fn is a callable function that takes the arguments
+        ``fn(s, loc, expr, err)`` where:
+
+        - ``s`` = string being parsed
+        - ``loc`` = location where expression match was attempted and failed
+        - ``expr`` = the parse expression that failed
+        - ``err`` = the exception thrown
+
+        The function returns no value.  It may throw :class:`ParseFatalException`
+        if it is desired to stop parsing immediately."""
+        self.failAction = fn
+        return self
+
+    def _skipIgnorables(self, instring: str, loc: int) -> int:
+        if not self.ignoreExprs:
+            return loc
+        exprsFound = True
+        ignore_expr_fns = [e._parse for e in self.ignoreExprs]
+        last_loc = loc
+        while exprsFound:
+            exprsFound = False
+            for ignore_fn in ignore_expr_fns:
+                try:
+                    while 1:
+                        loc, dummy = ignore_fn(instring, loc)
+                        exprsFound = True
+                except ParseException:
+                    pass
+            # check if all ignore exprs matched but didn't actually advance the parse location
+            if loc == last_loc:
+                break
+            last_loc = loc
+        return loc
+
+    def preParse(self, instring: str, loc: int) -> int:
+        if self.ignoreExprs:
+            loc = self._skipIgnorables(instring, loc)
+
+        if self.skipWhitespace:
+            instrlen = len(instring)
+            white_chars = self.whiteChars
+            while loc < instrlen and instring[loc] in white_chars:
+                loc += 1
+
+        return loc
+
+    def parseImpl(self, instring, loc, doActions=True):
+        return loc, []
+
+    def postParse(self, instring, loc, tokenlist):
+        return tokenlist
+
+    # @profile
+    def _parseNoCache(
+        self, instring, loc, doActions=True, callPreParse=True
+    ) -> Tuple[int, ParseResults]:
+        TRY, MATCH, FAIL = 0, 1, 2
+        debugging = self.debug  # and doActions)
+        len_instring = len(instring)
+
+        if debugging or self.failAction:
+            # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
+            try:
+                if callPreParse and self.callPreparse:
+                    pre_loc = self.preParse(instring, loc)
+                else:
+                    pre_loc = loc
+                tokens_start = pre_loc
+                if self.debugActions.debug_try:
+                    self.debugActions.debug_try(instring, tokens_start, self, False)
+                if self.mayIndexError or pre_loc >= len_instring:
+                    try:
+                        loc, tokens = self.parseImpl(instring, pre_loc, doActions)
+                    except IndexError:
+                        raise ParseException(instring, len_instring, self.errmsg, self)
+                else:
+                    loc, tokens = self.parseImpl(instring, pre_loc, doActions)
+            except Exception as err:
+                # print("Exception
raised:", err) + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + if self.failAction: + self.failAction(instring, tokens_start, self, err) + raise + else: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + + tokens = self.postParse(instring, loc, tokens) + + ret_tokens = ParseResults( + tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults + ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + except Exception as err: + # print "Exception raised in user parse action:", err + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + if debugging: + # print("Matched", self, "->", ret_tokens.as_list()) + if self.debugActions.debug_match: + self.debugActions.debug_match( + instring, tokens_start, loc, self, ret_tokens, False + ) + + return loc, ret_tokens + + def try_parse( + self, + instring: str, + loc: int, + *, + raise_fatal: bool = False, + do_actions: bool = False, + ) -> int: + try: + return self._parse(instring, loc, doActions=do_actions)[0] + except ParseFatalException: + if raise_fatal: + raise + raise ParseException(instring, loc, self.errmsg, self) + + def can_parse_next(self, instring: str, loc: int, do_actions: bool = False) -> bool: + try: + self.try_parse(instring, loc, do_actions=do_actions) + except (ParseException, IndexError): + return False + else: + return True + + # cache for left-recursion in Forward references + recursion_lock = RLock() + recursion_memos: typing.Dict[ + Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] + ] = {} + + class _CacheType(dict): + """ + class to help type checking + """ + + not_in_cache: bool + + def get(self, *args): ... + + def set(self, *args): ... 
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = (
+        _CacheType()
+    )  # set later by enable_packrat(); this is here so that reset_cache() doesn't fail
+    packrat_cache_lock = RLock()
+    packrat_cache_stats = [0, 0]
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache(
+        self, instring, loc, doActions=True, callPreParse=True
+    ) -> Tuple[int, ParseResults]:
+        HIT, MISS = 0, 1
+        TRY, MATCH, FAIL = 0, 1, 2
+        lookup = (self, instring, loc, callPreParse, doActions)
+        with ParserElement.packrat_cache_lock:
+            cache = ParserElement.packrat_cache
+            value = cache.get(lookup)
+            if value is cache.not_in_cache:
+                ParserElement.packrat_cache_stats[MISS] += 1
+                try:
+                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
+                except ParseBaseException as pe:
+                    # cache a copy of the exception, without the traceback
+                    cache.set(lookup, pe.__class__(*pe.args))
+                    raise
+                else:
+                    cache.set(lookup, (value[0], value[1].copy(), loc))
+                    return value
+            else:
+                ParserElement.packrat_cache_stats[HIT] += 1
+                if self.debug and self.debugActions.debug_try:
+                    try:
+                        self.debugActions.debug_try(instring, loc, self, cache_hit=True)  # type: ignore [call-arg]
+                    except TypeError:
+                        pass
+                if isinstance(value, Exception):
+                    if self.debug and self.debugActions.debug_fail:
+                        try:
+                            self.debugActions.debug_fail(
+                                instring, loc, self, value, cache_hit=True  # type: ignore [call-arg]
+                            )
+                        except TypeError:
+                            pass
+                    raise value
+
+                value = cast(Tuple[int, ParseResults, int], value)
+                loc_, result, endloc = value[0], value[1].copy(), value[2]
+                if self.debug and self.debugActions.debug_match:
+                    try:
+                        self.debugActions.debug_match(
+                            instring, loc_, endloc, self, result, cache_hit=True  # type: ignore [call-arg]
+                        )
+                    except TypeError:
+                        pass
+
+                return loc_, result
+
+    _parse = _parseNoCache
+
+    @staticmethod
+    def reset_cache() -> None:
+        ParserElement.packrat_cache.clear()
+        ParserElement.packrat_cache_stats[:] = [0] * len(
+            ParserElement.packrat_cache_stats
+        )
+        ParserElement.recursion_memos.clear()
+
+    _packratEnabled = False
+    _left_recursion_enabled = False
+
+    @staticmethod
+    def disable_memoization() -> None:
+        """
+        Disables active Packrat or Left Recursion parsing and their memoization
+
+        This method also works if neither Packrat nor Left Recursion is enabled.
+        This makes it safe to call before activating Packrat or Left Recursion
+        to clear any previous settings.
+        """
+        ParserElement.reset_cache()
+        ParserElement._left_recursion_enabled = False
+        ParserElement._packratEnabled = False
+        ParserElement._parse = ParserElement._parseNoCache
+
+    @staticmethod
+    def enable_left_recursion(
+        cache_size_limit: typing.Optional[int] = None, *, force=False
+    ) -> None:
+        """
+        Enables "bounded recursion" parsing, which allows for both direct and indirect
+        left-recursion. During parsing, left-recursive :class:`Forward` elements are
+        repeatedly matched with a fixed recursion depth that is gradually increased
+        until finding the longest match.
+
+        Example::
+
+            import pyparsing as pp
+            pp.ParserElement.enable_left_recursion()
+
+            E = pp.Forward("E")
+            num = pp.Word(pp.nums)
+            # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
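+            # (E is left-recursive: it appears leftmost in its own definition,
+            # which plain recursive-descent parsing could not handle)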
+ E <<= E + '+' - num | num + + print(E.parse_string("1+2+3")) + + Recursion search naturally memoizes matches of ``Forward`` elements and may + thus skip reevaluation of parse actions during backtracking. This may break + programs with parse actions which rely on strict ordering of side-effects. + + Parameters: + + - ``cache_size_limit`` - (default=``None``) - memoize at most this many + ``Forward`` elements during matching; if ``None`` (the default), + memoize all ``Forward`` elements. + + Bounded Recursion parsing works similar but not identical to Packrat parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. + """ + if force: + ParserElement.disable_memoization() + elif ParserElement._packratEnabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if cache_size_limit is None: + ParserElement.recursion_memos = _UnboundedMemo() # type: ignore[assignment] + elif cache_size_limit > 0: + ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) # type: ignore[assignment] + else: + raise NotImplementedError(f"Memo size of {cache_size_limit}") + ParserElement._left_recursion_enabled = True + + @staticmethod + def enable_packrat( + cache_size_limit: Union[int, None] = 128, *, force: bool = False + ) -> None: + """ + Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + + - ``cache_size_limit`` - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method :class:`ParserElement.enable_packrat`. + For best results, call ``enable_packrat()`` immediately after + importing pyparsing. + + Example:: + + import pyparsing + pyparsing.ParserElement.enable_packrat() + + Packrat parsing works similar but not identical to Bounded Recursion parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. + """ + if force: + ParserElement.disable_memoization() + elif ParserElement._left_recursion_enabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + + if ParserElement._packratEnabled: + return + + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = _UnboundedCache() + else: + ParserElement.packrat_cache = _FifoCache(cache_size_limit) # type: ignore[assignment] + ParserElement._parse = ParserElement._parseCache + + def parse_string( + self, instring: str, parse_all: bool = False, *, parseAll: bool = False + ) -> ParseResults: + """ + Parse a string with respect to the parser definition. This function is intended as the primary interface to the + client code. + + :param instring: The input string to be parsed. + :param parse_all: If set, the entire input string must match the grammar. + :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. 
+ :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. + :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or + an object with attributes if the given parser includes results names. + + If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This + is also equivalent to ending the grammar with :class:`StringEnd`\\ (). + + To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are + converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string + contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string + being parsed, one can ensure a consistent view of the input string by doing one of the following: + + - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), + - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the + parse action's ``s`` argument, or + - explicitly expand the tabs in your input string before calling ``parse_string``. + + Examples: + + By default, partial matches are OK. + + >>> res = Word('a').parse_string('aaaaabaaa') + >>> print(res) + ['aaaaa'] + + The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children + directly to see more examples. + + It raises an exception if parse_all flag is set and instring does not match the whole grammar. + + >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) + Traceback (most recent call last): + ... + pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) + """ + parseAll = parse_all or parseAll + + ParserElement.reset_cache() + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parseAll: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd() + se._parse(instring, loc) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + raise exc.with_traceback(None) + else: + return tokens + + def scan_string( + self, + instring: str, + max_matches: int = _MAX_INT, + overlap: bool = False, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> Generator[Tuple[ParseResults, int, int], None, None]: + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``max_matches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parse_string` for more information on parsing + strings with embedded tabs. 
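+
+        If ``overlap`` is specified, a match does not prevent further matches
+        from starting inside it; for instance, scanning ``"abcd"`` with
+        ``Word(alphas, exact=3)`` and ``overlap=True`` yields matches starting
+        at locations 0 and 1.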
+ + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scan_string(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + maxMatches = min(maxMatches, max_matches) + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = str(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc: int = preparseFn(instring, loc) + nextLoc: int + tokens: ParseResults + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + if debug: + print( + { + "tokens": tokens.asList(), + "start": preloc, + "end": nextLoc, + } + ) + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def transform_string(self, instring: str, *, debug: bool = False) -> str: + """ + Extension to :class:`scan_string`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transform_string``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transform_string()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transform_string()`` returns the resulting transformed string. + + Example:: + + wd = Word(alphas) + wd.set_parse_action(lambda toks: toks[0].title()) + + print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out: List[str] = [] + lastE = 0 + # force preservation of s, to minimize unwanted transformation of string, and to + # keep string locs straight between transform_string and scan_string + self.keepTabs = True + try: + for t, s, e in self.scan_string(instring, debug=debug): + out.append(instring[lastE:s]) + lastE = e + + if not t: + continue + + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out.extend(t) + else: + out.append(t) + + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join([str(s) for s in _flatten(out)]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def search_string( + self, + instring: str, + max_matches: int = _MAX_INT, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> ParseResults: + """ + Another extension to :class:`scan_string`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``max_matches`` argument, to clip searching after 'n' matches are found. 
+ + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + maxMatches = min(maxMatches, max_matches) + try: + return ParseResults( + [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] + ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def split( + self, + instring: str, + maxsplit: int = _MAX_INT, + include_separators: bool = False, + *, + includeSeparators=False, + ) -> Generator[str, None, None]: + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``include_separators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example:: + + punc = one_of(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + includeSeparators = includeSeparators or include_separators + last = 0 + for t, s, e in self.scan_string(instring, max_matches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other) -> "ParserElement": + """ + Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement` + converts them to :class:`Literal`\\ s by default. + + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + + prints:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`:: + + Literal('start') + ... + Literal('end') + + is equivalent to:: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. 
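+
+        A short illustrative sketch (an editorial addition, not upstream documentation)::
+
+            # the skipped text is collected under the '_skipped' results name
+            expr = Literal('start') + ... + Literal('end')
+            print(expr.parse_string('start 123 end'))
+            # -> ['start', '123 ', 'end']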
+ """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return And([self, other]) + + def __radd__(self, other) -> "ParserElement": + """ + Implementation of ``+`` operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other + self + + def __sub__(self, other) -> "ParserElement": + """ + Implementation of ``-`` operator, returns :class:`And` with error stop + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self + And._ErrorStop() + other + + def __rsub__(self, other) -> "ParserElement": + """ + Implementation of ``-`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other - self + + def __mul__(self, other) -> "ParserElement": + """ + Implementation of ``*`` operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0,) + other[1:] + (None,))[:2] + + if not isinstance(other, (int, tuple)): + return NotImplemented + + if isinstance(other, int): + minElements, optElements = other, 0 + else: + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + return NotImplemented + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError( + "second tuple value must be greater or equal to first tuple value" + ) + if minElements == optElements == 0: + return And([]) + + if optElements: + + def makeOptionalList(n): + if n > 1: + return Opt(self + makeOptionalList(n - 1)) + else: + return Opt(self) + + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other) -> "ParserElement": + return self.__mul__(other) + + def __or__(self, other) -> "ParserElement": + """ + Implementation of ``|`` operator - returns :class:`MatchFirst` + """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, str_type): + # `expr | ""` is equivalent to `Opt(expr)` + if other == "": + return Opt(self) + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return MatchFirst([self, other]) + + def __ror__(self, other) -> "ParserElement": + """ + Implementation of ``|`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other | self + + def __xor__(self, other) -> "ParserElement": + """ + Implementation of ``^`` operator - returns :class:`Or` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return Or([self, other]) + + def __rxor__(self, other) -> "ParserElement": + """ + Implementation of ``^`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other ^ self + + def __and__(self, other) -> "ParserElement": + """ + Implementation of ``&`` operator - returns :class:`Each` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return Each([self, other]) + + def __rand__(self, other) -> "ParserElement": + """ + Implementation of ``&`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not 
isinstance(other, ParserElement): + return NotImplemented + return other & self + + def __invert__(self) -> "ParserElement": + """ + Implementation of ``~`` operator - returns :class:`NotAny` + """ + return NotAny(self) + + # disable __iter__ to override legacy use of sequential access to __getitem__ to + # iterate over a sequence + __iter__ = None + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception + if more than ``n`` ``expr``\\ s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + + For repetition with a stop_on expression, use slice notation: + + - ``expr[...: end_expr]`` and ``expr[0, ...: end_expr]`` are equivalent to ``ZeroOrMore(expr, stop_on=end_expr)`` + - ``expr[1, ...: end_expr]`` is equivalent to ``OneOrMore(expr, stop_on=end_expr)`` + + """ + + stop_on_defined = False + stop_on = NoMatch() + if isinstance(key, slice): + key, stop_on = key.start, key.stop + if key is None: + key = ... + stop_on_defined = True + elif isinstance(key, tuple) and isinstance(key[-1], slice): + key, stop_on = (key[0], key[1].start), key[1].stop + stop_on_defined = True + + # convert single arg keys to tuples + if isinstance(key, str_type): + key = (key,) + try: + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + raise TypeError( + f"only 1 or 2 index arguments supported ({key[:5]}{f'... [{len(key)}]' if len(key) > 5 else ''})" + ) + + # clip to 2 elements + ret = self * tuple(key[:2]) + ret = typing.cast(_MultipleMatch, ret) + + if stop_on_defined: + ret.stopOn(stop_on) + + return ret + + def __call__(self, name: typing.Optional[str] = None) -> "ParserElement": + """ + Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be + passed as ``True``. + + If ``name`` is omitted, same as calling :class:`copy`. + + Example:: + + # these are equivalent + userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") + """ + if name is not None: + return self._setResultsName(name) + + return self.copy() + + def suppress(self) -> "ParserElement": + """ + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress(self) + + def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": + """ + Enables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. 
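+
+        A minimal illustrative sketch (an editorial addition, not upstream documentation)::
+
+            # leave_whitespace() makes leading whitespace significant;
+            # ignore_whitespace() restores the default skipping behavior
+            w = Word(alphas).leave_whitespace()
+            w.ignore_whitespace().parse_string("  abc")
+            # -> ['abc']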
+
+        :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
+        """
+        self.skipWhitespace = True
+        return self
+
+    def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
+        """
+        Disables the skipping of whitespace before matching the characters in the
+        :class:`ParserElement`'s defined pattern. This is normally only used internally by
+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+
+        :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
+        """
+        self.skipWhitespace = False
+        return self
+
+    def set_whitespace_chars(
+        self, chars: Union[Set[str], str], copy_defaults: bool = False
+    ) -> "ParserElement":
+        """
+        Overrides the default whitespace chars
+        """
+        self.skipWhitespace = True
+        self.whiteChars = set(chars)
+        self.copyDefaultWhiteChars = copy_defaults
+        return self
+
+    def parse_with_tabs(self) -> "ParserElement":
+        """
+        Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
+        Must be called before ``parse_string`` when the input grammar contains elements that
+        match ``<TAB>`` characters.
+        """
+        self.keepTabs = True
+        return self
+
+    def ignore(self, other: "ParserElement") -> "ParserElement":
+        """
+        Define expression to be ignored (e.g., comments) while doing pattern
+        matching; may be called repeatedly, to define multiple comment or other
+        ignorable patterns.
+
+        Example::
+
+            patt = Word(alphas)[...]
+            patt.parse_string('ablaj /* comment */ lskjd')
+            # -> ['ablaj']
+
+            patt.ignore(c_style_comment)
+            patt.parse_string('ablaj /* comment */ lskjd')
+            # -> ['ablaj', 'lskjd']
+        """
+        if isinstance(other, str_type):
+            other = Suppress(other)
+
+        if isinstance(other, Suppress):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append(other)
+        else:
+            self.ignoreExprs.append(Suppress(other.copy()))
+        return self
+
+    def set_debug_actions(
+        self,
+        start_action: DebugStartAction,
+        success_action: DebugSuccessAction,
+        exception_action: DebugExceptionAction,
+    ) -> "ParserElement":
+        """
+        Customize display of debugging messages while doing pattern matching:
+
+        - ``start_action`` - method to be called when an expression is about to be parsed;
+          should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
+
+        - ``success_action`` - method to be called when an expression has successfully parsed;
+          should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
+
+        - ``exception_action`` - method to be called when expression fails to parse;
+          should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
+        """
+        self.debugActions = self.DebugActions(
+            start_action or _default_start_debug_action,  # type: ignore[truthy-function]
+            success_action or _default_success_debug_action,  # type: ignore[truthy-function]
+            exception_action or _default_exception_debug_action,  # type: ignore[truthy-function]
+        )
+        self.debug = True
+        return self
+
+    def set_debug(self, flag: bool = True, recurse: bool = False) -> "ParserElement":
+        """
+        Enable display of debugging messages while doing pattern matching.
+        Set ``flag`` to ``True`` to enable, ``False`` to disable.
+        Set ``recurse`` to ``True`` to set the debug flag on this expression and all sub-expressions.
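+
+        As an illustrative note (an editorial addition, not upstream documentation),
+        passing ``recurse=True`` to an expression such as the ``term`` defined in the
+        example below applies the flag to its whole sub-expression tree::
+
+            # enable debug output on term and every expression below it
+            term.set_debug(recurse=True)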
+
+        Example::
+
+            wd = Word(alphas).set_name("alphaword")
+            integer = Word(nums).set_name("numword")
+            term = wd | integer
+
+            # turn on debugging for wd
+            wd.set_debug()
+
+            term[1, ...].parse_string("abc 123 xyz 890")
+
+        prints::
+
+            Match alphaword at loc 0(1,1)
+            Matched alphaword -> ['abc']
+            Match alphaword at loc 3(1,4)
+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+            Match alphaword at loc 7(1,8)
+            Matched alphaword -> ['xyz']
+            Match alphaword at loc 11(1,12)
+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+            Match alphaword at loc 15(1,16)
+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+        The output shown is that produced by the default debug actions - custom debug actions can be
+        specified using :class:`set_debug_actions`. Prior to attempting
+        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
+        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
+        message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
+        which makes debugging and exception messages easier to understand - for instance, the default
+        name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
+        """
+        if recurse:
+            for expr in self.visit_all():
+                expr.set_debug(flag, recurse=False)
+            return self
+
+        if flag:
+            self.set_debug_actions(
+                _default_start_debug_action,
+                _default_success_debug_action,
+                _default_exception_debug_action,
+            )
+        else:
+            self.debug = False
+        return self
+
+    @property
+    def default_name(self) -> str:
+        if self._defaultName is None:
+            self._defaultName = self._generateDefaultName()
+        return self._defaultName
+
+    @abstractmethod
+    def _generateDefaultName(self) -> str:
+        """
+        Child classes must define this method, which defines how the ``default_name`` is set.
+        """
+
+    def set_name(self, name: str) -> "ParserElement":
+        """
+        Define name for this expression, makes debugging and exception messages clearer.
+
+        Example::
+
+            integer = Word(nums)
+            integer.parse_string("ABC")  # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
+
+            integer.set_name("integer")
+            integer.parse_string("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
+        """
+        self.customName = name
+        self.errmsg = f"Expected {self.name}"
+        if __diag__.enable_debug_on_named_expressions:
+            self.set_debug()
+        return self
+
+    @property
+    def name(self) -> str:
+        # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
+        return self.customName if self.customName is not None else self.default_name
+
+    def __str__(self) -> str:
+        return self.name
+
+    def __repr__(self) -> str:
+        return str(self)
+
+    def streamline(self) -> "ParserElement":
+        self.streamlined = True
+        self._defaultName = None
+        return self
+
+    def recurse(self) -> List["ParserElement"]:
+        return []
+
+    def _checkRecursion(self, parseElementList):
+        subRecCheckList = parseElementList[:] + [self]
+        for e in self.recurse():
+            e._checkRecursion(subRecCheckList)
+
+    def validate(self, validateTrace=None) -> None:
+        """
+        Check defined expressions for valid structure, check for infinite recursive definitions.
+ """ + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + self._checkRecursion([]) + + def parse_file( + self, + file_or_filename: Union[str, Path, TextIO], + encoding: str = "utf-8", + parse_all: bool = False, + *, + parseAll: bool = False, + ) -> ParseResults: + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. + """ + parseAll = parseAll or parse_all + try: + file_or_filename = typing.cast(TextIO, file_or_filename) + file_contents = file_or_filename.read() + except AttributeError: + file_or_filename = typing.cast(str, file_or_filename) + with open(file_or_filename, "r", encoding=encoding) as f: + file_contents = f.read() + try: + return self.parse_string(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, str_type): + return self.matches(other, parse_all=True) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __hash__(self): + return id(self) + + def matches( + self, test_string: str, parse_all: bool = True, *, parseAll: bool = True + ) -> bool: + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. + + Parameters: + + - ``test_string`` - to test against this expression for a match + - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests + + Example:: + + expr = Word(nums) + assert expr.matches("100") + """ + parseAll = parseAll and parse_all + try: + self.parse_string(str(test_string), parse_all=parseAll) + return True + except ParseBaseException: + return False + + def run_tests( + self, + tests: Union[str, List[str]], + parse_all: bool = True, + comment: typing.Optional[Union["ParserElement", str]] = "#", + full_dump: bool = True, + print_results: bool = True, + failure_tests: bool = False, + post_parse: typing.Optional[Callable[[str, ParseResults], str]] = None, + file: typing.Optional[TextIO] = None, + with_line_numbers: bool = False, + *, + parseAll: bool = True, + fullDump: bool = True, + printResults: bool = True, + failureTests: bool = False, + postParse: typing.Optional[Callable[[str, ParseResults], str]] = None, + ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
+
+        Parameters:
+
+        - ``tests`` - a list of separate test strings, or a multiline string of test strings
+        - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
+        - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
+          string; pass None to disable comment filtering
+        - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
+          if False, only dump nested list
+        - ``print_results`` - (default= ``True``) prints test output to stdout
+        - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
+        - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
+          `fn(test_string, parse_results)` and returns a string to be added to the test output
+        - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
+          if None, will default to ``sys.stdout``
+        - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
+        test's output
+
+        Example::
+
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.run_tests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.run_tests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failure_tests=True)
+            print("Success" if result[0] else "Failed!")
+
+        prints::
+
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line. If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+        (Note that this is a raw string literal, you must include the leading ``'r'``.)
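+
+        An illustrative sketch (an editorial addition, not upstream documentation) of
+        the ``post_parse`` callback, whose return value is added to each test's output::
+
+            def summarize(test_string, result):
+                # called only for successful parses; the returned string
+                # is appended to the test output
+                return f"parsed {len(result)} token(s)"
+
+            Word(nums).run_tests(["123", "456"], post_parse=summarize)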
+ """ + from .testing import pyparsing_test + + parseAll = parseAll and parse_all + fullDump = fullDump and full_dump + printResults = printResults and print_results + failureTests = failureTests or failure_tests + postParse = postParse or post_parse + if isinstance(tests, str_type): + tests = typing.cast(str, tests) + line_strip = type(tests).strip + tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] + comment_specified = comment is not None + if comment_specified: + if isinstance(comment, str_type): + comment = typing.cast(str, comment) + comment = Literal(comment) + comment = typing.cast(ParserElement, comment) + if file is None: + file = sys.stdout + print_ = file.write + + result: Union[ParseResults, Exception] + allResults: List[Tuple[str, Union[ParseResults, Exception]]] = [] + comments: List[str] = [] + success = True + NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) + BOM = "\ufeff" + nlstr = "\n" + for t in tests: + if comment_specified and comment.matches(t, False) or comments and not t: + comments.append( + pyparsing_test.with_line_numbers(t) if with_line_numbers else t + ) + continue + if not t: + continue + out = [ + f"{nlstr}{nlstr.join(comments) if comments else ''}", + pyparsing_test.with_line_numbers(t) if with_line_numbers else t, + ] + comments = [] + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = NL.transform_string(t.lstrip(BOM)) + result = self.parse_string(t, parse_all=parseAll) + except ParseBaseException as pe: + fatal = "(FATAL) " if isinstance(pe, ParseFatalException) else "" + out.append(pe.explain()) + out.append(f"FAIL: {fatal}{pe}") + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(pe.__traceback__)) + success = success and failureTests + result = pe + except Exception as exc: + out.append(f"FAIL-EXCEPTION: {type(exc).__name__}: {exc}") + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(exc.__traceback__)) + success = success and failureTests + result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append( + f"{postParse.__name__} failed: {type(e).__name__}: {e}" + ) + else: + out.append(result.dump(full=fullDump)) + out.append("") + + if printResults: + print_("\n".join(out)) + + allResults.append((t, result)) + + return success, allResults + + def create_diagram( + self, + output_html: Union[TextIO, Path, str], + vertical: int = 3, + show_results_names: bool = False, + show_groups: bool = False, + embed: bool = False, + **kwargs, + ) -> None: + """ + Create a railroad diagram for the parser. 
+
+        Parameters:
+
+        - ``output_html`` (str or file-like object) - output target for generated
+          diagram HTML
+        - ``vertical`` (int) - threshold for formatting multiple alternatives vertically
+          instead of horizontally (default=3)
+        - ``show_results_names`` - bool flag whether diagram should show annotations for
+          defined results names
+        - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled surrounding box
+        - ``embed`` - bool flag whether generated HTML should omit ``<HTML>``, ``<HEAD>``, and ``<BODY>`` tags to embed
+          the resulting HTML in an enclosing HTML source
+        - ``head`` - str containing additional HTML to insert into the ``<HEAD>`` section of the generated code;
+          can be used to insert custom CSS styling
+        - ``body`` - str containing additional HTML to insert at the beginning of the ``<BODY>`` section of the
+          generated code
+
+        Additional diagram-formatting keyword arguments can also be included;
+        see railroad.Diagram class.
+        """
+
+        try:
+            from .diagram import to_railroad, railroad_to_html
+        except ImportError as ie:
+            raise Exception(
+                "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
+            ) from ie
+
+        self.streamline()
+
+        railroad = to_railroad(
+            self,
+            vertical=vertical,
+            show_results_names=show_results_names,
+            show_groups=show_groups,
+            diagram_kwargs=kwargs,
+        )
+        if not isinstance(output_html, (str, Path)):
+            # we were passed a file-like object, just write to it
+            output_html.write(railroad_to_html(railroad, embed=embed, **kwargs))
+            return
+
+        with open(output_html, "w", encoding="utf-8") as diag_file:
+            diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs))
+
+    # Compatibility synonyms
+    # fmt: off
+    inlineLiteralsUsing = replaced_by_pep8("inlineLiteralsUsing", inline_literals_using)
+    setDefaultWhitespaceChars = replaced_by_pep8(
+        "setDefaultWhitespaceChars", set_default_whitespace_chars
+    )
+    setResultsName = replaced_by_pep8("setResultsName", set_results_name)
+    setBreak = replaced_by_pep8("setBreak", set_break)
+    setParseAction = replaced_by_pep8("setParseAction", set_parse_action)
+    addParseAction = replaced_by_pep8("addParseAction", add_parse_action)
+    addCondition = replaced_by_pep8("addCondition", add_condition)
+    setFailAction = replaced_by_pep8("setFailAction", set_fail_action)
+    tryParse = replaced_by_pep8("tryParse", try_parse)
+    enableLeftRecursion = replaced_by_pep8("enableLeftRecursion", enable_left_recursion)
+    enablePackrat = replaced_by_pep8("enablePackrat", enable_packrat)
+    parseString = replaced_by_pep8("parseString", parse_string)
+    scanString = replaced_by_pep8("scanString", scan_string)
+    transformString = replaced_by_pep8("transformString", transform_string)
+    searchString = replaced_by_pep8("searchString", search_string)
+    ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
+    leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
+    setWhitespaceChars = replaced_by_pep8("setWhitespaceChars", set_whitespace_chars)
+    parseWithTabs = replaced_by_pep8("parseWithTabs", parse_with_tabs)
+    setDebugActions = replaced_by_pep8("setDebugActions", set_debug_actions)
+    setDebug = replaced_by_pep8("setDebug", set_debug)
+    setName = replaced_by_pep8("setName", set_name)
+    parseFile = replaced_by_pep8("parseFile", parse_file)
+    runTests = replaced_by_pep8("runTests", run_tests)
+    canParseNext = can_parse_next
+    resetCache = reset_cache
+    defaultName = default_name
+    # fmt: on
+
+
+class _PendingSkip(ParserElement):
+    # internal placeholder class to hold a place where '...'
is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr: ParserElement, must_skip: bool = False): + super().__init__() + self.anchor = expr + self.must_skip = must_skip + + def _generateDefaultName(self) -> str: + return str(self.anchor + Empty()).replace("Empty", "...") + + def __add__(self, other) -> "ParserElement": + skipper = SkipTo(other).set_name("...")("_skipped*") + if self.must_skip: + + def must_skip(t): + if not t._skipped or t._skipped.as_list() == [""]: + del t[0] + t.pop("_skipped", None) + + def show_skip(t): + if t._skipped.as_list()[-1:] == [""]: + t.pop("_skipped") + t["_skipped"] = f"missing <{self.anchor!r}>" + + return ( + self.anchor + skipper().add_parse_action(must_skip) + | skipper().add_parse_action(show_skip) + ) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.defaultName + + def parseImpl(self, *args): + raise Exception( + "use of `...` expression without following SkipTo target expression" + ) + + +class Token(ParserElement): + """Abstract :class:`ParserElement` subclass, for defining atomic + matching patterns. + """ + + def __init__(self): + super().__init__(savelist=False) + + def _generateDefaultName(self) -> str: + return type(self).__name__ + + +class NoMatch(Token): + """ + A token that will never match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl(self, instring, loc, doActions=True): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """ + Token to exactly match a specified string. + + Example:: + + Literal('abc').parse_string('abc') # -> ['abc'] + Literal('abc').parse_string('abcdef') # -> ['abc'] + Literal('abc').parse_string('ab') # -> Exception: Expected "abc" + + For case-insensitive matching, use :class:`CaselessLiteral`. + + For keyword matching (force word break before and after the matched string), + use :class:`Keyword` or :class:`CaselessKeyword`. + """ + + def __new__(cls, match_string: str = "", *, matchString: str = ""): + # Performance tuning: select a subclass with optimized parseImpl + if cls is Literal: + match_string = matchString or match_string + if not match_string: + return super().__new__(Empty) + if len(match_string) == 1: + return super().__new__(_SingleCharLiteral) + + # Default behavior + return super().__new__(cls) + + # Needed to make copy.copy() work correctly if we customize __new__ + def __getnewargs__(self): + return (self.match,) + + def __init__(self, match_string: str = "", *, matchString: str = ""): + super().__init__() + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + self.firstMatchChar = match_string[:1] + self.errmsg = f"Expected {self.name}" + self.mayReturnEmpty = False + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith( + self.match, loc + ): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +class Empty(Literal): + """ + An empty token, will always match. 
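+
+    An illustrative sketch (an editorial addition, not upstream documentation)::
+
+        # Empty matches at any position and contributes no tokens
+        (Word(alphas) + Empty() + Word(nums)).parse_string("abc 123")
+        # -> ['abc', '123']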
+ """ + + def __init__(self, match_string="", *, matchString=""): + super().__init__("") + self.mayReturnEmpty = True + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + return "Empty" + + def parseImpl(self, instring, loc, doActions=True): + return loc, [] + + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +ParserElement._literalStringClass = Literal + + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, + it must be immediately preceded and followed by whitespace or + non-keyword characters. Compare with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``ident_chars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + + Example:: + + Keyword("start").parse_string("start") # -> ['start'] + Keyword("start").parse_string("starting") # -> Exception + + For case-insensitive matching, use :class:`CaselessKeyword`. + """ + + DEFAULT_KEYWORD_CHARS = alphanums + "_$" + + def __init__( + self, + match_string: str = "", + ident_chars: typing.Optional[str] = None, + caseless: bool = False, + *, + matchString: str = "", + identChars: typing.Optional[str] = None, + ): + super().__init__() + identChars = identChars or ident_chars + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Keyword; use Empty() instead") + self.errmsg = f"Expected {type(self).__name__} {self.name}" + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = match_string.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def _generateDefaultName(self) -> str: + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + errmsg = self.errmsg + errloc = loc + if self.caseless: + if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: + if loc == 0 or instring[loc - 1].upper() not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars + ): + return loc + self.matchLen, self.match + + # followed by keyword char + errmsg += ", was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + elif ( + instring[loc] == self.firstMatchChar + and self.matchLen == 1 + or instring.startswith(self.match, loc) + ): + if loc == 0 or instring[loc - 1] not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars + ): + return loc + self.matchLen, self.match + + # followed by keyword char + errmsg += ", keyword was immediately followed by 
keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + raise ParseException(instring, errloc, errmsg, self) + + @staticmethod + def set_default_keyword_chars(chars) -> None: + """ + Overrides the default characters used by :class:`Keyword` expressions. + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + + setDefaultKeywordChars = set_default_keyword_chars + + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + + CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for :class:`CaselessKeyword`.) + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + match_string = matchString or match_string + super().__init__(match_string.upper()) + # Preserve the defining literal. + self.returnString = match_string + self.errmsg = f"Expected {self.name}" + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc : loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example:: + + CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD'] + + (Contrast with example for :class:`CaselessLiteral`.) + """ + + def __init__( + self, + match_string: str = "", + ident_chars: typing.Optional[str] = None, + *, + matchString: str = "", + identChars: typing.Optional[str] = None, + ): + identChars = identChars or ident_chars + match_string = matchString or match_string + super().__init__(match_string, identChars, caseless=True) + + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters + - ``max_mismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. 
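+
+    As an illustrative note (an editorial addition, not upstream documentation),
+    ``caseless=True`` compares characters case-insensitively::
+
+        patt = CloseMatch("ATCATCGAATGGA", caseless=True)
+        patt.parse_string("atcatcgaatgga")  # matches, with no mismatches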
+ + Example:: + + patt = CloseMatch("ATCATCGAATGGA") + patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) + patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + + def __init__( + self, + match_string: str, + max_mismatches: typing.Optional[int] = None, + *, + maxMismatches: int = 1, + caseless=False, + ): + maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches + super().__init__() + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = f"Expected {self.match_string!r} (with up to {self.maxMismatches} mismatches)" + self.caseless = caseless + self.mayIndexError = False + self.mayReturnEmpty = False + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:{self.match_string!r}" + + def parseImpl(self, instring, loc, doActions=True): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc, s_m in enumerate( + zip(instring[loc:maxloc], match_string) + ): + src, mat = s_m + if self.caseless: + src, mat = src.lower(), mat.lower() + + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = start + match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results["original"] = match_string + results["mismatches"] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. + + Parameters: + + - ``init_chars`` - string of all characters that should be used to + match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; + if ``body_chars`` is also specified, then this is the string of + initial characters + - ``body_chars`` - string of characters that + can be used for matching after a matched initial character as + given in ``init_chars``; if omitted, same as the initial characters + (default=``None``) + - ``min`` - minimum number of characters to match (default=1) + - ``max`` - maximum number of characters to match (default=0) + - ``exact`` - exact number of characters to match (default=0) + - ``as_keyword`` - match as a keyword (default=``False``) + - ``exclude_chars`` - characters that might be + found in the input ``body_chars`` string but which should not be + accepted for matching ;useful to define a word of all + printables except for one or two characters, for instance + (default=``None``) + + :class:`srange` is useful for defining custom character set strings + for defining :class:`Word` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. 
+ This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - :class:`printables` (any non-whitespace character) + + ``alphas``, ``nums``, and ``printables`` are also defined in several + Unicode sets - see :class:`pyparsing_unicode``. + + Example:: + + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capitalized_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums + '-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, exclude_chars=",") + """ + + def __init__( + self, + init_chars: str = "", + body_chars: typing.Optional[str] = None, + min: int = 1, + max: int = 0, + exact: int = 0, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + *, + initChars: typing.Optional[str] = None, + bodyChars: typing.Optional[str] = None, + asKeyword: bool = False, + excludeChars: typing.Optional[str] = None, + ): + initChars = initChars or init_chars + bodyChars = bodyChars or body_chars + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__() + if not initChars: + raise ValueError( + f"invalid {type(self).__name__}, initChars cannot be empty string" + ) + + initChars_set = set(initChars) + if excludeChars: + excludeChars_set = set(excludeChars) + initChars_set -= excludeChars_set + if bodyChars: + bodyChars = "".join(set(bodyChars) - excludeChars_set) + self.initChars = initChars_set + self.initCharsOrig = "".join(sorted(initChars_set)) + + if bodyChars: + self.bodyChars = set(bodyChars) + self.bodyCharsOrig = "".join(sorted(bodyChars)) + else: + self.bodyChars = initChars_set + self.bodyCharsOrig = self.initCharsOrig + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" + ) + + if self.maxSpecified and min > max: + raise ValueError( + f"invalid args, if min and max both specified min must be <= max (min={min}, max={max})" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + min = max = exact + self.maxLen = exact + self.minLen = exact + + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self.asKeyword = asKeyword + if self.asKeyword: + self.errmsg += " as a keyword" + + # see if we can make a regex for this Word + if " " not in (self.initChars | self.bodyChars): + if len(self.initChars) == 1: + re_leading_fragment = re.escape(self.initCharsOrig) + else: + re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]" + + if self.bodyChars == self.initChars: + if max == 0 and self.minLen == 1: + repeat = "+" + elif max == 1: + repeat = "" + else: + if self.minLen != 
self.maxLen: + repeat = f"{{{self.minLen},{'' if self.maxLen == _MAX_INT else self.maxLen}}}" + else: + repeat = f"{{{self.minLen}}}" + self.reString = f"{re_leading_fragment}{repeat}" + else: + if max == 1: + re_body_fragment = "" + repeat = "" + else: + re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]" + if max == 0 and self.minLen == 1: + repeat = "*" + elif max == 2: + repeat = "?" if min <= 1 else "" + else: + if min != max: + repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}" + else: + repeat = f"{{{min - 1 if min > 0 else ''}}}" + + self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}" + + if self.asKeyword: + self.reString = rf"\b{self.reString}\b" + + try: + self.re = re.compile(self.reString) + except re.error: + self.re = None # type: ignore[assignment] + else: + self.re_match = self.re.match + self.parseImpl = self.parseImpl_regex # type: ignore[assignment] + + def _generateDefaultName(self) -> str: + def charsAsStr(s): + max_repr_len = 16 + s = _collapse_string_to_ranges(s, re_escape=False) + + if len(s) > max_repr_len: + return s[: max_repr_len - 3] + "..." + + return s + + if self.initChars != self.bodyChars: + base = f"W:({charsAsStr(self.initChars)}, {charsAsStr(self.bodyChars)})" + else: + base = f"W:({charsAsStr(self.initChars)})" + + # add length specification + if self.minLen > 1 or self.maxLen != _MAX_INT: + if self.minLen == self.maxLen: + if self.minLen == 1: + return base[2:] + else: + return base + f"{{{self.minLen}}}" + elif self.maxLen == _MAX_INT: + return base + f"{{{self.minLen},...}}" + else: + return base + f"{{{self.minLen},{self.maxLen}}}" + return base + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.initChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min(maxloc, instrlen) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + elif self.asKeyword and ( + (start > 0 and instring[start - 1] in bodychars) + or (loc < instrlen and instring[loc] in bodychars) + ): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def parseImpl_regex(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + +class Char(Word): + """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + + def __init__( + self, + charset: str, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + *, + asKeyword: bool = False, + excludeChars: typing.Optional[str] = None, + ): + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__( + charset, exact=1, as_keyword=asKeyword, exclude_chars=excludeChars + ) + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module `_. 
+    If the given regex contains named groups (defined using ``(?P<name>...)``),
+    these will be preserved as named :class:`ParseResults`.
+
+    If instead of the Python stdlib ``re`` module you wish to use a different RE module
+    (such as the ``regex`` module), you can do so by building your ``Regex`` object with
+    a compiled RE that was compiled using ``regex``.
+
+    Example::
+
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+
+        # named fields in a regex will be returned as named results
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+
+        # the Regex class will accept re's compiled using the regex module
+        import regex
+        parser = pp.Regex(regex.compile(r'[0-9]'))
+    """
+
+    def __init__(
+        self,
+        pattern: Any,
+        flags: Union[re.RegexFlag, int] = 0,
+        as_group_list: bool = False,
+        as_match: bool = False,
+        *,
+        asGroupList: bool = False,
+        asMatch: bool = False,
+    ):
+        """The parameters ``pattern`` and ``flags`` are passed
+        to the ``re.compile()`` function as-is. See the Python
+        `re module <https://docs.python.org/3/library/re.html>`_ for an
+        explanation of the acceptable patterns and flags.
+        """
+        super().__init__()
+        asGroupList = asGroupList or as_group_list
+        asMatch = asMatch or as_match
+
+        if isinstance(pattern, str_type):
+            if not pattern:
+                raise ValueError("null string passed to Regex; use Empty() instead")
+
+            self._re = None
+            self.reString = self.pattern = pattern
+            self.flags = flags
+
+        elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
+            self._re = pattern
+            self.pattern = self.reString = pattern.pattern
+            self.flags = flags
+
+        else:
+            raise TypeError(
+                "Regex may only be constructed with a string or a compiled RE object"
+            )
+
+        self.errmsg = f"Expected {self.name}"
+        self.mayIndexError = False
+        self.asGroupList = asGroupList
+        self.asMatch = asMatch
+        if self.asGroupList:
+            self.parseImpl = self.parseImplAsGroupList  # type: ignore [assignment]
+        if self.asMatch:
+            self.parseImpl = self.parseImplAsMatch  # type: ignore [assignment]
+
+    @cached_property
+    def re(self):
+        if self._re:
+            return self._re
+
+        try:
+            return re.compile(self.pattern, self.flags)
+        except re.error:
+            raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex")
+
+    @cached_property
+    def re_match(self):
+        return self.re.match
+
+    @cached_property
+    def mayReturnEmpty(self):
+        return self.re_match("") is not None
+
+    def _generateDefaultName(self) -> str:
+        return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
+
+    def parseImpl(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = ParseResults(result.group())
+        d = result.groupdict()
+
+        for k, v in d.items():
+            ret[k] = v
+
+        return loc, ret
+
+    def parseImplAsGroupList(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.groups()
+        return loc, ret
+
+    def parseImplAsMatch(self, instring, loc, doActions=True):
+        result = self.re_match(instring, loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result
+        return loc, ret
+
+    def sub(self, repl: str) -> ParserElement:
+        r"""
+        Return :class:`Regex` with an attached parse action to transform the parsed
+        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
+
+        Example::
+
+            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
+            print(make_html.transform_string("h1:main title:"))
+            # prints "<h1>main title</h1>"
+        """
+        if self.asGroupList:
+            raise TypeError("cannot use sub() with Regex(as_group_list=True)")
+
+        if self.asMatch and callable(repl):
+            raise TypeError(
+                "cannot use sub() with a callable with Regex(as_match=True)"
+            )
+
+        if self.asMatch:
+
+            def pa(tokens):
+                return tokens[0].expand(repl)
+
+        else:
+
+            def pa(tokens):
+                return self.re.sub(repl, tokens[0])
+
+        return self.add_parse_action(pa)
+
+
+class QuotedString(Token):
+    r"""
+    Token for matching strings that are delimited by quoting characters.
+
+    Defined with the following parameters:
+
+    - ``quote_char`` - string of one or more characters defining the
+      quote delimiting string
+    - ``esc_char`` - character to escape quotes, typically backslash
+      (default= ``None``)
+    - ``esc_quote`` - special quote sequence to escape an embedded quote
+      string (such as SQL's ``""`` to escape an embedded ``"``)
+      (default= ``None``)
+    - ``multiline`` - boolean indicating whether quotes can span
+      multiple lines (default= ``False``)
+    - ``unquote_results`` - boolean indicating whether the matched text
+      should be unquoted (default= ``True``)
+    - ``end_quote_char`` - string of one or more characters defining the
+      end of the quote delimited string (default= ``None`` => same as
+      quote_char)
+    - ``convert_whitespace_escapes`` - convert escaped whitespace
+      (``'\t'``, ``'\n'``, etc.) to actual whitespace
+      (default= ``True``)
+
+    Example::
+
+        qs = QuotedString('"')
+        print(qs.search_string('lsjdf "This is the quote" sldjf'))
+        complex_qs = QuotedString('{{', end_quote_char='}}')
+        print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
+        sql_qs = QuotedString('"', esc_quote='""')
+        print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+
+    prints::
+
+        [['This is the quote']]
+        [['This is the "quote"']]
+        [['This is the quote with "embedded" quotes']]
+    """
+
+    ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")))
+
+    def __init__(
+        self,
+        quote_char: str = "",
+        esc_char: typing.Optional[str] = None,
+        esc_quote: typing.Optional[str] = None,
+        multiline: bool = False,
+        unquote_results: bool = True,
+        end_quote_char: typing.Optional[str] = None,
+        convert_whitespace_escapes: bool = True,
+        *,
+        quoteChar: str = "",
+        escChar: typing.Optional[str] = None,
+        escQuote: typing.Optional[str] = None,
+        unquoteResults: bool = True,
+        endQuoteChar: typing.Optional[str] = None,
+        convertWhitespaceEscapes: bool = True,
+    ):
+        super().__init__()
+        esc_char = escChar or esc_char
+        esc_quote = escQuote or esc_quote
+        unquote_results = unquoteResults and unquote_results
+        end_quote_char = endQuoteChar or end_quote_char
+        convert_whitespace_escapes = (
+            convertWhitespaceEscapes and convert_whitespace_escapes
+        )
+        quote_char = quoteChar or quote_char
+
+        # remove white space from quote chars
+        quote_char = quote_char.strip()
+        if not quote_char:
+            raise ValueError("quote_char cannot be the empty string")
+
+        if end_quote_char is None:
+            end_quote_char = quote_char
+        else:
+            end_quote_char = end_quote_char.strip()
+            if not end_quote_char:
+                raise ValueError("end_quote_char cannot be the empty string")
+
+        self.quote_char: str = quote_char
+        self.quote_char_len: int = len(quote_char)
+        self.first_quote_char: str = quote_char[0]
+        self.end_quote_char: str = end_quote_char
+        self.end_quote_char_len: int = len(end_quote_char)
+        self.esc_char: str = esc_char or ""
+        self.has_esc_char: bool = esc_char is not None
+        self.esc_quote: str = esc_quote or ""
+        self.unquote_results: bool =
unquote_results + self.convert_whitespace_escapes: bool = convert_whitespace_escapes + self.multiline = multiline + self.re_flags = re.RegexFlag(0) + + # fmt: off + # build up re pattern for the content between the quote delimiters + inner_pattern = [] + + if esc_quote: + inner_pattern.append(rf"(?:{re.escape(esc_quote)})") + + if esc_char: + inner_pattern.append(rf"(?:{re.escape(esc_char)}.)") + + if len(self.end_quote_char) > 1: + inner_pattern.append( + "(?:" + + "|".join( + f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))" + for i in range(len(self.end_quote_char) - 1, 0, -1) + ) + + ")" + ) + + if self.multiline: + self.re_flags |= re.MULTILINE | re.DOTALL + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}" + rf"{(_escape_regex_range_chars(esc_char) if self.has_esc_char else '')}])" + ) + else: + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r" + rf"{(_escape_regex_range_chars(esc_char) if self.has_esc_char else '')}])" + ) + + self.pattern = "".join( + [ + re.escape(self.quote_char), + "(?:", + '|'.join(inner_pattern), + ")*", + re.escape(self.end_quote_char), + ] + ) + + if self.unquote_results: + if self.convert_whitespace_escapes: + self.unquote_scan_re = re.compile( + rf"({'|'.join(re.escape(k) for k in self.ws_map)})" + rf"|({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags, + ) + else: + self.unquote_scan_re = re.compile( + rf"({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags + ) + # fmt: on + + try: + self.re = re.compile(self.pattern, self.re_flags) + self.reString = self.pattern + self.re_match = self.re.match + except re.error: + raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex") + + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self.mayReturnEmpty = True + + def _generateDefaultName(self) -> str: + if self.quote_char == self.end_quote_char and isinstance( + self.quote_char, str_type + ): + return f"string enclosed in {self.quote_char!r}" + + return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}" + + def parseImpl(self, instring, loc, doActions=True): + # check first character of opening quote to see if that is a match + # before doing the more complicated regex match + result = ( + instring[loc] == self.first_quote_char + and self.re_match(instring, loc) + or None + ) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + # get ending loc and matched string from regex matching result + loc = result.end() + ret = result.group() + + if self.unquote_results: + # strip off quotes + ret = ret[self.quote_char_len : -self.end_quote_char_len] + + if isinstance(ret, str_type): + # fmt: off + if self.convert_whitespace_escapes: + # as we iterate over matches in the input string, + # collect from whichever match group of the unquote_scan_re + # regex matches (only 1 group will match at any given time) + ret = "".join( + # match group 1 matches \t, \n, etc. 
+ self.ws_map[match.group(1)] if match.group(1) + # match group 2 matches escaped characters + else match.group(2)[-1] if match.group(2) + # match group 3 matches any character + else match.group(3) + for match in self.unquote_scan_re.finditer(ret) + ) + else: + ret = "".join( + # match group 1 matches escaped characters + match.group(1)[-1] if match.group(1) + # match group 2 matches any character + else match.group(2) + for match in self.unquote_scan_re.finditer(ret) + ) + # fmt: on + + # replace escaped quotes + if self.esc_quote: + ret = ret.replace(self.esc_quote, self.end_quote_char) + + return loc, ret + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. + + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(DelimitedList(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + + def __init__( + self, + not_chars: str = "", + min: int = 1, + max: int = 0, + exact: int = 0, + *, + notChars: str = "", + ): + super().__init__() + self.skipWhitespace = False + self.notChars = not_chars or notChars + self.notCharsSet = set(self.notChars) + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use" + " Opt(CharsNotIn()) if zero-length char group is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = f"Expected {self.name}" + self.mayReturnEmpty = self.minLen == 0 + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + not_chars_str = _collapse_string_to_ranges(self.notChars) + if len(not_chars_str) > 16: + return f"!W:({self.notChars[: 16 - 3]}...)" + else: + return f"!W:({self.notChars})" + + def parseImpl(self, instring, loc, doActions=True): + notchars = self.notCharsSet + if instring[loc] in notchars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. 
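+
+    For example, ``White`` can require a specific amount of whitespace
+    (a minimal sketch, added for illustration)::
+
+        # require exactly four leading spaces before a word
+        indent = White(" ", exact=4)
+        stmt = indent.suppress() + Word(alphas)
+        print(stmt.parse_string("    indented"))
+
+    prints::
+
+        ['indented']
+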
+ """ + + whiteStrs = { + " ": "", + "\t": "", + "\n": "", + "\r": "", + "\f": "", + "\u00A0": "", + "\u1680": "", + "\u180E": "", + "\u2000": "", + "\u2001": "", + "\u2002": "", + "\u2003": "", + "\u2004": "", + "\u2005": "", + "\u2006": "", + "\u2007": "", + "\u2008": "", + "\u2009": "", + "\u200A": "", + "\u200B": "", + "\u202F": "", + "\u205F": "", + "\u3000": "", + } + + def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): + super().__init__() + self.matchWhite = ws + self.set_whitespace_chars( + "".join(c for c in self.whiteStrs if c not in self.matchWhite), + copy_defaults=True, + ) + # self.leave_whitespace() + self.mayReturnEmpty = True + self.errmsg = f"Expected {self.name}" + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def _generateDefaultName(self) -> str: + return "".join(White.whiteStrs[c] for c in self.matchWhite) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.matchWhite: + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min(maxloc, len(instring)) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class PositionToken(Token): + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class GoToColumn(PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. + """ + + def __init__(self, colno: int): + super().__init__() + self.col = colno + + def preParse(self, instring: str, loc: int) -> int: + if col(loc, instring) == self.col: + return loc + + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while ( + loc < instrlen + and instring[loc].isspace() + and col(loc, instring) != self.col + ): + loc += 1 + + return loc + + def parseImpl(self, instring, loc, doActions=True): + thiscol = col(loc, instring) + if thiscol > self.col: + raise ParseException(instring, loc, "Text not in expected column", self) + newloc = loc + self.col - thiscol + ret = instring[loc:newloc] + return newloc, ret + + +class LineStart(PositionToken): + r"""Matches if current position is at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + rest_of_line).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self): + super().__init__() + self.leave_whitespace() + self.orig_whiteChars = set() | self.whiteChars + self.whiteChars.discard("\n") + self.skipper = Empty().set_whitespace_chars(self.whiteChars) + self.errmsg = "Expected start of line" + + def preParse(self, instring: str, loc: int) -> int: + if loc == 0: + return loc + + ret = self.skipper.preParse(instring, loc) + + if "\n" in self.orig_whiteChars: + while instring[ret : ret + 1] == "\n": + ret = self.skipper.preParse(instring, ret + 1) + + return ret + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + + +class LineEnd(PositionToken): + """Matches if current position 
is at the end of a line within the + parse string + """ + + def __init__(self): + super().__init__() + self.whiteChars.discard("\n") + self.set_whitespace_chars(self.whiteChars, copy_defaults=False) + self.errmsg = "Expected end of line" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class StringStart(PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected start of text" + + def parseImpl(self, instring, loc, doActions=True): + # see if entire string up to here is just whitespace and ignoreables + if loc != 0 and loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + + return loc, [] + + +class StringEnd(PositionToken): + """ + Matches if current position is at the end of the parse string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected end of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + if loc == len(instring): + return loc + 1, [] + if loc > len(instring): + return loc, [] + + raise ParseException(instring, loc, self.errmsg, self) + + +class WordStart(PositionToken): + """Matches if the current position is at the beginning of a + :class:`Word`, and is not preceded by any character in a given + set of ``word_chars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + if ( + instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class WordEnd(PositionToken): + """Matches if the current position is at the end of a :class:`Word`, + and is not followed by any character in a given set of ``word_chars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True): + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if ( + instring[loc] in self.wordChars + or instring[loc - 1] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. 
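+
+    Subclasses include :class:`And`, :class:`Or`, :class:`MatchFirst`, and
+    :class:`Each`, which are usually built up with operators rather than
+    constructed directly; as a brief sketch::
+
+        # the following two forms are equivalent
+        expr = Word(alphas) + Word(nums)
+        expr = And([Word(alphas), Word(nums)])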
+ """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(savelist) + self.exprs: List[ParserElement] + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, str_type): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, str_type) for expr in exprs): + exprs = ( + self._literalStringClass(e) if isinstance(e, str_type) else e + for e in exprs + ) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def recurse(self) -> List[ParserElement]: + return self.exprs[:] + + def append(self, other) -> ParserElement: + self.exprs.append(other) + self._defaultName = None + return self + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().leave_whitespace(recursive) + + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().ignore_whitespace(recursive) + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:({self.exprs})" + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) + if len(self.exprs) == 2: + other = self.exprs[0] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = other.exprs[:] + [self.exprs[1]] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = f"Expected {self}" + + return self + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + 
self._checkRecursion([]) + + def copy(self) -> ParserElement: + ret = super().copy() + ret = typing.cast(ParseExpression, ret) + ret.exprs = [e.copy() for e in self.exprs] + return ret + + def _setResultsName(self, name, listAllMatches=False): + if not ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + return super()._setResultsName(name, listAllMatches) + + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break + + return super()._setResultsName(name, listAllMatches) + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class And(ParseExpression): + """ + Requires all given :class:`ParseExpression` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. + + Example:: + + integer = Word(nums) + name_expr = Word(alphas)[1, ...] + + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.leave_whitespace() + + def _generateDefaultName(self) -> str: + return "-" + + def __init__( + self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True + ): + exprs: List[ParserElement] = list(exprs_arg) + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is not Ellipsis: + tmp.append(expr) + continue + + if i < len(exprs) - 1: + skipto_arg: ParserElement = typing.cast( + ParseExpression, (Empty() + exprs[i + 1]) + ).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + continue + + raise Exception("cannot construct And with sequence ending in ...") + exprs[:] = tmp + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + if not isinstance(self.exprs[0], White): + self.set_whitespace_chars( + self.exprs[0].whiteChars, + copy_defaults=self.exprs[0].copyDefaultWhiteChars, + ) + self.skipWhitespace = self.exprs[0].skipWhitespace + else: + self.skipWhitespace = False + else: + self.mayReturnEmpty = True + self.callPreparse = True + + def streamline(self) -> ParserElement: + # collapse any _PendingSkip's + if self.exprs and any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + deleted_expr_marker = NoMatch() + for i, e in enumerate(self.exprs[:-1]): + if e is deleted_expr_marker: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = deleted_expr_marker + self.exprs = [e for e in self.exprs if e is not deleted_expr_marker] + + super().streamline() + + # link any IndentedBlocks to the prior expression + 
prev: ParserElement + cur: ParserElement + for prev, cur in zip(self.exprs, self.exprs[1:]): + # traverse cur or any first embedded expr of cur looking for an IndentedBlock + # (but watch out for recursive grammar) + seen = set() + while True: + if id(cur) in seen: + break + seen.add(id(cur)) + if isinstance(cur, IndentedBlock): + prev.add_parse_action( + lambda s, l, t, cur_=cur: setattr( + cur_, "parent_anchor", col(l, s) + ) + ) + break + subs = cur.recurse() + next_first = next(iter(subs), None) + if next_first is None: + break + cur = typing.cast(ParserElement, next_first) + + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + # pass False as callPreParse arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( + instring, loc, doActions, callPreParse=False + ) + errorStop = False + for e in self.exprs[1:]: + # if isinstance(e, And._ErrorStop): + if type(e) is And._ErrorStop: + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, doActions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException( + instring, len(instring), self.errmsg, self + ) + else: + loc, exprtokens = e._parse(instring, loc, doActions) + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # And([self, other]) + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e._checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def _generateDefaultName(self) -> str: + inner = " ".join(str(e) for e in self.exprs) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return f"{{{inner}}}" + + +class Or(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.search_string("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + matches = [] + fatals = [] + if all(e.callPreparse for e in self.exprs): + loc = self.preParse(instring, loc) + for e in self.exprs: + try: + loc2 = e.try_parse(instring, loc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + fatals.append(pfe) + maxException = None + maxExcLoc = -1 + except ParseException as err: + if not fatals: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. 
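+            # (each entry in matches is a (end_location, expression) pair,
+            # so sorting on item 0 in reverse order tries the longest
+            # tentative match first)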
+ matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, doActions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) + max_fatal = fatals[0] + raise max_fatal + + if maxException is not None: + # infer from this check that all alternatives failed at the current position + # so emit this collective error message instead of any single error message + if maxExcLoc == loc: + maxException.msg = self.errmsg + raise maxException + + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ixor__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # Or([self, other]) + + def _generateDefaultName(self) -> str: + return f"{{{' ^ '.join(str(e) for e in self.exprs)}}}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" + ) + warnings.warn(warning, stacklevel=3) + + return super()._setResultsName(name, listAllMatches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + more than one expression matches, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + if self.exprs: + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + + for e in self.exprs: + try: + return e._parse(instring, loc, doActions) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + raise + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + + if maxException is not None: + # infer from this check that all alternatives failed at the current position + # so emit this collective error message instead of any individual error message + if maxExcLoc == loc: + maxException.msg = self.errmsg + raise maxException + + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # MatchFirst([self, other]) + + def _generateDefaultName(self) -> str: + return f"{{{' | '.join(str(e) for e in self.exprs)}}}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" + ) + warnings.warn(warning, stacklevel=3) + + return super()._setResultsName(name, listAllMatches) + + +class Each(ParseExpression): + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. 
+ + Example:: + + color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) + + shape_spec.run_tests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def __iand__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # Each([self, other]) + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + if self.initExprGroups: + self.opt1map = dict( + (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) + ) + opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] + opt2 = [ + e + for e in self.exprs + if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) + ] + self.optionals = opt1 + opt2 + self.multioptionals = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, _MultipleMatch) + ] + self.multirequired = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, OneOrMore) + ] + self.required = [ + e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) + ] + self.required += self.multirequired + self.initExprGroups = False + + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + multis = self.multioptionals[:] + matchOrder = [] + + keepMatching = True + failed = [] + fatals = [] + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + multis + failed.clear() + fatals.clear() + for e in tmpExprs: + try: + tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + fatals.append(pfe) + failed.append(e) + except 
ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + # look for any ParseFatalExceptions + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) + max_fatal = fatals[0] + raise max_fatal + + if tmpReqd: + missing = ", ".join([str(e) for e in tmpReqd]) + raise ParseException( + instring, + loc, + f"Missing one or more required elements ({missing})", + ) + + # add any unmatched Opts, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] + + total_results = ParseResults([]) + for e in matchOrder: + loc, results = e._parse(instring, loc, doActions) + total_results += results + + return loc, total_results + + def _generateDefaultName(self) -> str: + return f"{{{' & '.join(str(e) for e in self.exprs)}}}" + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + super().__init__(savelist) + if isinstance(expr, str_type): + expr_str = typing.cast(str, expr) + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr_str) # type: ignore[call-arg] + elif issubclass(type(self), self._literalStringClass): + expr = Literal(expr_str) + else: + expr = self._literalStringClass(Literal(expr_str)) # type: ignore[assignment, call-arg] + expr = typing.cast(ParserElement, expr) + self.expr = expr + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.set_whitespace_chars( + expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars + ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def recurse(self) -> List[ParserElement]: + return [self.expr] if self.expr is not None else [] + + def parseImpl(self, instring, loc, doActions=True): + if self.expr is None: + raise ParseException(instring, loc, "No expression defined", self) + + try: + return self.expr._parse(instring, loc, doActions, callPreParse=False) + except ParseBaseException as pbe: + if not isinstance(self, Forward) or self.customName is not None: + if self.errmsg: + pbe.msg = self.errmsg + raise + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + super().leave_whitespace(recursive) + + if recursive: + if self.expr is not None: + self.expr = self.expr.copy() + self.expr.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + super().ignore_whitespace(recursive) + + if recursive: + if self.expr is not None: + self.expr = self.expr.copy() + self.expr.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if not isinstance(other, Suppress) or other not in self.ignoreExprs: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + + return self + + def streamline(self) -> ParserElement: + super().streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def _checkRecursion(self, parseElementList): + if self in parseElementList: + raise 
RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:({self.expr})" + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class IndentedBlock(ParseElementEnhance): + """ + Expression to match one or more expressions at a given indentation level. + Useful for parsing text where structure is implied by indentation (like Python source code). + """ + + class _Indent(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = f"expected indent at column {ref_col}" + self.add_condition(lambda s, l, t: col(l, s) == ref_col) + + class _IndentGreater(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = f"expected indent at column greater than {ref_col}" + self.add_condition(lambda s, l, t: col(l, s) > ref_col) + + def __init__( + self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True + ): + super().__init__(expr, savelist=True) + # if recursive: + # raise NotImplementedError("IndentedBlock with recursive is not implemented") + self._recursive = recursive + self._grouped = grouped + self.parent_anchor = 1 + + def parseImpl(self, instring, loc, doActions=True): + # advance parse position to non-whitespace by using an Empty() + # this should be the column to be used for all subsequent indented lines + anchor_loc = Empty().preParse(instring, loc) + + # see if self.expr matches at the current location - if not it will raise an exception + # and no further work is necessary + self.expr.try_parse(instring, anchor_loc, do_actions=doActions) + + indent_col = col(anchor_loc, instring) + peer_detect_expr = self._Indent(indent_col) + + inner_expr = Empty() + peer_detect_expr + self.expr + if self._recursive: + sub_indent = self._IndentGreater(indent_col) + nested_block = IndentedBlock( + self.expr, recursive=self._recursive, grouped=self._grouped + ) + nested_block.set_debug(self.debug) + nested_block.parent_anchor = indent_col + inner_expr += Opt(sub_indent + nested_block) + + inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") + block = OneOrMore(inner_expr) + + trailing_undent = self._Indent(self.parent_anchor) | StringEnd() + + if self._grouped: + wrapper = Group + else: + wrapper = lambda expr: expr + return (wrapper(block) + Optional(trailing_undent)).parseImpl( + instring, anchor_loc, doActions + ) + + +class AtStringStart(ParseElementEnhance): + """Matches if expression matches at the beginning of the parse + string:: + + AtStringStart(Word(nums)).parse_string("123") + # prints ["123"] + + AtStringStart(Word(nums)).parse_string(" 123") + # raises ParseException + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + raise ParseException(instring, loc, "not found at 
string start") + return super().parseImpl(instring, loc, doActions) + + +class AtLineStart(ParseElementEnhance): + r"""Matches if an expression matches at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (AtLineStart('AAA') + rest_of_line).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) != 1: + raise ParseException(instring, loc, "not found at line start") + return super().parseImpl(instring, loc, doActions) + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - ``expr`` - expression that must match prior to the current parse + location + - ``retreat`` - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, :class:`Literal`, + :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` + with a specified exact or maximum length, then the retreat + parameter is not required. Otherwise, retreat must be specified to + give a maximum number of characters to look back from + the current parse position for a lookbehind match. 
+
+    Example::
+
+        # VB-style variable names with type prefixes
+        int_var = PrecededBy("#") + pyparsing_common.identifier
+        str_var = PrecededBy("$") + pyparsing_common.identifier
+
+    """
+
+    def __init__(
+        self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
+    ):
+        super().__init__(expr)
+        self.expr = self.expr().leave_whitespace()
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.exact = False
+        if isinstance(expr, str_type):
+            expr = typing.cast(str, expr)
+            retreat = len(expr)
+            self.exact = True
+        elif isinstance(expr, (Literal, Keyword)):
+            retreat = expr.matchLen
+            self.exact = True
+        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
+            retreat = expr.maxLen
+            self.exact = True
+        elif isinstance(expr, PositionToken):
+            retreat = 0
+            self.exact = True
+        self.retreat = retreat
+        self.errmsg = f"not preceded by {expr}"
+        self.skipWhitespace = False
+        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
+
+    def parseImpl(self, instring, loc=0, doActions=True):
+        if self.exact:
+            if loc < self.retreat:
+                raise ParseException(instring, loc, self.errmsg)
+            start = loc - self.retreat
+            _, ret = self.expr._parse(instring, start)
+            return loc, ret
+
+        # retreat specified a maximum lookbehind window, iterate
+        test_expr = self.expr + StringEnd()
+        instring_slice = instring[max(0, loc - self.retreat) : loc]
+        last_expr = ParseException(instring, loc, self.errmsg)
+
+        for offset in range(1, min(loc, self.retreat + 1) + 1):
+            try:
+                # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
+                _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
+            except ParseBaseException as pbe:
+                last_expr = pbe
+            else:
+                break
+        else:
+            raise last_expr
+
+        return loc, ret
+
+
+class Located(ParseElementEnhance):
+    """
+    Decorates a returned token with its starting and ending
+    locations in the input string.
+
+    This helper adds the following results names:
+
+    - ``locn_start`` - location where matched expression begins
+    - ``locn_end`` - location where matched expression ends
+    - ``value`` - the actual parsed results
+
+    Be careful if the input text contains ``<TAB>`` characters, you
+    may want to call :class:`ParserElement.parse_with_tabs`
+
+    Example::
+
+        wd = Word(alphas)
+        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+
+    prints::
+
+        [0, ['ljsdf'], 5]
+        [8, ['lksdjjf'], 15]
+        [18, ['lkkjj'], 23]
+
+    """
+
+    def parseImpl(self, instring, loc, doActions=True):
+        start = loc
+        loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
+        ret_tokens = ParseResults([start, tokens, loc])
+        ret_tokens["locn_start"] = start
+        ret_tokens["value"] = tokens
+        ret_tokens["locn_end"] = loc
+        if self.resultsName:
+            # must return as a list, so that the name will be attached to the complete group
+            return loc, [ret_tokens]
+        else:
+            return loc, ret_tokens
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.
+    ``NotAny`` does *not* advance the parsing position within the
+    input string, it only verifies that the specified parse expression
+    does *not* match at the current position. Also, ``NotAny`` does
+    *not* skip over leading whitespace. ``NotAny`` always returns
+    a null token list. May be constructed using the ``'~'`` operator.
+ + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Opt(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infix_notation + boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + # do NOT use self.leave_whitespace(), don't want to propagate to exprs + # self.leave_whitespace() + self.skipWhitespace = False + + self.mayReturnEmpty = True + self.errmsg = f"Found unwanted token, {self.expr}" + + def parseImpl(self, instring, loc, doActions=True): + if self.expr.can_parse_next(instring, loc, do_actions=doActions): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def _generateDefaultName(self) -> str: + return f"~{{{self.expr}}}" + + +class _MultipleMatch(ParseElementEnhance): + def __init__( + self, + expr: Union[str, ParserElement], + stop_on: typing.Optional[Union[ParserElement, str]] = None, + *, + stopOn: typing.Optional[Union[ParserElement, str]] = None, + ): + super().__init__(expr) + stopOn = stopOn or stop_on + self.saveAsList = True + ender = stopOn + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender) -> ParserElement: + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + def parseImpl(self, instring, loc, doActions=True): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.try_parse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, doActions) + try: + hasIgnoreExprs = not not self.ignoreExprs + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, doActions) + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in [self.expr] + self.expr.recurse(): + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break + + return super()._setResultsName(name, listAllMatches) + + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. 
+ + Parameters: + + - ``expr`` - expression that must match one or more times + - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stop_on attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parse_string(text).pprint() + """ + + def _generateDefaultName(self) -> str: + return f"{{{self.expr}}}..." + + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + + - ``expr`` - expression that must match zero or more times + - ``stop_on`` - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - (default= ``None``) + + Example: similar to :class:`OneOrMore` + """ + + def __init__( + self, + expr: Union[str, ParserElement], + stop_on: typing.Optional[Union[ParserElement, str]] = None, + *, + stopOn: typing.Optional[Union[ParserElement, str]] = None, + ): + super().__init__(expr, stopOn=stopOn or stop_on) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + try: + return super().parseImpl(instring, loc, doActions) + except (ParseException, IndexError): + return loc, ParseResults([], name=self.resultsName) + + def _generateDefaultName(self) -> str: + return f"[{self.expr}]..." + + +class DelimitedList(ParseElementEnhance): + def __init__( + self, + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + min: typing.Optional[int] = None, + max: typing.Optional[int] = None, + *, + allow_trailing_delim: bool = False, + ): + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + If ``allow_trailing_delim`` is set to True, then the list may end with + a delimiter. 
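+
+        ``min`` and ``max`` bound how many list elements must or may be
+        present; for instance (a minimal sketch)::
+
+            # at least two comma-separated words, trailing comma allowed
+            two_plus = DelimitedList(Word(alphas), min=2, allow_trailing_delim=True)
+            print(two_plus.parse_string("aa,bb,cc,"))   # -> ['aa', 'bb', 'cc']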
+ + Example:: + + DelimitedList(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + DelimitedList(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + if isinstance(expr, str_type): + expr = ParserElement._literalStringClass(expr) + expr = typing.cast(ParserElement, expr) + + if min is not None and min < 1: + raise ValueError("min must be greater than 0") + + if max is not None and min is not None and max < min: + raise ValueError("max must be greater than, or equal to min") + + self.content = expr + self.raw_delim = str(delim) + self.delim = delim + self.combine = combine + if not combine: + self.delim = Suppress(delim) + self.min = min or 1 + self.max = max + self.allow_trailing_delim = allow_trailing_delim + + delim_list_expr = self.content + (self.delim + self.content) * ( + self.min - 1, + None if self.max is None else self.max - 1, + ) + if self.allow_trailing_delim: + delim_list_expr += Opt(self.delim) + + if self.combine: + delim_list_expr = Combine(delim_list_expr) + + super().__init__(delim_list_expr, savelist=True) + + def _generateDefaultName(self) -> str: + content_expr = self.content.streamline() + return f"{content_expr} [{self.raw_delim} {content_expr}]..." + + +class _NullToken: + def __bool__(self): + return False + + def __str__(self): + return "" + + +class Opt(ParseElementEnhance): + """ + Optional matching of the given expression. + + Parameters: + + - ``expr`` - expression that must match zero or more times + - ``default`` (optional) - value to be returned if the optional expression is not found. + + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) + zip.run_tests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + + __optionalNotMatched = _NullToken() + + def __init__( + self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched + ): + super().__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + self_expr = self.expr + try: + loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + default_value = self.defaultValue + if default_value is not self.__optionalNotMatched: + if self_expr.resultsName: + tokens = ParseResults([default_value]) + tokens[self_expr.resultsName] = default_value + else: + tokens = [default_value] + else: + tokens = [] + return loc, tokens + + def _generateDefaultName(self) -> str: + inner = str(self.expr) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return f"[{inner}]" + + +Optional = Opt + + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched + expression is found. + + Parameters: + + - ``expr`` - target expression marking the end of the data to be skipped + - ``include`` - if ``True``, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element + list) (default= ``False``). 
+ - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
+ comments) that might contain false matches to the target expression
+ - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
+ included in the skipped text; if found before the target expression is found,
+ the :class:`SkipTo` is not a match
+
+ Example::
+
+ report = '''
+ Outstanding Issues Report - 1 Jan 2000
+
+ # | Severity | Description | Days Open
+ -----+----------+-------------------------------------------+-----------
+ 101 | Critical | Intermittent system crash | 6
+ 94 | Cosmetic | Spelling error on Login ('log|n') | 14
+ 79 | Minor | System slow when running too many reports | 47
+ '''
+ integer = Word(nums)
+ SEP = Suppress('|')
+ # use SkipTo to simply match everything up until the next SEP
+ # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+ # - parse action will call token.strip() for each matched token, i.e., the description body
+ string_data = SkipTo(SEP, ignore=quoted_string)
+ string_data.set_parse_action(token_map(str.strip))
+ ticket_expr = (integer("issue_num") + SEP
+ + string_data("sev") + SEP
+ + string_data("desc") + SEP
+ + integer("days_open"))
+
+ for tkt in ticket_expr.search_string(report):
+ print(tkt.dump())
+
+ prints::
+
+ ['101', 'Critical', 'Intermittent system crash', '6']
+ - days_open: '6'
+ - desc: 'Intermittent system crash'
+ - issue_num: '101'
+ - sev: 'Critical'
+ ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+ - days_open: '14'
+ - desc: "Spelling error on Login ('log|n')"
+ - issue_num: '94'
+ - sev: 'Cosmetic'
+ ['79', 'Minor', 'System slow when running too many reports', '47']
+ - days_open: '47'
+ - desc: 'System slow when running too many reports'
+ - issue_num: '79'
+ - sev: 'Minor'
+ """
+
+ def __init__(
+ self,
+ other: Union[ParserElement, str],
+ include: bool = False,
+ ignore: typing.Optional[Union[ParserElement, str]] = None,
+ fail_on: typing.Optional[Union[ParserElement, str]] = None,
+ *,
+ failOn: typing.Optional[Union[ParserElement, str]] = None,
+ ):
+ super().__init__(other)
+ failOn = failOn or fail_on
+ self.ignoreExpr = ignore
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.includeMatch = include
+ self.saveAsList = False
+ if isinstance(failOn, str_type):
+ self.failOn = self._literalStringClass(failOn)
+ else:
+ self.failOn = failOn
+ self.errmsg = "No match found for " + str(self.expr)
+ self.ignorer = Empty().leave_whitespace()
+ self._update_ignorer()
+
+ def _update_ignorer(self):
+ # rebuild internal ignore expr from current ignore exprs and assigned ignoreExpr
+ self.ignorer.ignoreExprs.clear()
+ for e in self.expr.ignoreExprs:
+ self.ignorer.ignore(e)
+ if self.ignoreExpr:
+ self.ignorer.ignore(self.ignoreExpr)
+
+ def ignore(self, expr):
+ super().ignore(expr)
+ self._update_ignorer()
+
+ def parseImpl(self, instring, loc, doActions=True):
+ startloc = loc
+ instrlen = len(instring)
+ self_expr_parse = self.expr._parse
+ self_failOn_canParseNext = (
+ self.failOn.canParseNext if self.failOn is not None else None
+ )
+ ignorer_try_parse = self.ignorer.try_parse if self.ignorer.ignoreExprs else None
+
+ tmploc = loc
+ while tmploc <= instrlen:
+ if self_failOn_canParseNext is not None:
+ # break if failOn expression matches
+ if self_failOn_canParseNext(instring, tmploc):
+ break
+
+ if ignorer_try_parse is not None:
+ # advance past ignore expressions
+ prev_tmploc = tmploc
+ while 1:
+ try:
+ tmploc = 
ignorer_try_parse(instring, tmploc) + except ParseBaseException: + break + # see if all ignorers matched, but didn't actually ignore anything + if tmploc == prev_tmploc: + break + prev_tmploc = tmploc + + try: + self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) + skipresult += mat + + return loc, skipresult + + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the ``'<<'`` operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: + + fwd_expr << a | b | c + + will actually be evaluated as:: + + (fwd_expr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwd_expr << (a | b | c) + + Converting to use the ``'<<='`` operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. + """ + + def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): + self.caller_frame = traceback.extract_stack(limit=2)[0] + super().__init__(other, savelist=False) # type: ignore[arg-type] + self.lshift_line = None + + def __lshift__(self, other) -> "Forward": + if hasattr(self, "caller_frame"): + del self.caller_frame + if isinstance(other, str_type): + other = self._literalStringClass(other) + + if not isinstance(other, ParserElement): + return NotImplemented + + self.expr = other + self.streamlined = other.streamlined + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.set_whitespace_chars( + self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars + ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + self.lshift_line = traceback.extract_stack(limit=2)[-2] # type: ignore[assignment] + return self + + def __ilshift__(self, other) -> "Forward": + if not isinstance(other, ParserElement): + return NotImplemented + + return self << other + + def __or__(self, other) -> "ParserElement": + caller_line = traceback.extract_stack(limit=2)[-2] + if ( + __diag__.warn_on_match_first_with_lshift_operator + and caller_line == self.lshift_line + and Diagnostics.warn_on_match_first_with_lshift_operator + not in self.suppress_warnings_ + ): + warnings.warn( + "using '<<' operator with '|' is probably an error, use '<<='", + stacklevel=2, + ) + ret = super().__or__(other) + return ret + + def __del__(self): + # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' + if ( + self.expr is None + and __diag__.warn_on_assignment_to_Forward + and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ 
+ ): + warnings.warn_explicit( + "Forward defined here but no expression attached later using '<<=' or '<<'", + UserWarning, + filename=self.caller_frame.filename, + lineno=self.caller_frame.lineno, + ) + + def parseImpl(self, instring, loc, doActions=True): + if ( + self.expr is None + and __diag__.warn_on_parse_using_empty_Forward + and Diagnostics.warn_on_parse_using_empty_Forward + not in self.suppress_warnings_ + ): + # walk stack until parse_string, scan_string, search_string, or transform_string is found + parse_fns = ( + "parse_string", + "scan_string", + "search_string", + "transform_string", + ) + tb = traceback.extract_stack(limit=200) + for i, frm in enumerate(reversed(tb), start=1): + if frm.name in parse_fns: + stacklevel = i + 1 + break + else: + stacklevel = 2 + warnings.warn( + "Forward expression was never assigned a value, will not parse any input", + stacklevel=stacklevel, + ) + if not ParserElement._left_recursion_enabled: + return super().parseImpl(instring, loc, doActions) + # ## Bounded Recursion algorithm ## + # Recursion only needs to be processed at ``Forward`` elements, since they are + # the only ones that can actually refer to themselves. The general idea is + # to handle recursion stepwise: We start at no recursion, then recurse once, + # recurse twice, ..., until more recursion offers no benefit (we hit the bound). + # + # The "trick" here is that each ``Forward`` gets evaluated in two contexts + # - to *match* a specific recursion level, and + # - to *search* the bounded recursion level + # and the two run concurrently. The *search* must *match* each recursion level + # to find the best possible match. This is handled by a memo table, which + # provides the previous match to the next level match attempt. + # + # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. + # + # There is a complication since we not only *parse* but also *transform* via + # actions: We do not want to run the actions too often while expanding. Thus, + # we expand using `doActions=False` and only run `doActions=True` if the next + # recursion level is acceptable. + with ParserElement.recursion_lock: + memo = ParserElement.recursion_memos + try: + # we are parsing at a specific recursion expansion - use it as-is + prev_loc, prev_result = memo[loc, self, doActions] + if isinstance(prev_result, Exception): + raise prev_result + return prev_loc, prev_result.copy() + except KeyError: + act_key = (loc, self, True) + peek_key = (loc, self, False) + # we are searching for the best recursion expansion - keep on improving + # both `doActions` cases must be tracked separately here! 
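+ # (editor's note) the seed below pairs loc - 1 with a synthetic
+ # "recursion without base case" ParseException, so any real match is
+ # an improvement; the loop that follows keeps re-parsing until
+ # new_loc stops advancing past prev_loc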
+ prev_loc, prev_peek = memo[peek_key] = ( + loc - 1, + ParseException( + instring, loc, "Forward recursion without base case", self + ), + ) + if doActions: + memo[act_key] = memo[peek_key] + while True: + try: + new_loc, new_peek = super().parseImpl(instring, loc, False) + except ParseException: + # we failed before getting any match – do not hide the error + if isinstance(prev_peek, Exception): + raise + new_loc, new_peek = prev_loc, prev_peek + # the match did not get better: we are done + if new_loc <= prev_loc: + if doActions: + # replace the match for doActions=False as well, + # in case the action did backtrack + prev_loc, prev_result = memo[peek_key] = memo[act_key] + del memo[peek_key], memo[act_key] + return prev_loc, prev_result.copy() + del memo[peek_key] + return prev_loc, prev_peek.copy() + # the match did get better: see if we can improve further + if doActions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = False + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = True + return self + + def streamline(self) -> ParserElement: + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self) -> str: + # Avoid infinite recursion by setting a temporary _defaultName + self._defaultName = ": ..." + + # Use the string representation of main expression. + retString = "..." + try: + if self.expr is not None: + retString = str(self.expr)[:1000] + else: + retString = "None" + finally: + return f"{type(self).__name__}: {retString}" + + def copy(self) -> ParserElement: + if self.expr is not None: + return super().copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, list_all_matches=False): + # fmt: off + if ( + __diag__.warn_name_set_on_empty_Forward + and Diagnostics.warn_name_set_on_empty_Forward not in self.suppress_warnings_ + and self.expr is None + ): + warning = ( + "warn_name_set_on_empty_Forward:" + f" setting results name {name!r} on {type(self).__name__} expression" + " that has no contained expression" + ) + warnings.warn(warning, stacklevel=3) + # fmt: on + + return super()._setResultsName(name, list_all_matches) + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseExpression`, for converting parsed results. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist=False): + super().__init__(expr) # , savelist) + self.saveAsList = False + + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. 
+ By default, the matching patterns must also be contiguous in the
+ input string; this can be disabled by specifying
+ ``'adjacent=False'`` in the constructor.
+
+ Example::
+
+ real = Word(nums) + '.' + Word(nums)
+ print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
+ # will also erroneously match the following
+ print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
+
+ real = Combine(Word(nums) + '.' + Word(nums))
+ print(real.parse_string('3.1416')) # -> ['3.1416']
+ # no match when there are internal spaces
+ print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
+ """
+
+ def __init__(
+ self,
+ expr: ParserElement,
+ join_string: str = "",
+ adjacent: bool = True,
+ *,
+ joinString: typing.Optional[str] = None,
+ ):
+ super().__init__(expr)
+ joinString = joinString if joinString is not None else join_string
+ # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+ if adjacent:
+ self.leave_whitespace()
+ self.adjacent = adjacent
+ self.skipWhitespace = True
+ self.joinString = joinString
+ self.callPreparse = True
+
+ def ignore(self, other) -> ParserElement:
+ if self.adjacent:
+ ParserElement.ignore(self, other)
+ else:
+ super().ignore(other)
+ return self
+
+ def postParse(self, instring, loc, tokenlist):
+ retToks = tokenlist.copy()
+ del retToks[:]
+ retToks += ParseResults(
+ ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
+ )
+
+ if self.resultsName and retToks.haskeys():
+ return [retToks]
+ else:
+ return retToks
+
+
+class Group(TokenConverter):
+ """Converter to return the matched tokens as a list - useful for
+ returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
+
+ The optional ``aslist`` argument when set to True will return the
+ parsed tokens as a Python list instead of a pyparsing ParseResults.
+
+ Example::
+
+ ident = Word(alphas)
+ num = Word(nums)
+ term = ident | num
+ func = ident + Opt(DelimitedList(term))
+ print(func.parse_string("fn a, b, 100"))
+ # -> ['fn', 'a', 'b', '100']
+
+ func = ident + Group(Opt(DelimitedList(term)))
+ print(func.parse_string("fn a, b, 100"))
+ # -> ['fn', ['a', 'b', '100']]
+ """
+
+ def __init__(self, expr: ParserElement, aslist: bool = False):
+ super().__init__(expr)
+ self.saveAsList = True
+ self._asPythonList = aslist
+
+ def postParse(self, instring, loc, tokenlist):
+ if self._asPythonList:
+ return ParseResults.List(
+ tokenlist.asList()
+ if isinstance(tokenlist, ParseResults)
+ else list(tokenlist)
+ )
+
+ return [tokenlist]
+
+
+class Dict(TokenConverter):
+ """Converter to return a repetitive expression as a list, but also
+ as a dictionary. Each element can also be referenced using the first
+ token in the expression as its key. Useful for tabular report
+ scraping when the first column can be used as an item key.
+
+ The optional ``asdict`` argument when set to True will return the
+ parsed tokens as a Python dict instead of a pyparsing ParseResults.
+
+ Example::
+
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
+
+ # print attributes as plain groups
+ print(attr_expr[1, ...].parse_string(text).dump())
+
+ # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
+ result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
+ print(result.dump())
+
+ # access named fields as dict entries, or output as dict
+ print(result['shape'])
+ print(result.as_dict())
+
+ prints::
+
+ ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: 'light blue'
+ - posn: 'upper left'
+ - shape: 'SQUARE'
+ - texture: 'burlap'
+ SQUARE
+ {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+
+ See :class:`ParseResults` for more examples of accessing fields by results name.
+ """
+
+ def __init__(self, expr: ParserElement, asdict: bool = False):
+ super().__init__(expr)
+ self.saveAsList = True
+ self._asPythonDict = asdict
+
+ def postParse(self, instring, loc, tokenlist):
+ for i, tok in enumerate(tokenlist):
+ if len(tok) == 0:
+ continue
+
+ ikey = tok[0]
+ if isinstance(ikey, int):
+ ikey = str(ikey).strip()
+
+ if len(tok) == 1:
+ tokenlist[ikey] = _ParseResultsWithOffset("", i)
+
+ elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
+ tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
+
+ else:
+ try:
+ dictvalue = tok.copy() # ParseResults(i)
+ except Exception:
+ exc = TypeError(
+ "could not extract dict values from parsed results"
+ " - Dict expression must contain Grouped expressions"
+ )
+ raise exc from None
+
+ del dictvalue[0]
+
+ if len(dictvalue) != 1 or (
+ isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
+ ):
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
+ else:
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
+
+ if self._asPythonDict:
+ return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
+
+ return [tokenlist] if self.resultsName else tokenlist
+
+
+class Suppress(TokenConverter):
+ """Converter for ignoring the results of a parsed expression.
+
+ Example::
+
+ source = "a, b, c,d"
+ wd = Word(alphas)
+ wd_list1 = wd + (',' + wd)[...]
+ print(wd_list1.parse_string(source))
+
+ # often, delimiters that are useful during parsing are just in the
+ # way afterward - use Suppress to keep them out of the parsed output
+ wd_list2 = wd + (Suppress(',') + wd)[...]
+ print(wd_list2.parse_string(source))
+
+ # Skipped text (using '...') can be suppressed as well
+ source = "lead in START relevant text END trailing text"
+ start_marker = Keyword("START")
+ end_marker = Keyword("END")
+ find_body = Suppress(...) + start_marker + ... + end_marker
+ print(find_body.parse_string(source))
+
+ prints::
+
+ ['a', ',', 'b', ',', 'c', ',', 'd']
+ ['a', 'b', 'c', 'd']
+ ['START', 'relevant text ', 'END']
+
+ (See also :class:`DelimitedList`.)
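+
+ Any expression can also be suppressed in place by calling its
+ ``suppress()`` method, which wraps it in a :class:`Suppress` (an
+ editor's illustrative sketch, reusing ``wd`` from above)::
+
+ wd_list3 = wd + (Literal(',').suppress() + wd)[...]
+ print(wd_list3.parse_string("a, b, c,d")) # -> ['a', 'b', 'c', 'd']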
+ """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + if expr is ...: + expr = _PendingSkip(NoMatch()) + super().__init__(expr) + + def __add__(self, other) -> "ParserElement": + if isinstance(self.expr, _PendingSkip): + return Suppress(SkipTo(other)) + other + + return super().__add__(other) + + def __sub__(self, other) -> "ParserElement": + if isinstance(self.expr, _PendingSkip): + return Suppress(SkipTo(other)) - other + + return super().__sub__(other) + + def postParse(self, instring, loc, tokenlist): + return [] + + def suppress(self) -> ParserElement: + return self + + +def trace_parse_action(f: ParseAction) -> ParseAction: + """Decorator for debugging parse actions. + + When the parse action is called, this decorator will print + ``">> entering method-name(line:, , )"``. + When the parse action completes, the decorator will print + ``"<<"`` followed by the returned value, or any exception that the parse action raised. + + Example:: + + wd = Word(alphas) + + @trace_parse_action + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = wd[1, ...].set_parse_action(remove_duplicate_chars) + print(wds.parse_string("slkdjs sld sldd sdlf sdljf")) + + prints:: + + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + < 3: + thisFunc = f"{type(paArgs[0]).__name__}.{thisFunc}" + sys.stderr.write(f">>entering {thisFunc}(line: {line(l, s)!r}, {l}, {t!r})\n") + try: + ret = f(*paArgs) + except Exception as exc: + sys.stderr.write(f"< str: + r"""Helper to easily define string ranges for use in :class:`Word` + construction. Borrows syntax from regexp ``'[]'`` string range + definitions:: + + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + + The input string must be enclosed in []'s, and the returned string + is the expanded character set joined into a single string. The + values enclosed in the []'s may be: + + - a single character + - an escaped character with a leading backslash (such as ``\-`` + or ``\]``) + - an escaped hex character with a leading ``'\x'`` + (``\x21``, which is a ``'!'`` character) (``\0x##`` + is also supported for backwards compatibility) + - an escaped octal character with a leading ``'\0'`` + (``\041``, which is a ``'!'`` character) + - a range of any of the above, separated by a dash (``'a-z'``, + etc.) + - any combination of the above (``'aeiouy'``, + ``'a-zA-Z0-9_$'``, etc.) + """ + _expanded = lambda p: ( + p + if not isinstance(p, ParseResults) + else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) + ) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) + except Exception as e: + return "" + + +def token_map(func, *args) -> ParseAction: + """Helper to define a parse action by mapping a function to all + elements of a :class:`ParseResults` list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, + which will convert the parsed data to an integer using base 16. 
+ + Example (compare the last to example in :class:`ParserElement.transform_string`:: + + hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) + hex_ints.run_tests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).set_parse_action(token_map(str.upper)) + upperword[1, ...].run_tests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).set_parse_action(token_map(str.title)) + wd[1, ...].set_parse_action(' '.join).run_tests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + pa.__name__ = func_name + + return pa + + +def autoname_elements() -> None: + """ + Utility to simplify mass-naming of parser elements, for + generating railroad diagram with named subdiagrams. + """ + calling_frame = sys._getframe().f_back + if calling_frame is None: + return + calling_frame = typing.cast(types.FrameType, calling_frame) + for name, var in calling_frame.f_locals.items(): + if isinstance(var, ParserElement) and not var.customName: + var.set_name(name) + + +dbl_quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' +).set_name("string enclosed in double quotes") + +sgl_quoted_string = Combine( + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("string enclosed in single quotes") + +quoted_string = Combine( + (Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( + "double quoted string" + ) + | (Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( + "single quoted string" + ) +).set_name("quoted string using single or double quotes") + +python_quoted_string = Combine( + (Regex(r'"""(?:[^"\\]|""(?!")|"(?!"")|\\.)*', flags=re.MULTILINE) + '"""').set_name( + "multiline double quoted string" + ) + ^ ( + Regex(r"'''(?:[^'\\]|''(?!')|'(?!'')|\\.)*", flags=re.MULTILINE) + "'''" + ).set_name("multiline single quoted string") + ^ (Regex(r'"(?:[^"\n\r\\]|(?:\\")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( + "double quoted string" + ) + ^ (Regex(r"'(?:[^'\n\r\\]|(?:\\')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( + "single quoted string" + ) +).set_name("Python quoted string") + +unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") + + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: List[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + +# backward compatibility names +# fmt: off +sglQuotedString = sgl_quoted_string +dblQuotedString = dbl_quoted_string +quotedString = quoted_string +unicodeString = unicode_string +lineStart = line_start +lineEnd = line_end +stringStart = string_start +stringEnd = string_end +nullDebugAction = replaced_by_pep8("nullDebugAction", null_debug_action) +traceParseAction = replaced_by_pep8("traceParseAction", trace_parse_action) +conditionAsParseAction = 
replaced_by_pep8("conditionAsParseAction", condition_as_parse_action) +tokenMap = replaced_by_pep8("tokenMap", token_map) +# fmt: on diff --git a/external/python/pyparsing/diagram/__init__.py b/external/python/pyparsing/diagram/__init__.py new file mode 100644 index 00000000..700d0b56 --- /dev/null +++ b/external/python/pyparsing/diagram/__init__.py @@ -0,0 +1,656 @@ +# mypy: ignore-errors +import railroad +import pyparsing +import typing +from typing import ( + List, + NamedTuple, + Generic, + TypeVar, + Dict, + Callable, + Set, + Iterable, +) +from jinja2 import Template +from io import StringIO +import inspect + + +jinja2_template_source = """\ +{% if not embed %} + + + +{% endif %} + {% if not head %} + + {% else %} + {{ head | safe }} + {% endif %} +{% if not embed %} + + +{% endif %} +{{ body | safe }} +{% for diagram in diagrams %} +
+

{{ diagram.title }}

+
{{ diagram.text }}
+
+ {{ diagram.svg }} +
+
+{% endfor %} +{% if not embed %} + + +{% endif %} +""" + +template = Template(jinja2_template_source) + +# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet +NamedDiagram = NamedTuple( + "NamedDiagram", + [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], +) +""" +A simple structure for associating a name with a railroad diagram +""" + +T = TypeVar("T") + + +class EachItem(railroad.Group): + """ + Custom railroad item to compose a: + - Group containing a + - OneOrMore containing a + - Choice of the elements in the Each + with the group label indicating that all must be matched + """ + + all_label = "[ALL]" + + def __init__(self, *items): + choice_item = railroad.Choice(len(items) - 1, *items) + one_or_more_item = railroad.OneOrMore(item=choice_item) + super().__init__(one_or_more_item, label=self.all_label) + + +class AnnotatedItem(railroad.Group): + """ + Simple subclass of Group that creates an annotation label + """ + + def __init__(self, label: str, item): + super().__init__(item=item, label="[{}]".format(label) if label else label) + + +class EditablePartial(Generic[T]): + """ + Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been + constructed. + """ + + # We need this here because the railroad constructors actually transform the data, so can't be called until the + # entire tree is assembled + + def __init__(self, func: Callable[..., T], args: list, kwargs: dict): + self.func = func + self.args = args + self.kwargs = kwargs + + @classmethod + def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": + """ + If you call this function in the same way that you would call the constructor, it will store the arguments + as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) + """ + return EditablePartial(func=func, args=list(args), kwargs=kwargs) + + @property + def name(self): + return self.kwargs["name"] + + def __call__(self) -> T: + """ + Evaluate the partial and return the result + """ + args = self.args.copy() + kwargs = self.kwargs.copy() + + # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
+ # args=['list', 'of', 'things']) + arg_spec = inspect.getfullargspec(self.func) + if arg_spec.varargs in self.kwargs: + args += kwargs.pop(arg_spec.varargs) + + return self.func(*args, **kwargs) + + +def railroad_to_html(diagrams: List[NamedDiagram], embed=False, **kwargs) -> str: + """ + Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams + :params kwargs: kwargs to be passed in to the template + """ + data = [] + for diagram in diagrams: + if diagram.diagram is None: + continue + io = StringIO() + try: + css = kwargs.get('css') + diagram.diagram.writeStandalone(io.write, css=css) + except AttributeError: + diagram.diagram.writeSvg(io.write) + title = diagram.name + if diagram.index == 0: + title += " (root)" + data.append({"title": title, "text": "", "svg": io.getvalue()}) + + return template.render(diagrams=data, embed=embed, **kwargs) + + +def resolve_partial(partial: "EditablePartial[T]") -> T: + """ + Recursively resolves a collection of Partials into whatever type they are + """ + if isinstance(partial, EditablePartial): + partial.args = resolve_partial(partial.args) + partial.kwargs = resolve_partial(partial.kwargs) + return partial() + elif isinstance(partial, list): + return [resolve_partial(x) for x in partial] + elif isinstance(partial, dict): + return {key: resolve_partial(x) for key, x in partial.items()} + else: + return partial + + +def to_railroad( + element: pyparsing.ParserElement, + diagram_kwargs: typing.Optional[dict] = None, + vertical: int = 3, + show_results_names: bool = False, + show_groups: bool = False, +) -> List[NamedDiagram]: + """ + Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram + creation if you want to access the Railroad tree before it is converted to HTML + :param element: base element of the parser being diagrammed + :param diagram_kwargs: kwargs to pass to the Diagram() constructor + :param vertical: (optional) - int - limit at which number of alternatives should be + shown vertically instead of horizontally + :param show_results_names - bool to indicate whether results name annotations should be + included in the diagram + :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled + surrounding box + """ + # Convert the whole tree underneath the root + lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) + _to_diagram_element( + element, + lookup=lookup, + parent=None, + vertical=vertical, + show_results_names=show_results_names, + show_groups=show_groups, + ) + + root_id = id(element) + # Convert the root if it hasn't been already + if root_id in lookup: + if not element.customName: + lookup[root_id].name = "" + lookup[root_id].mark_for_extraction(root_id, lookup, force=True) + + # Now that we're finished, we can convert from intermediate structures into Railroad elements + diags = list(lookup.diagrams.values()) + if len(diags) > 1: + # collapse out duplicate diags with the same name + seen = set() + deduped_diags = [] + for d in diags: + # don't extract SkipTo elements, they are uninformative as subdiagrams + if d.name == "...": + continue + if d.name is not None and d.name not in seen: + seen.add(d.name) + deduped_diags.append(d) + resolved = [resolve_partial(partial) for partial in deduped_diags] + else: + # special case - if just one diagram, always display it, even if + # it has no name + resolved = [resolve_partial(partial) for partial in diags] + return sorted(resolved, key=lambda diag: diag.index) + + 
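+
+# (editor's note) a minimal usage sketch for the two entry points above,
+# assuming the optional railroad-diagrams and jinja2 dependencies are
+# installed and this vendored package is importable as ``pyparsing``:
+#
+# import pyparsing as pp
+# from pyparsing.diagram import to_railroad, railroad_to_html
+#
+# int_list = pp.DelimitedList(pp.Word(pp.nums).set_name("integer")).set_name("int_list")
+# with open("diagram.html", "w", encoding="utf-8") as f:
+#     f.write(railroad_to_html(to_railroad(int_list)))
+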
+def _should_vertical( + specification: int, exprs: Iterable[pyparsing.ParserElement] +) -> bool: + """ + Returns true if we should return a vertical list of elements + """ + if specification is None: + return False + else: + return len(_visible_exprs(exprs)) >= specification + + +class ElementState: + """ + State recorded for an individual pyparsing Element + """ + + # Note: this should be a dataclass, but we have to support Python 3.5 + def __init__( + self, + element: pyparsing.ParserElement, + converted: EditablePartial, + parent: EditablePartial, + number: int, + name: str = None, + parent_index: typing.Optional[int] = None, + ): + #: The pyparsing element that this represents + self.element: pyparsing.ParserElement = element + #: The name of the element + self.name: typing.Optional[str] = name + #: The output Railroad element in an unconverted state + self.converted: EditablePartial = converted + #: The parent Railroad element, which we store so that we can extract this if it's duplicated + self.parent: EditablePartial = parent + #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram + self.number: int = number + #: The index of this inside its parent + self.parent_index: typing.Optional[int] = parent_index + #: If true, we should extract this out into a subdiagram + self.extract: bool = False + #: If true, all of this element's children have been filled out + self.complete: bool = False + + def mark_for_extraction( + self, el_id: int, state: "ConverterState", name: str = None, force: bool = False + ): + """ + Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram + :param el_id: id of the element + :param state: element/diagram state tracker + :param name: name to use for this element's text + :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the + root element when we know we're finished + """ + self.extract = True + + # Set the name + if not self.name: + if name: + # Allow forcing a custom name + self.name = name + elif self.element.customName: + self.name = self.element.customName + else: + self.name = "" + + # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children + # to be added + # Also, if this is just a string literal etc, don't bother extracting it + if force or (self.complete and _worth_extracting(self.element)): + state.extract_into_diagram(el_id) + + +class ConverterState: + """ + Stores some state that persists between recursions into the element tree + """ + + def __init__(self, diagram_kwargs: typing.Optional[dict] = None): + #: A dictionary mapping ParserElements to state relating to them + self._element_diagram_states: Dict[int, ElementState] = {} + #: A dictionary mapping ParserElement IDs to subdiagrams generated from them + self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} + #: The index of the next unnamed element + self.unnamed_index: int = 1 + #: The index of the next element. 
This is used for sorting + self.index: int = 0 + #: Shared kwargs that are used to customize the construction of diagrams + self.diagram_kwargs: dict = diagram_kwargs or {} + self.extracted_diagram_names: Set[str] = set() + + def __setitem__(self, key: int, value: ElementState): + self._element_diagram_states[key] = value + + def __getitem__(self, key: int) -> ElementState: + return self._element_diagram_states[key] + + def __delitem__(self, key: int): + del self._element_diagram_states[key] + + def __contains__(self, key: int): + return key in self._element_diagram_states + + def generate_unnamed(self) -> int: + """ + Generate a number used in the name of an otherwise unnamed diagram + """ + self.unnamed_index += 1 + return self.unnamed_index + + def generate_index(self) -> int: + """ + Generate a number used to index a diagram + """ + self.index += 1 + return self.index + + def extract_into_diagram(self, el_id: int): + """ + Used when we encounter the same token twice in the same tree. When this + happens, we replace all instances of that token with a terminal, and + create a new subdiagram for the token + """ + position = self[el_id] + + # Replace the original definition of this element with a regular block + if position.parent: + ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) + if "item" in position.parent.kwargs: + position.parent.kwargs["item"] = ret + elif "items" in position.parent.kwargs: + position.parent.kwargs["items"][position.parent_index] = ret + + # If the element we're extracting is a group, skip to its content but keep the title + if position.converted.func == railroad.Group: + content = position.converted.kwargs["item"] + else: + content = position.converted + + self.diagrams[el_id] = EditablePartial.from_call( + NamedDiagram, + name=position.name, + diagram=EditablePartial.from_call( + railroad.Diagram, content, **self.diagram_kwargs + ), + index=position.number, + ) + + del self[el_id] + + +def _worth_extracting(element: pyparsing.ParserElement) -> bool: + """ + Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children + themselves have children, then its complex enough to extract + """ + children = element.recurse() + return any(child.recurse() for child in children) + + +def _apply_diagram_item_enhancements(fn): + """ + decorator to ensure enhancements to a diagram item (such as results name annotations) + get applied on return from _to_diagram_element (we do this since there are several + returns in _to_diagram_element) + """ + + def _inner( + element: pyparsing.ParserElement, + parent: typing.Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + show_groups: bool = False, + ) -> typing.Optional[EditablePartial]: + ret = fn( + element, + parent, + lookup, + vertical, + index, + name_hint, + show_results_names, + show_groups, + ) + + # apply annotation for results name, if present + if show_results_names and ret is not None: + element_results_name = element.resultsName + if element_results_name: + # add "*" to indicate if this is a "list all results" name + element_results_name += "" if element.modalResults else "*" + ret = EditablePartial.from_call( + railroad.Group, item=ret, label=element_results_name + ) + + return ret + + return _inner + + +def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): + non_diagramming_exprs = ( + pyparsing.ParseElementEnhance, + pyparsing.PositionToken, + pyparsing.And._ErrorStop, + ) + return [ + e + for e in exprs + if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) + ] + + +@_apply_diagram_item_enhancements +def _to_diagram_element( + element: pyparsing.ParserElement, + parent: typing.Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + show_groups: bool = False, +) -> typing.Optional[EditablePartial]: + """ + Recursively converts a PyParsing Element to a railroad Element + :param lookup: The shared converter state that keeps track of useful things + :param index: The index of this element within the parent + :param parent: The parent of this element in the output tree + :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), + it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never + do so + :param name_hint: If provided, this will override the generated name + :param show_results_names: bool flag indicating whether to add annotations for results names + :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed + :param show_groups: bool flag indicating whether to show groups using bounding box + """ + exprs = element.recurse() + name = name_hint or element.customName or type(element).__name__ + + # Python's id() is used to provide a unique identifier for elements + el_id = id(element) + + element_results_name = element.resultsName + + # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram + if not element.customName: + if isinstance( + element, + ( + # pyparsing.TokenConverter, + # pyparsing.Forward, + pyparsing.Located, + ), + ): + # However, if this element has a useful custom name, and its child does not, we can pass it on to the child + if exprs: + if not exprs[0].customName: + propagated_name = name + else: + propagated_name = None + + return _to_diagram_element( + element.expr, + parent=parent, + lookup=lookup, + vertical=vertical, + index=index, + name_hint=propagated_name, + show_results_names=show_results_names, + show_groups=show_groups, + ) + + # If the element isn't worth extracting, we always treat it as the first time we say it + if _worth_extracting(element): + if el_id in lookup: + # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, + # so we have to extract it into a new diagram. + looked_up = lookup[el_id] + looked_up.mark_for_extraction(el_id, lookup, name=name_hint) + ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) + return ret + + elif el_id in lookup.diagrams: + # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we + # just put in a marker element that refers to the sub-diagram + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + return ret + + # Recursively convert child elements + # Here we find the most relevant Railroad element for matching pyparsing Element + # We use ``items=[]`` here to hold the place for where the child elements will go once created + if isinstance(element, pyparsing.And): + # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat + # (all will have the same name, and resultsName) + if not exprs: + return None + if len(set((e.name, e.resultsName) for e in exprs)) == 1: + ret = EditablePartial.from_call( + railroad.OneOrMore, item="", repeat=str(len(exprs)) + ) + elif _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Stack, items=[]) + else: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): + if not exprs: + return None + if _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) + else: + ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) + elif isinstance(element, pyparsing.Each): + if not exprs: + return None + ret = EditablePartial.from_call(EachItem, items=[]) + elif isinstance(element, pyparsing.NotAny): + ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") + elif isinstance(element, pyparsing.FollowedBy): + ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") + elif isinstance(element, pyparsing.PrecededBy): + ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") + elif isinstance(element, pyparsing.Group): + if show_groups: + ret = EditablePartial.from_call(AnnotatedItem, label="", item="") + else: + ret = EditablePartial.from_call(railroad.Group, label="", item="") + elif isinstance(element, pyparsing.TokenConverter): + label = type(element).__name__.lower() + if label == "tokenconverter": + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + else: + ret = EditablePartial.from_call(AnnotatedItem, label=label, item="") + elif isinstance(element, pyparsing.Opt): + ret = EditablePartial.from_call(railroad.Optional, item="") + elif isinstance(element, pyparsing.OneOrMore): + ret = EditablePartial.from_call(railroad.OneOrMore, item="") + elif isinstance(element, pyparsing.ZeroOrMore): + ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") + elif isinstance(element, pyparsing.Group): + ret = EditablePartial.from_call( + railroad.Group, item=None, label=element_results_name + ) + elif isinstance(element, pyparsing.Empty) and not element.customName: + # Skip unnamed "Empty" elements + ret = None + elif isinstance(element, pyparsing.ParseElementEnhance): + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif len(exprs) > 0 and not element_results_name: + ret = EditablePartial.from_call(railroad.Group, item="", label=name) + elif len(exprs) > 0: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + else: + terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) + ret = terminal + + if ret is None: + return + + # Indicate this element's position in the tree so we can extract it if necessary + lookup[el_id] = ElementState( + element=element, + converted=ret, + parent=parent, + parent_index=index, + number=lookup.generate_index(), + ) + if element.customName: + lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) + + i = 0 + for expr in exprs: + # Add a placeholder index in case we have to extract the child before we even add it to the parent + if "items" in ret.kwargs: + ret.kwargs["items"].insert(i, None) + + item = _to_diagram_element( + expr, + parent=ret, + lookup=lookup, + vertical=vertical, + index=i, + show_results_names=show_results_names, + show_groups=show_groups, + ) + + # Some elements don't need to be shown in the diagram + if item is not None: + if "item" in ret.kwargs: + ret.kwargs["item"] = item + elif "items" in ret.kwargs: + # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal + ret.kwargs["items"][i] = item + i += 1 + elif "items" in ret.kwargs: + # If we're supposed to skip this element, remove it from the parent + del ret.kwargs["items"][i] + + # If all this items children are none, skip this item + if ret and ( + ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) + or ("item" in ret.kwargs and ret.kwargs["item"] is None) + ): + ret = EditablePartial.from_call(railroad.Terminal, name) + + # Mark this element as "complete", ie it has all of its children + if el_id in lookup: + lookup[el_id].complete = True + + if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: + lookup.extract_into_diagram(el_id) + if ret is not None: + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + + return ret diff --git a/external/python/pyparsing/exceptions.py b/external/python/pyparsing/exceptions.py new file mode 
100644 index 00000000..1aaea56f --- /dev/null +++ b/external/python/pyparsing/exceptions.py @@ -0,0 +1,300 @@ +# exceptions.py + +import re +import sys +import typing + +from .util import ( + col, + line, + lineno, + _collapse_string_to_ranges, + replaced_by_pep8, +) +from .unicode import pyparsing_unicode as ppu + + +class _ExceptionWordUnicodeSet( + ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic +): + pass + + +_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums) +_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") + + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + + loc: int + msg: str + pstr: str + parser_element: typing.Any # "ParserElement" + args: typing.Tuple[str, int, typing.Optional[str]] + + __slots__ = ( + "loc", + "msg", + "pstr", + "parser_element", + "args", + ) + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + pstr: str, + loc: int = 0, + msg: typing.Optional[str] = None, + elem=None, + ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parser_element = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc, depth=16): + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + """ + import inspect + from .core import ParserElement + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(" " * (exc.column - 1) + "^") + ret.append(f"{type(exc).__name__}: {exc}") + + if depth <= 0: + return "\n".join(ret) + + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for ff in callers[-depth:]: + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}") + + elif f_self is not None: + self_type = type(f_self) + ret.append(f"{self_type.__module__}.{self_type.__name__}") + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue + + ret.append(code.co_name) + + depth -= 1 + if not depth: + break + + return "\n".join(ret) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element) + + @property + def line(self) -> str: + """ + Return the line of text where the exception occurred. 
+ """ + return line(self.loc, self.pstr) + + @property + def lineno(self) -> int: + """ + Return the 1-based line number of text where the exception occurred. + """ + return lineno(self.loc, self.pstr) + + @property + def col(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + @property + def column(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + # pre-PEP8 compatibility + @property + def parserElement(self): + return self.parser_element + + @parserElement.setter + def parserElement(self, elem): + self.parser_element = elem + + def __str__(self) -> str: + if self.pstr: + if self.loc >= len(self.pstr): + foundstr = ", found end of text" + else: + # pull out next word at error location + found_match = _exception_word_extractor.match(self.pstr, self.loc) + if found_match is not None: + found = found_match.group(0) + else: + found = self.pstr[self.loc : self.loc + 1] + foundstr = (", found %r" % found).replace(r"\\", "\\") + else: + foundstr = "" + return f"{self.msg}{foundstr} (at char {self.loc}), (line:{self.lineno}, col:{self.column})" + + def __repr__(self): + return str(self) + + def mark_input_line( + self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<" + ) -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join( + (line_str[:line_column], markerString, line_str[line_column:]) + ) + return line_str.strip() + + def explain(self, depth=16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example:: + + # an expression to parse 3 integers + expr = pp.Word(pp.nums) * 3 + try: + # a failing parse - the third integer is prefixed with "A" + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. 
To get the full listing + of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` + """ + return self.explain_exception(self, depth) + + # fmt: off + markInputline = replaced_by_pep8("markInputline", mark_input_line) + # fmt: on + + +class ParseException(ParseBaseException): + """ + Exception thrown when a parse expression doesn't match the input string + + Example:: + + integer = Word(nums).set_name("integer") + try: + integer.parse_string("ABC") + except ParseException as pe: + print(pe) + print(f"column: {pe.column}") + + prints:: + + Expected integer (at char 0), (line:1, col:1) column: 1 + + """ + + +class ParseFatalException(ParseBaseException): + """ + User-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately + """ + + +class ParseSyntaxException(ParseFatalException): + """ + Just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + + +class RecursiveGrammarException(Exception): + """ + Exception thrown by :class:`ParserElement.validate` if the + grammar could be left-recursive; parser may need to enable + left recursion using :class:`ParserElement.enable_left_recursion` + """ + + def __init__(self, parseElementList): + self.parseElementTrace = parseElementList + + def __str__(self) -> str: + return f"RecursiveGrammarException: {self.parseElementTrace}" diff --git a/external/python/pyparsing/helpers.py b/external/python/pyparsing/helpers.py new file mode 100644 index 00000000..dcfdb8fe --- /dev/null +++ b/external/python/pyparsing/helpers.py @@ -0,0 +1,1078 @@ +# helpers.py +import html.entities +import re +import sys +import typing + +from . import __diag__ +from .core import * +from .util import ( + _bslash, + _flatten, + _escape_regex_range_chars, + replaced_by_pep8, +) + + +# +# global helpers +# +def counted_array( + expr: ParserElement, + int_expr: typing.Optional[ParserElement] = None, + *, + intExpr: typing.Optional[ParserElement] = None, +) -> ParserElement: + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``int_expr`` is specified, it should be a pyparsing expression + that produces an integer value. 
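+
+ Such a count expression typically attaches a parse action that converts
+ the matched text to an ``int`` (an editor's illustrative sketch; the
+ binary example below does the same with base 2)::
+
+ hex_count = Word(hexnums).set_parse_action(lambda t: int(t[0], 16))
+ counted_array(Word(alphanums), int_expr=hex_count).parse_string('A 1 2 3 4 5 6 7 8 9 10') # -> 10-element list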
+ + Example:: + + counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) + counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] + + # if other fields must be parsed after the count but before the + # list items, give the fields results names and they will + # be preserved in the returned ParseResults: + count_with_metadata = integer + Word(alphas)("type") + typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") + result = typed_array.parse_string("3 bool True True False") + print(result.dump()) + + # prints + # ['True', 'True', 'False'] + # - items: ['True', 'True', 'False'] + # - type: 'bool' + """ + intExpr = intExpr or int_expr + array_expr = Forward() + + def count_field_parse_action(s, l, t): + nonlocal array_expr + n = t[0] + array_expr <<= (expr * n) if n else Empty() + # clear list contents, but keep any named results + del t[:] + + if intExpr is None: + intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.set_name("arrayLen") + intExpr.add_parse_action(count_field_parse_action, call_during_try=True) + return (intExpr + array_expr).set_name(f"(len) {expr}...") + + +def match_previous_literal(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_literal(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. If this is not desired, use + :class:`match_previous_expr`. Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + + def copy_token_to_repeater(s, l, t): + if not t: + rep << Empty() + return + + if len(t) == 1: + rep << t[0] + return + + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def match_previous_expr(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_expr(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. 
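+ + For illustration, a sketch of the non-matching case (hypothetical input + values):: + + first = Word(nums) + match_expr = first + ":" + match_previous_literal(first) + match_expr.parse_string("1:1") # -> ['1', ':', '1'] + match_expr.parse_string("1:2") # raises ParseException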
+ """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + + def copy_token_to_repeater(s, l, t): + matchTokens = _flatten(t.as_list()) + + def must_match_these_tokens(s, l, t): + theseTokens = _flatten(t.as_list()) + if theseTokens != matchTokens: + raise ParseException( + s, l, f"Expected {matchTokens}, found{theseTokens}" + ) + + rep.set_parse_action(must_match_these_tokens, callDuringTry=True) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def one_of( + strs: Union[typing.Iterable[str], str], + caseless: bool = False, + use_regex: bool = True, + as_keyword: bool = False, + *, + useRegex: bool = True, + asKeyword: bool = False, +) -> ParserElement: + """Helper to quickly define a set of alternative :class:`Literal` s, + and makes sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. + + Parameters: + + - ``strs`` - a string of space-delimited literals, or a collection of + string literals + - ``caseless`` - treat all literals as caseless - (default= ``False``) + - ``use_regex`` - as an optimization, will + generate a :class:`Regex` object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if + creating a :class:`Regex` raises an exception) - (default= ``True``) + - ``as_keyword`` - enforce :class:`Keyword`-style matching on the + generated expressions - (default= ``False``) + - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, + but will be removed in a future release + + Example:: + + comp_oper = one_of("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + asKeyword = asKeyword or as_keyword + useRegex = useRegex and use_regex + + if ( + isinstance(caseless, str_type) + and __diag__.warn_on_multiple_string_args_to_oneof + ): + warnings.warn( + "More than one string argument passed to one_of, pass" + " choices as a list or space-delimited string", + stacklevel=2, + ) + + if caseless: + isequal = lambda a, b: a.upper() == b.upper() + masks = lambda a, b: b.upper().startswith(a.upper()) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral + else: + isequal = lambda a, b: a == b + masks = lambda a, b: b.startswith(a) + parseElementClass = Keyword if asKeyword else Literal + + symbols: List[str] = [] + if isinstance(strs, str_type): + strs = typing.cast(str, strs) + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + raise TypeError("Invalid argument to one_of, expected string or iterable") + if not symbols: + return NoMatch() + + # reorder given symbols to take care to avoid masking longer choices with shorter ones + # (but only if the given symbols are not just single characters) + if any(len(sym) > 1 for sym in symbols): + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1 :]): + if isequal(other, cur): + del symbols[i + j + 1] + break + if masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if useRegex: + re_flags: int = re.IGNORECASE if caseless else 0 + + try: + if all(len(sym) == 1 for sym in symbols): + # symbols are just single characters, create 
range regex pattern + patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]" + else: + patt = "|".join(re.escape(sym) for sym in symbols) + + # wrap with \b word break markers if defining as keywords + if asKeyword: + patt = rf"\b(?:{patt})\b" + + ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) + + if caseless: + # add parse action to return symbols as specified, not in random + # casing as found in input string + symbol_map = {sym.lower(): sym for sym in symbols} + ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) + + return ret + + except re.error: + warnings.warn( + "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 + ) + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( + " | ".join(symbols) + ) + + +def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: + """Helper to easily and clearly define a dictionary by specifying + the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example:: + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + print(attr_expr[1, ...].parse_string(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) + + # similar to Dict, but simpler call format + result = dict_of(attr_label, attr_value).parse_string(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.as_dict()) + + prints:: + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + + +def original_text_for( + expr: ParserElement, as_string: bool = True, *, asString: bool = True +) -> ParserElement: + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns a string containing the original parsed text. + + If the optional ``as_string`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`original_text_for` contains expressions with defined + results names, you must set ``as_string`` to ``False`` if you + want to preserve those results name values. + + The ``asString`` pre-PEP8 argument is retained for compatibility, + but will be removed in a future release. 
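+ + For illustration, a sketch of the ``as_string=False`` form (assumed usage, + hypothetical names):: + + inner = Word(alphas)("word") + wrapped = original_text_for(inner, as_string=False) + result = wrapped.parse_string("hello") + # result[0] is the original matched text; the "word" results name + # defined on the inner expression is preserved + print(result[0], result["word"]) # -> hello hello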
+ + Example:: + + src = "this is test <b> bold <i>text</i> </b> normal text " + for tag in ("b", "i"): + opener, closer = make_html_tags(tag) + patt = original_text_for(opener + ... + closer) + print(patt.search_string(src)[0]) + + prints:: + + ['<b> bold <i>text</i> </b>'] + ['<i>text</i>'] + """ + asString = asString and as_string + + locMarker = Empty().set_parse_action(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start : t._original_end] + else: + + def extractText(s, l, t): + t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] + + matchExpr.set_parse_action(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) + return matchExpr + + +def ungroup(expr: ParserElement) -> ParserElement: + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. + """ + return TokenConverter(expr).add_parse_action(lambda t: t[0]) + + +def locatedExpr(expr: ParserElement) -> ParserElement: + """ + (DEPRECATED - future code should use the :class:`Located` class) + Helper to decorate a returned token with its starting and ending + locations in the input string. + + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ``<TAB>`` characters, you + may want to call :class:`ParserElement.parse_with_tabs` + + Example:: + + wd = Word(alphas) + for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().set_parse_action(lambda ss, ll, tt: ll) + return Group( + locator("locn_start") + + expr("value") + + locator.copy().leaveWhitespace()("locn_end") + ) + + +def nested_expr( + opener: Union[str, ParserElement] = "(", + closer: Union[str, ParserElement] = ")", + content: typing.Optional[ParserElement] = None, + ignore_expr: ParserElement = quoted_string(), + *, + ignoreExpr: ParserElement = quoted_string(), +) -> ParserElement: + """Helper method for defining nested lists enclosed in opening and + closing delimiters (``"("`` and ``")"`` are the default). + + Parameters: + + - ``opener`` - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - ``closer`` - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - ``content`` - expression for items within the nested lists + (default= ``None``) + - ``ignore_expr`` - expression for ignoring opening and closing delimiters + (default= :class:`quoted_string`) + - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility + but will be removed in a future release + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignore_expr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quoted_string or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`.
The default is + :class:`quoted_string`, but if no expressions are to be ignored, then + pass ``None`` for this argument. + + Example:: + + data_type = one_of("void int short long char float double") + decl_data_type = Combine(data_type + Opt(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(c_style_comment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.search_string(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + + prints:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if ignoreExpr != ignore_expr: + ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener, str_type) and isinstance(closer, str_type): + opener = typing.cast(str, opener) + closer = typing.cast(str, closer) + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS, + exact=1, + ) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = empty.copy() + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS + ).set_parse_action(lambda t: t[0].strip()) + else: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = Combine( + OneOrMore( + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + raise ValueError( + "opening and closing arguments must be strings if no content expression is given" + ) + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( + Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) + ) + else: + ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.set_name(f"nested {opener}{closer} expression") + # don't override error message from content expressions + ret.errmsg = None + return ret + + +def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr, str_type): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + else: + tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( + 
printables, exclude_chars=">" + ) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict( + ZeroOrMore( + Group( + tagAttrName.set_parse_action(lambda t: t[0].lower()) + + Opt(Suppress("=") + tagAttrValue) + ) + ) + ) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False) + + openTag.set_name(f"<{resname}>") + # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels + openTag.add_parse_action( + lambda t: t.__setitem__( + "start" + "".join(resname.replace(":", " ").title().split()), t.copy() + ) + ) + closeTag = closeTag( + "end" + "".join(resname.replace(":", " ").title().split()) + ).set_name(f"</{resname}>") + openTag.tag = resname + closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) + return openTag, closeTag + + +def make_html_tags( + tag_str: Union[str, ParserElement] +) -> Tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. + + Example:: + + text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' + # make_html_tags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple + a, a_end = make_html_tags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.search_string(text): + # attributes in the <A> tag (like "href" shown here) are + # also accessible as named results + print(link.link_text, '->', link.href) + + prints:: + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki + """ + return _makeTags(tag_str, False) + + +def make_xml_tags( + tag_str: Union[str, ParserElement] +) -> Tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. + + Example: similar to :class:`make_html_tags` + """ + return _makeTags(tag_str, True) + + +any_open_tag: ParserElement +any_close_tag: ParserElement +any_open_tag, any_close_tag = make_html_tags( + Word(alphas, alphanums + "_:").set_name("any tag") +) + +_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} +common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name( + "common HTML entity" +) + + +def replace_html_entity(s, l, t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + + +class OpAssoc(Enum): + """Enumeration of operator associativity + - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`""" + + LEFT = 1 + RIGHT = 2 + + +InfixNotationOperatorArgType = Union[ + ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] +] +InfixNotationOperatorSpec = Union[ + Tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + typing.Optional[ParseAction], + ], + Tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + ], +] + + +def infix_notation( + base_expr: ParserElement, + op_list: List[InfixNotationOperatorSpec], + lpar: Union[str, ParserElement] = Suppress("("), + rpar: Union[str, ParserElement] = Suppress(")"), +) -> ParserElement: + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions.
The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infix_notation. See + :class:`ParserElement.enable_packrat` for a mechanism to potentially + improve your parser performance. + + Parameters: + + - ``base_expr`` - expression representing the most basic operand to + be used in the expression + - ``op_list`` - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(op_expr, + num_operands, right_left_assoc, (optional)parse_action)``, where: + + - ``op_expr`` is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if ``num_operands`` + is 3, ``op_expr`` is a tuple of two expressions, for the two + operators separating the 3 terms + - ``num_operands`` is the number of terms for this operator (must be 1, + 2, or 3) + - ``right_left_assoc`` is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. + - ``parse_action`` is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``set_parse_action(*fn)`` + (:class:`ParserElement.set_parse_action`) + - ``lpar`` - expression for matching left-parentheses; if passed as a + str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as + an expression (such as ``Literal('(')``), then it will be kept in + the parsed results, and grouped with them. (default= ``Suppress('(')``) + - ``rpar`` - expression for matching right-parentheses; if passed as a + str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as + an expression (such as ``Literal(')')``), then it will be kept in + the parsed results, and grouped with them. 
(default= ``Suppress(')')``) + + Example:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infix_notation(integer | varname, + [ + ('-', 1, OpAssoc.RIGHT), + (one_of('* /'), 2, OpAssoc.LEFT), + (one_of('+ -'), 2, OpAssoc.LEFT), + ]) + + arith_expr.run_tests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', full_dump=False) + + prints:: + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + (5+x)*y + [[[5, '+', 'x'], '*', 'y']] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.try_parse(instring, loc) + return loc, [] + + _FB.__name__ = "FollowedBy>" + + ret = Forward() + if isinstance(lpar, str): + lpar = Suppress(lpar) + if isinstance(rpar, str): + rpar = Suppress(rpar) + + # if lpar and rpar are not suppressed, wrap in group + if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)): + lastExpr = base_expr | Group(lpar + ret + rpar) + else: + lastExpr = base_expr | (lpar + ret + rpar) + + arity: int + rightLeftAssoc: opAssoc + pa: typing.Optional[ParseAction] + opExpr1: ParserElement + opExpr2: ParserElement + for operDef in op_list: + opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment] + if isinstance(opExpr, str_type): + opExpr = ParserElement._literalStringClass(opExpr) + opExpr = typing.cast(ParserElement, opExpr) + if arity == 3: + if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions" + ) + opExpr1, opExpr2 = opExpr + term_name = f"{opExpr1}{opExpr2} term" + else: + term_name = f"{opExpr} term" + + if not 1 <= arity <= 3: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + + if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): + raise ValueError("operator must indicate right or left associativity") + + thisExpr: ParserElement = Forward().set_name(term_name) + thisExpr = typing.cast(Forward, thisExpr) + if rightLeftAssoc is OpAssoc.LEFT: + if arity == 1: + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( + lastExpr + (opExpr + lastExpr)[1, ...] + ) + else: + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr + ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) + elif rightLeftAssoc is OpAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Opt): + opExpr = Opt(opExpr) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( + lastExpr + (opExpr + thisExpr)[1, ...] + ) + else: + matchExpr = _FB(lastExpr + thisExpr) + Group( + lastExpr + thisExpr[1, ...] 
+ ) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr + ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.set_parse_action(*pa) + else: + matchExpr.set_parse_action(pa) + thisExpr <<= (matchExpr | lastExpr).setName(term_name) + lastExpr = thisExpr + ret <<= lastExpr + return ret + + +def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): + """ + (DEPRECATED - use :class:`IndentedBlock` class instead) + Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + Parameters: + + - ``blockStatementExpr`` - expression defining syntax of statement that + is repeated within the indented block + - ``indentStack`` - list created by caller to manage indentation stack + (multiple ``statementWithIndentedBlock`` expressions within a single + grammar should share a common ``indentStack``) + - ``indent`` - boolean indicating whether block must be indented beyond + the current level; set to ``False`` for block of left-most statements + (default= ``True``) + + A valid block must contain at least one ``blockStatement``. + + (Note that indentedBlock uses internal parse actions which make it + incompatible with packrat parsing.) + + Example:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group(funcDecl + func_body) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << (funcDef | assignment | identifier) + + module_body = stmt[1, ...] 
+ + parseTree = module_body.parseString(data) + parseTree.pprint() + + prints:: + + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + backup_stacks.append(indentStack[:]) + + def reset_stack(): + indentStack[:] = backup_stacks[-1] + + def checkPeerIndent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") + + def checkSubIndent(s, l, t): + curCol = col(l, s) + if curCol > indentStack[-1]: + indentStack.append(curCol) + else: + raise ParseException(s, l, "not a subentry") + + def checkUnindent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if not (indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < indentStack[-1]: + indentStack.pop() + + NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) + INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") + PEER = Empty().set_parse_action(checkPeerIndent).set_name("") + UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") + if indent: + smExpr = Group( + Opt(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + UNDENT + ) + else: + smExpr = Group( + Opt(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + Opt(UNDENT) + ) + + # add a parse action to remove backup_stack from list of backups + smExpr.add_parse_action( + lambda: backup_stacks.pop(-1) and None if backup_stacks else None + ) + smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.set_name("indented block") + + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( + "C style comment" +) +"Comment of the form ``/* ... */``" + +html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment") +"Comment of the form ``<!-- ... -->``" + +rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") +dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") +"Comment of the form ``// ... (to end of line)``" + +cpp_style_comment = Combine( + Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment +).set_name("C++ style comment") +"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" + +java_style_comment = cpp_style_comment +"Same as :class:`cpp_style_comment`" + +python_style_comment = Regex(r"#.*").set_name("Python style comment") +"Comment of the form ``# ...
(to end of line)``" + + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: List[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + + +# compatibility function, superseded by DelimitedList class +def delimited_list( + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + min: typing.Optional[int] = None, + max: typing.Optional[int] = None, + *, + allow_trailing_delim: bool = False, +) -> ParserElement: + """(DEPRECATED - use :class:`DelimitedList` class)""" + return DelimitedList( + expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim + ) + + +# pre-PEP8 compatible names +# fmt: off +opAssoc = OpAssoc +anyOpenTag = any_open_tag +anyCloseTag = any_close_tag +commonHTMLEntity = common_html_entity +cStyleComment = c_style_comment +htmlComment = html_comment +restOfLine = rest_of_line +dblSlashComment = dbl_slash_comment +cppStyleComment = cpp_style_comment +javaStyleComment = java_style_comment +pythonStyleComment = python_style_comment +delimitedList = replaced_by_pep8("delimitedList", DelimitedList) +delimited_list = replaced_by_pep8("delimited_list", DelimitedList) +countedArray = replaced_by_pep8("countedArray", counted_array) +matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal) +matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr) +oneOf = replaced_by_pep8("oneOf", one_of) +dictOf = replaced_by_pep8("dictOf", dict_of) +originalTextFor = replaced_by_pep8("originalTextFor", original_text_for) +nestedExpr = replaced_by_pep8("nestedExpr", nested_expr) +makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags) +makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags) +replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity) +infixNotation = replaced_by_pep8("infixNotation", infix_notation) +# fmt: on diff --git a/external/python/pyparsing/py.typed b/external/python/pyparsing/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/external/python/pyparsing/results.py b/external/python/pyparsing/results.py new file mode 100644 index 00000000..3e5fe208 --- /dev/null +++ b/external/python/pyparsing/results.py @@ -0,0 +1,797 @@ +# results.py +from collections.abc import ( + MutableMapping, + Mapping, + MutableSequence, + Iterator, + Sequence, + Container, +) +import pprint +from typing import Tuple, Any, Dict, Set, List + +str_type: Tuple[type, ...] = (str, bytes) +_generator_type = type((_ for _ in ())) + + +class _ParseResultsWithOffset: + tup: Tuple["ParseResults", int] + __slots__ = ["tup"] + + def __init__(self, p1: "ParseResults", p2: int): + self.tup: Tuple[ParseResults, int] = (p1, p2) + + def __getitem__(self, i): + return self.tup[i] + + def __getstate__(self): + return self.tup + + def __setstate__(self, *args): + self.tup = args[0] + + +class ParseResults: + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) 
+ - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`) + + Example:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + # equivalent form: + # date_str = (integer("year") + '/' + # + integer("month") + '/' + # + integer("day")) + + # parse_string returns a ParseResults object + result = date_str.parse_string("1999/12/31") + + def test(s, fn=repr): + print(f"{s} -> {fn(eval(s))}") + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + + prints:: + + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: '31' + - month: '12' + - year: '1999' + """ + + _null_values: Tuple[Any, ...] = (None, [], ()) + + _name: str + _parent: "ParseResults" + _all_names: Set[str] + _modal: bool + _toklist: List[Any] + _tokdict: Dict[str, Any] + + __slots__ = ( + "_name", + "_parent", + "_all_names", + "_modal", + "_toklist", + "_tokdict", + ) + + class List(list): + """ + Simple wrapper class to distinguish parsed list results that should be preserved + as actual Python lists, instead of being converted to :class:`ParseResults`:: + + LBRACK, RBRACK = map(pp.Suppress, "[]") + element = pp.Forward() + item = ppc.integer + element_list = LBRACK + pp.DelimitedList(element) + RBRACK + + # add parse actions to convert from ParseResults to actual Python collection types + def as_python_list(t): + return pp.ParseResults.List(t.as_list()) + element_list.add_parse_action(as_python_list) + + element <<= item | element_list + + element.run_tests(''' + 100 + [2,3,4] + [[2, 1],3,4] + [(2, 1),3,4] + (2,3,4) + ''', post_parse=lambda s, r: (r[0], type(r[0]))) + + prints:: + + 100 + (100, <class 'int'>) + + [2,3,4] + ([2, 3, 4], <class 'list'>) + + [[2, 1],3,4] + ([[2, 1], 3, 4], <class 'list'>) + + (Used internally by :class:`Group` when `aslist=True`.)
+ """ + + def __new__(cls, contained=None): + if contained is None: + contained = [] + + if not isinstance(contained, list): + raise TypeError( + f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}" + ) + + return list.__new__(cls) + + def __new__(cls, toklist=None, name=None, **kwargs): + if isinstance(toklist, ParseResults): + return toklist + self = object.__new__(cls) + self._name = None + self._parent = None + self._all_names = set() + + if toklist is None: + self._toklist = [] + elif isinstance(toklist, (list, _generator_type)): + self._toklist = ( + [toklist[:]] + if isinstance(toklist, ParseResults.List) + else list(toklist) + ) + else: + self._toklist = [toklist] + self._tokdict = dict() + return self + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance + ): + self._tokdict: Dict[str, _ParseResultsWithOffset] + self._modal = modal + + if name is None or name == "": + return + + if isinstance(name, int): + name = str(name) + + if not modal: + self._all_names = {name} + + self._name = name + + if toklist in self._null_values: + return + + if isinstance(toklist, (str_type, type)): + toklist = [toklist] + + if asList: + if isinstance(toklist, ParseResults): + self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) + self[name]._name = name + return + + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + if toklist is not self: + self[name] = toklist + else: + self._name = name + + def __getitem__(self, i): + if isinstance(i, (int, slice)): + return self._toklist[i] + + if i not in self._all_names: + return self._tokdict[i][-1][0] + + return ParseResults([v[0] for v in self._tokdict[i]]) + + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self._tokdict[k] = self._tokdict.get(k, list()) + [v] + sub = v[0] + elif isinstance(k, (int, slice)): + self._toklist[k] = v + sub = v + else: + self._tokdict[k] = self._tokdict.get(k, list()) + [ + _ParseResultsWithOffset(v, 0) + ] + sub = v + if isinstance(sub, ParseResults): + sub._parent = self + + def __delitem__(self, i): + if not isinstance(i, (int, slice)): + del self._tokdict[i] + return + + mylen = len(self._toklist) + del self._toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for occurrences in self._tokdict.values(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position - (position > j) + ) + + def __contains__(self, k) -> bool: + return k in self._tokdict + + def __len__(self) -> int: + return len(self._toklist) + + def __bool__(self) -> bool: + return not not (self._toklist or self._tokdict) + + def __iter__(self) -> Iterator: + return iter(self._toklist) + + def __reversed__(self) -> Iterator: + return iter(self._toklist[::-1]) + + def keys(self): + return iter(self._tokdict) + + def values(self): + return (self[k] for k in self.keys()) + + def items(self): + return ((k, self[k]) for k in self.keys()) + + def haskeys(self) -> bool: + """ + Since ``keys()`` returns an iterator, this method is helpful in bypassing + 
code that looks for the existence of any defined results names.""" + return not not self._tokdict + + def pop(self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + def remove_first(tokens): + tokens.pop(0) + numlist.add_parse_action(remove_first) + print(numlist.parse_string("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + Word(nums)[1, ...] + print(patt.parse_string("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.add_parse_action(remove_LABEL) + print(patt.parse_string("AAB 123 321").dump()) + + prints:: + + ['AAB', '123', '321'] + - LABEL: 'AAB' + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k, v in kwargs.items(): + if k == "default": + args = (args[0], v) + else: + raise TypeError(f"pop() got an unexpected keyword argument {k!r}") + if isinstance(args[0], int) or len(args) == 1 or args[0] in self: + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, default_value=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``default_value`` or ``None`` if no + ``default_value`` is specified. + + Similar to ``dict.get()``. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return default_value + + def insert(self, index, ins_string): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + numlist.add_parse_action(insert_locn) + print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] + """ + self._toklist.insert(index, ins_string) + # fixup indices in token dictionary + for occurrences in self._tokdict.values(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position + (position > index) + ) + + def append(self, item): + """ + Add single element to end of ``ParseResults`` list of elements. + + Example:: + + numlist = Word(nums)[...] 
+ print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + numlist.add_parse_action(append_sum) + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] + """ + self._toklist.append(item) + + def extend(self, itemseq): + """ + Add sequence of elements to end of ``ParseResults`` list of elements. + + Example:: + + patt = Word(alphas)[1, ...] + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + patt.add_parse_action(make_palindrome) + print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self.__iadd__(itemseq) + else: + self._toklist.extend(itemseq) + + def clear(self): + """ + Clear all elements and results names. + """ + del self._toklist[:] + self._tokdict.clear() + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + if name.startswith("__"): + raise AttributeError(name) + return "" + + def __add__(self, other: "ParseResults") -> "ParseResults": + ret = self.copy() + ret += other + return ret + + def __iadd__(self, other: "ParseResults") -> "ParseResults": + if not other: + return self + + if other._tokdict: + offset = len(self._toklist) + addoffset = lambda a: offset if a < 0 else a + offset + otheritems = other._tokdict.items() + otherdictitems = [ + (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems + for v in vlist + ] + for k, v in otherdictitems: + self[k] = v + if isinstance(v[0], ParseResults): + v[0]._parent = self + + self._toklist += other._toklist + self._all_names |= other._all_names + return self + + def __radd__(self, other) -> "ParseResults": + if isinstance(other, int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})" + + def __str__(self) -> str: + return ( + "[" + + ", ".join( + [ + str(i) if isinstance(i, ParseResults) else repr(i) + for i in self._toklist + ] + ) + + "]" + ) + + def _asStringList(self, sep=""): + out = [] + for item in self._toklist: + if out and sep: + out.append(sep) + if isinstance(item, ParseResults): + out += item._asStringList() + else: + out.append(str(item)) + return out + + def as_list(self) -> list: + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + + Example:: + + patt = Word(alphas)[1, ...] + result = patt.parse_string("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> <class 'pyparsing.results.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] + + # Use as_list() to create an actual list + result_list = result.as_list() + print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [ + res.as_list() if isinstance(res, ParseResults) else res + for res in self._toklist + ] + + def as_dict(self) -> dict: + """ + Returns the named parse results as a nested dictionary.
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('12/31/1999') + print(type(result), repr(result)) # -> <class 'pyparsing.results.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.as_dict() + print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + + def to_item(obj): + if isinstance(obj, ParseResults): + return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] + else: + return obj + + return dict((k, to_item(v)) for k, v in self.items()) + + def copy(self) -> "ParseResults": + """ + Returns a new shallow copy of a :class:`ParseResults` object. `ParseResults` + items contained within the source are shared with the copy. Use + :class:`ParseResults.deepcopy()` to create a copy with its own separate + content values. + """ + ret = ParseResults(self._toklist) + ret._tokdict = self._tokdict.copy() + ret._parent = self._parent + ret._all_names |= self._all_names + ret._name = self._name + return ret + + def deepcopy(self) -> "ParseResults": + """ + Returns a new deep copy of a :class:`ParseResults` object. + """ + ret = self.copy() + # replace values with copies if they are of known mutable types + for i, obj in enumerate(self._toklist): + if isinstance(obj, ParseResults): + self._toklist[i] = obj.deepcopy() + elif isinstance(obj, (str, bytes)): + pass + elif isinstance(obj, MutableMapping): + self._toklist[i] = dest = type(obj)() + for k, v in obj.items(): + dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v + elif isinstance(obj, Container): + self._toklist[i] = type(obj)( + v.deepcopy() if isinstance(v, ParseResults) else v for v in obj + ) + return ret + + def get_name(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. + + Example:: + + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = user_data[1, ...] + + result = user_info.parse_string("22 111-22-3333 #221B") + for item in result: + print(item.get_name(), ':', item[0]) + + prints:: + + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self._name: + return self._name + elif self._parent: + par: "ParseResults" = self._parent + parent_tokdict_items = par._tokdict.items() + return next( + ( + k + for k, vlist in parent_tokdict_items + for v, loc in vlist + if v is self + ), + None, + ) + elif ( + len(self) == 1 + and len(self._tokdict) == 1 + and next(iter(self._tokdict.values()))[0][1] in (0, -1) + ): + return next(iter(self._tokdict.keys())) + else: + return None + + def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: + """ + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data.
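+ + As an illustrative aside (hypothetical expression), passing + ``include_list=False`` omits the leading token-list line and shows only + the named results:: + + result = (Word(nums)("a") + Word(nums)("b")).parse_string("1 2") + print(result.dump(include_list=False)) # shows only "- a: '1'" and "- b: '2'"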
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('1999/12/31') + print(result.dump()) + + prints:: + + ['1999', '/', '12', '/', '31'] + - day: '31' + - month: '12' + - year: '1999' + """ + out = [] + NL = "\n" + out.append(indent + str(self.as_list()) if include_list else "") + + if not full: + return "".join(out) + + if self.haskeys(): + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: + if out: + out.append(NL) + out.append(f"{indent}{(' ' * _depth)}- {k}: ") + if not isinstance(v, ParseResults): + out.append(repr(v)) + continue + + if not v: + out.append(str(v)) + continue + + out.append( + v.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + ) + if not any(isinstance(vv, ParseResults) for vv in self): + return "".join(out) + + v = self + incr = " " + nl = "\n" + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + vv_dump = vv.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + out.append( + f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}" + ) + else: + out.append( + f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}" + ) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the + `pprint <https://docs.python.org/3/library/pprint.html>`_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . + + Example:: + + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(DelimitedList(term))) + result = func.parse_string("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + + prints:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.as_list(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( + self._toklist, + ( + self._tokdict.copy(), + None, + self._all_names, + self._name, + ), + ) + + def __setstate__(self, state): + self._toklist, (self._tokdict, par, inAccumNames, self._name) = state + self._all_names = set(inAccumNames) + self._parent = None + + def __getnewargs__(self): + return self._toklist, self._name + + def __dir__(self): + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None) -> "ParseResults": + """ + Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the + name-value relations as results names. If an optional ``name`` argument is + given, a nested ``ParseResults`` will be returned.
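+ + For illustration, a minimal sketch (hypothetical data):: + + result = ParseResults.from_dict({"name": "pyparsing", "version": 3}) + print(result.as_dict()) # -> {'name': 'pyparsing', 'version': 3} + print(result.name) # -> 'pyparsing'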
+ """ + + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + # str's are iterable, but in pyparsing, we don't want to iterate over them + else: + return not isinstance(obj, str_type) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret + + asList = as_list + """Deprecated - use :class:`as_list`""" + asDict = as_dict + """Deprecated - use :class:`as_dict`""" + getName = get_name + """Deprecated - use :class:`get_name`""" + + +MutableMapping.register(ParseResults) +MutableSequence.register(ParseResults) diff --git a/external/python/pyparsing/testing.py b/external/python/pyparsing/testing.py new file mode 100644 index 00000000..5654d47d --- /dev/null +++ b/external/python/pyparsing/testing.py @@ -0,0 +1,345 @@ +# testing.py + +from contextlib import contextmanager +import re +import typing + + +from .core import ( + ParserElement, + ParseException, + Keyword, + __diag__, + __compat__, +) + + +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - bounded recursion parsing + - default whitespace characters. + - default keyword characters + - literal string auto-conversion class + - __diag__ settings + + Example:: + + with reset_pyparsing_context(): + # test that literals used to construct a grammar are automatically suppressed + ParserElement.inlineLiteralsUsing(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] + ')') + + # assert that the '()' characters are not included in the parsed tokens + self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) + + # after exiting context manager, literals are converted to Literal expressions again + """ + + def __init__(self): + self._save_context = {} + + def save(self): + self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS + self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS + + self._save_context["literal_string_class"] = ( + ParserElement._literalStringClass + ) + + self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace + + self._save_context["packrat_enabled"] = ParserElement._packratEnabled + if ParserElement._packratEnabled: + self._save_context["packrat_cache_size"] = ( + ParserElement.packrat_cache.size + ) + else: + self._save_context["packrat_cache_size"] = None + self._save_context["packrat_parse"] = ParserElement._parse + self._save_context["recursion_enabled"] = ( + ParserElement._left_recursion_enabled + ) + + self._save_context["__diag__"] = { + name: getattr(__diag__, name) for name in __diag__._all_names + } + + self._save_context["__compat__"] = { + "collect_all_And_tokens": __compat__.collect_all_And_tokens + } + + return self + + def restore(self): + # reset pyparsing global state + if ( + ParserElement.DEFAULT_WHITE_CHARS + != self._save_context["default_whitespace"] + ): + ParserElement.set_default_whitespace_chars( + self._save_context["default_whitespace"] + ) + + ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] + + Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] + ParserElement.inlineLiteralsUsing( + self._save_context["literal_string_class"] + ) + + for name, 
value in self._save_context["__diag__"].items(): + (__diag__.enable if value else __diag__.disable)(name) + + ParserElement._packratEnabled = False + if self._save_context["packrat_enabled"]: + ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) + else: + ParserElement._parse = self._save_context["packrat_parse"] + ParserElement._left_recursion_enabled = self._save_context[ + "recursion_enabled" + ] + + __compat__.collect_all_And_tokens = self._save_context["__compat__"] + + return self + + def copy(self): + ret = type(self)() + ret._save_context.update(self._save_context) + return ret + + def __enter__(self): + return self.save() + + def __exit__(self, *args): + self.restore() + + class TestParseResultsAsserts: + """ + A mixin class to add parse results assertion methods to normal unittest.TestCase classes. + """ + + def assertParseResultsEquals( + self, result, expected_list=None, expected_dict=None, msg=None + ): + """ + Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, + and compare any defined results names with an optional ``expected_dict``. + """ + if expected_list is not None: + self.assertEqual(expected_list, result.as_list(), msg=msg) + if expected_dict is not None: + self.assertEqual(expected_dict, result.as_dict(), msg=msg) + + def assertParseAndCheckList( + self, expr, test_string, expected_list, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. + """ + result = expr.parse_string(test_string, parse_all=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) + + def assertParseAndCheckDict( + self, expr, test_string, expected_dict, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. + """ + result = expr.parse_string(test_string, parseAll=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) + + def assertRunTestResults( + self, run_tests_report, expected_parse_results=None, msg=None + ): + """ + Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of + list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped + with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. + Finally, asserts that the overall ``runTests()`` success value is ``True``. 
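+ + A usage sketch (hypothetical expression and expected values):: + + report = Word(nums).run_tests("123\n456", print_results=False) + self.assertRunTestResults(report, [(['123'],), (['456'],)])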
+ + :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests + :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] + """ + run_test_success, run_test_results = run_tests_report + + if expected_parse_results is None: + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + return + + merged = [ + (*rpt, expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next((exp for exp in expected if isinstance(exp, str)), None) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None + ) + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None + ) + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, + ) + else: + # warning here maybe? + print(f"no validation for {test_string!r}") + + # do this last, in case some specific test results can be reported instead + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + + @contextmanager + def assertRaisesParseException( + self, exc_type=ParseException, expected_msg=None, msg=None + ): + if expected_msg is not None: + if isinstance(expected_msg, str): + expected_msg = re.escape(expected_msg) + with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx: + yield ctx + + else: + with self.assertRaises(exc_type, msg=msg) as ctx: + yield ctx + + @staticmethod + def with_line_numbers( + s: str, + start_line: typing.Optional[int] = None, + end_line: typing.Optional[int] = None, + expand_tabs: bool = True, + eol_mark: str = "|", + mark_spaces: typing.Optional[str] = None, + mark_control: typing.Optional[str] = None, + ) -> str: + """ + Helpful method for debugging a parser - prints a string with line and column numbers. + (Line and column numbers are 1-based.) 
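
As a quick illustration of `with_line_numbers` (the sample string is invented):

```python
from pyparsing.testing import pyparsing_test as ppt

data = "alpha\tbeta\ngamma  \n"
# Tabs are expanded first (expand_tabs=True is the default); mark_spaces
# replaces the remaining spaces with visible symbols, and the default
# eol_mark "|" makes the trailing spaces after "gamma" easy to spot.
print(ppt.with_line_numbers(data, mark_spaces="unicode"))
```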
+ + :param s: tuple(bool, str - string to be printed with line and column numbers + :param start_line: int - (optional) starting line number in s to print (default=1) + :param end_line: int - (optional) ending line number in s to print (default=len(s)) + :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default + :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") + :param mark_spaces: str - (optional) special character to display in place of spaces + :param mark_control: str - (optional) convert non-printing control characters to a placeholding + character; valid values: + - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" + - any single character string - replace control characters with given string + - None (default) - string is displayed as-is + + :return: str - input string with leading line numbers and column number headers + """ + if expand_tabs: + s = s.expandtabs() + if mark_control is not None: + mark_control = typing.cast(str, mark_control) + if mark_control == "unicode": + transtable_map = { + c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433)) + } + transtable_map[127] = 0x2421 + tbl = str.maketrans(transtable_map) + eol_mark = "" + else: + ord_mark_control = ord(mark_control) + tbl = str.maketrans( + {c: ord_mark_control for c in list(range(0, 32)) + [127]} + ) + s = s.translate(tbl) + if mark_spaces is not None and mark_spaces != " ": + if mark_spaces == "unicode": + tbl = str.maketrans({9: 0x2409, 32: 0x2423}) + s = s.translate(tbl) + else: + s = s.replace(" ", mark_spaces) + if start_line is None: + start_line = 1 + if end_line is None: + end_line = len(s) + end_line = min(end_line, len(s)) + start_line = min(max(1, start_line), end_line) + + if mark_control != "unicode": + s_lines = s.splitlines()[start_line - 1 : end_line] + else: + s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] + if not s_lines: + return "" + + lineno_width = len(str(end_line)) + max_line_len = max(len(line) for line in s_lines) + lead = " " * (lineno_width + 1) + if max_line_len >= 99: + header0 = ( + lead + + "".join( + f"{' ' * 99}{(i + 1) % 100}" + for i in range(max(max_line_len // 100, 1)) + ) + + "\n" + ) + else: + header0 = "" + header1 = ( + header0 + + lead + + "".join(f" {(i + 1) % 10}" for i in range(-(-max_line_len // 10))) + + "\n" + ) + header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" + return ( + header1 + + header2 + + "\n".join( + f"{i:{lineno_width}d}:{line}{eol_mark}" + for i, line in enumerate(s_lines, start=start_line) + ) + + "\n" + ) diff --git a/external/python/pyparsing/unicode.py b/external/python/pyparsing/unicode.py new file mode 100644 index 00000000..426b8b23 --- /dev/null +++ b/external/python/pyparsing/unicode.py @@ -0,0 +1,354 @@ +# unicode.py + +import sys +from itertools import filterfalse +from typing import List, Tuple, Union + + +class _lazyclassproperty: + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, "_intern") or any( + cls._intern is getattr(superclass, "_intern", []) + for superclass in cls.__mro__[1:] + ): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] + + +class unicode_set: + """ + 
A set of Unicode characters, for language-specific strings for + ``alphas``, ``nums``, ``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``. Ranges can be specified using + 2-tuples or a 1-tuple, such as:: + + _ranges = [ + (0x0020, 0x007e), + (0x00a0, 0x00ff), + (0x0100,), + ] + + Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + + _ranges: UnicodeRangeList = [] + + @_lazyclassproperty + def _chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in getattr(cc, "_ranges", ()): + ret.extend(range(rr[0], rr[-1] + 1)) + return [chr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + """all non-whitespace characters in this range""" + return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphas(cls): + """all alphabetic characters in this range""" + return "".join(filter(str.isalpha, cls._chars_for_ranges)) + + @_lazyclassproperty + def nums(cls): + """all numeric digit characters in this range""" + return "".join(filter(str.isdigit, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphanums(cls): + """all alphanumeric characters in this range""" + return cls.alphas + cls.nums + + @_lazyclassproperty + def identchars(cls): + """all characters in this range that are valid identifier characters, plus underscore '_'""" + return "".join( + sorted( + set( + "".join(filter(str.isidentifier, cls._chars_for_ranges)) + + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" + + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" + + "_" + ) + ) + ) + + @_lazyclassproperty + def identbodychars(cls): + """ + all characters in this range that are valid identifier body characters, + plus the digits 0-9, and · (Unicode MIDDLE DOT) + """ + identifier_chars = set( + c for c in cls._chars_for_ranges if ("_" + c).isidentifier() + ) + return "".join(sorted(identifier_chars | set(cls.identchars + "0123456789·"))) + + @_lazyclassproperty + def identifier(cls): + """ + a pyparsing Word expression for an identifier using this range's definitions for + identchars and identbodychars + """ + from pyparsing import Word + + return Word(cls.identchars, cls.identbodychars) + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
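
A minimal sketch of defining a new `unicode_set` per the docstring above; the `Runic` class is invented for illustration (the range is the real Runic block):

```python
from pyparsing.unicode import unicode_set


class Runic(unicode_set):
    # ranges are left- and right-inclusive, per the class docstring
    _ranges = [
        (0x16A0, 0x16EA),
    ]


# the lazy class properties derive character strings from _ranges
print(len(Runic.alphas))      # alphabetic characters in the range
print(Runic.printables[:8])   # first few non-whitespace characters
```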
+ """ + + # fmt: off + + # define ranges in language character sets + _ranges: UnicodeRangeList = [ + (0x0020, sys.maxunicode), + ] + + class BasicMultilingualPlane(unicode_set): + """Unicode set for the Basic Multilingual Plane""" + _ranges: UnicodeRangeList = [ + (0x0020, 0xFFFF), + ] + + class Latin1(unicode_set): + """Unicode set for Latin-1 Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0020, 0x007E), + (0x00A0, 0x00FF), + ] + + class LatinA(unicode_set): + """Unicode set for Latin-A Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0100, 0x017F), + ] + + class LatinB(unicode_set): + """Unicode set for Latin-B Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0180, 0x024F), + ] + + class Greek(unicode_set): + """Unicode set for Greek Unicode Character Ranges""" + _ranges: UnicodeRangeList = [ + (0x0342, 0x0345), + (0x0370, 0x0377), + (0x037A, 0x037F), + (0x0384, 0x038A), + (0x038C,), + (0x038E, 0x03A1), + (0x03A3, 0x03E1), + (0x03F0, 0x03FF), + (0x1D26, 0x1D2A), + (0x1D5E,), + (0x1D60,), + (0x1D66, 0x1D6A), + (0x1F00, 0x1F15), + (0x1F18, 0x1F1D), + (0x1F20, 0x1F45), + (0x1F48, 0x1F4D), + (0x1F50, 0x1F57), + (0x1F59,), + (0x1F5B,), + (0x1F5D,), + (0x1F5F, 0x1F7D), + (0x1F80, 0x1FB4), + (0x1FB6, 0x1FC4), + (0x1FC6, 0x1FD3), + (0x1FD6, 0x1FDB), + (0x1FDD, 0x1FEF), + (0x1FF2, 0x1FF4), + (0x1FF6, 0x1FFE), + (0x2129,), + (0x2719, 0x271A), + (0xAB65,), + (0x10140, 0x1018D), + (0x101A0,), + (0x1D200, 0x1D245), + (0x1F7A1, 0x1F7A7), + ] + + class Cyrillic(unicode_set): + """Unicode set for Cyrillic Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0400, 0x052F), + (0x1C80, 0x1C88), + (0x1D2B,), + (0x1D78,), + (0x2DE0, 0x2DFF), + (0xA640, 0xA672), + (0xA674, 0xA69F), + (0xFE2E, 0xFE2F), + ] + + class Chinese(unicode_set): + """Unicode set for Chinese Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x2E80, 0x2E99), + (0x2E9B, 0x2EF3), + (0x31C0, 0x31E3), + (0x3400, 0x4DB5), + (0x4E00, 0x9FEF), + (0xA700, 0xA707), + (0xF900, 0xFA6D), + (0xFA70, 0xFAD9), + (0x16FE2, 0x16FE3), + (0x1F210, 0x1F212), + (0x1F214, 0x1F23B), + (0x1F240, 0x1F248), + (0x20000, 0x2A6D6), + (0x2A700, 0x2B734), + (0x2B740, 0x2B81D), + (0x2B820, 0x2CEA1), + (0x2CEB0, 0x2EBE0), + (0x2F800, 0x2FA1D), + ] + + class Japanese(unicode_set): + """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges""" + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x4E00, 0x9FBF), + (0x3000, 0x303F), + ] + + class Hiragana(unicode_set): + """Unicode set for Hiragana Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x3041, 0x3096), + (0x3099, 0x30A0), + (0x30FC,), + (0xFF70,), + (0x1B001,), + (0x1B150, 0x1B152), + (0x1F200,), + ] + + class Katakana(unicode_set): + """Unicode set for Katakana Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x3099, 0x309C), + (0x30A0, 0x30FF), + (0x31F0, 0x31FF), + (0x32D0, 0x32FE), + (0xFF65, 0xFF9F), + (0x1B000,), + (0x1B164, 0x1B167), + (0x1F201, 0x1F202), + (0x1F213,), + ] + + 漢字 = Kanji + カタカナ = Katakana + ひらがな = Hiragana + + _ranges = ( + Kanji._ranges + + Hiragana._ranges + + Katakana._ranges + ) + + class Hangul(unicode_set): + """Unicode set for Hangul (Korean) Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x1100, 0x11FF), + (0x302E, 0x302F), + (0x3131, 0x318E), + (0x3200, 0x321C), + (0x3260, 0x327B), + (0x327E,), + (0xA960, 0xA97C), + (0xAC00, 0xD7A3), + (0xD7B0, 0xD7C6), + (0xD7CB, 
0xD7FB), + (0xFFA0, 0xFFBE), + (0xFFC2, 0xFFC7), + (0xFFCA, 0xFFCF), + (0xFFD2, 0xFFD7), + (0xFFDA, 0xFFDC), + ] + + Korean = Hangul + + class CJK(Chinese, Japanese, Hangul): + """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range""" + + class Thai(unicode_set): + """Unicode set for Thai Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0E01, 0x0E3A), + (0x0E3F, 0x0E5B) + ] + + class Arabic(unicode_set): + """Unicode set for Arabic Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0600, 0x061B), + (0x061E, 0x06FF), + (0x0700, 0x077F), + ] + + class Hebrew(unicode_set): + """Unicode set for Hebrew Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0591, 0x05C7), + (0x05D0, 0x05EA), + (0x05EF, 0x05F4), + (0xFB1D, 0xFB36), + (0xFB38, 0xFB3C), + (0xFB3E,), + (0xFB40, 0xFB41), + (0xFB43, 0xFB44), + (0xFB46, 0xFB4F), + ] + + class Devanagari(unicode_set): + """Unicode set for Devanagari Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0900, 0x097F), + (0xA8E0, 0xA8FF) + ] + + BMP = BasicMultilingualPlane + + # add language identifiers using language Unicode + العربية = Arabic + 中文 = Chinese + кириллица = Cyrillic + Ελληνικά = Greek + עִברִית = Hebrew + 日本語 = Japanese + 한국어 = Korean + ไทย = Thai + देवनागरी = Devanagari + + # fmt: on diff --git a/external/python/pyparsing/util.py b/external/python/pyparsing/util.py new file mode 100644 index 00000000..4ae018a9 --- /dev/null +++ b/external/python/pyparsing/util.py @@ -0,0 +1,277 @@ +# util.py +import inspect +import warnings +import types +import collections +import itertools +from functools import lru_cache, wraps +from typing import Callable, List, Union, Iterable, TypeVar, cast + +_bslash = chr(92) +C = TypeVar("C", bound=Callable) + + +class __config_flags: + """Internal class for defining compatibility and debugging flags""" + + _all_names: List[str] = [] + _fixed_names: List[str] = [] + _type_desc = "configuration" + + @classmethod + def _set(cls, dname, value): + if dname in cls._fixed_names: + warnings.warn( + f"{cls.__name__}.{dname} {cls._type_desc} is {str(getattr(cls, dname)).upper()}" + f" and cannot be overridden", + stacklevel=3, + ) + return + if dname in cls._all_names: + setattr(cls, dname, value) + else: + raise ValueError(f"no such {cls._type_desc} {dname!r}") + + enable = classmethod(lambda cls, name: cls._set(name, True)) + disable = classmethod(lambda cls, name: cls._set(name, False)) + + +@lru_cache(maxsize=128) +def col(loc: int, strg: str) -> int: + """ + Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See + :class:`ParserElement.parse_string` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + """ + s = strg + return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) + + +@lru_cache(maxsize=128) +def lineno(loc: int, strg: str) -> int: + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note - the default parsing behavior is to expand tabs in the input string + before starting the parsing process. 
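
A small usage sketch for the `col`/`lineno`/`line` helpers (sample string invented); both line and column numbers are 1-based:

```python
from pyparsing.util import col, line, lineno

s = "abc\ndef\nghi"
loc = s.index("e")       # location 5, inside the second line
print(lineno(loc, s))    # 2
print(col(loc, s))       # 2 -- column of "e" within "def"
print(line(loc, s))      # "def"
```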
See :class:`ParserElement.parse_string` + for more information on parsing strings containing ```` s, and + suggested methods to maintain a consistent view of the parsed string, the + parse location, and line and column positions within the parsed string. + """ + return strg.count("\n", 0, loc) + 1 + + +@lru_cache(maxsize=128) +def line(loc: int, strg: str) -> str: + """ + Returns the line of text containing loc within a string, counting newlines as line separators. + """ + last_cr = strg.rfind("\n", 0, loc) + next_cr = strg.find("\n", loc) + return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] + + +class _UnboundedCache: + def __init__(self): + cache = {} + cache_get = cache.get + self.not_in_cache = not_in_cache = object() + + def get(_, key): + return cache_get(key, not_in_cache) + + def set_(_, key, value): + cache[key] = value + + def clear(_): + cache.clear() + + self.size = None + self.get = types.MethodType(get, self) + self.set = types.MethodType(set_, self) + self.clear = types.MethodType(clear, self) + + +class _FifoCache: + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + cache = {} + keyring = [object()] * size + cache_get = cache.get + cache_pop = cache.pop + keyiter = itertools.cycle(range(size)) + + def get(_, key): + return cache_get(key, not_in_cache) + + def set_(_, key, value): + cache[key] = value + i = next(keyiter) + cache_pop(keyring[i], None) + keyring[i] = key + + def clear(_): + cache.clear() + keyring[:] = [object()] * size + + self.size = size + self.get = types.MethodType(get, self) + self.set = types.MethodType(set_, self) + self.clear = types.MethodType(clear, self) + + +class LRUMemo: + """ + A memoizing mapping that retains `capacity` deleted items + + The memo tracks retained items by their access order; once `capacity` items + are retained, the least recently used item is discarded. 
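
A sketch of the retention behavior described above (`LRUMemo` is internal to pyparsing; values invented):

```python
from pyparsing.util import LRUMemo

memo = LRUMemo(capacity=2)
memo["a"] = 1
del memo["a"]       # moves "a" from the active dict into retained memory
print(memo["a"])    # 1 -- deleted items stay recoverable until evicted
```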
+ """ + + def __init__(self, capacity): + self._capacity = capacity + self._active = {} + self._memory = collections.OrderedDict() + + def __getitem__(self, key): + try: + return self._active[key] + except KeyError: + self._memory.move_to_end(key) + return self._memory[key] + + def __setitem__(self, key, value): + self._memory.pop(key, None) + self._active[key] = value + + def __delitem__(self, key): + try: + value = self._active.pop(key) + except KeyError: + pass + else: + while len(self._memory) >= self._capacity: + self._memory.popitem(last=False) + self._memory[key] = value + + def clear(self): + self._active.clear() + self._memory.clear() + + +class UnboundedMemo(dict): + """ + A memoizing mapping that retains all deleted items + """ + + def __delitem__(self, key): + pass + + +def _escape_regex_range_chars(s: str) -> str: + # escape these chars: ^-[] + for c in r"\^-[]": + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") + return str(s) + + +def _collapse_string_to_ranges( + s: Union[str, Iterable[str]], re_escape: bool = True +) -> str: + def is_consecutive(c): + c_int = ord(c) + is_consecutive.prev, prev = c_int, is_consecutive.prev + if c_int - prev > 1: + is_consecutive.value = next(is_consecutive.counter) + return is_consecutive.value + + is_consecutive.prev = 0 # type: ignore [attr-defined] + is_consecutive.counter = itertools.count() # type: ignore [attr-defined] + is_consecutive.value = -1 # type: ignore [attr-defined] + + def escape_re_range_char(c): + return "\\" + c if c in r"\^-][" else c + + def no_escape_re_range_char(c): + return c + + if not re_escape: + escape_re_range_char = no_escape_re_range_char + + ret = [] + s = "".join(sorted(set(s))) + if len(s) > 3: + for _, chars in itertools.groupby(s, key=is_consecutive): + first = last = next(chars) + last = collections.deque( + itertools.chain(iter([last]), chars), maxlen=1 + ).pop() + if first == last: + ret.append(escape_re_range_char(first)) + else: + sep = "" if ord(last) == ord(first) + 1 else "-" + ret.append( + f"{escape_re_range_char(first)}{sep}{escape_re_range_char(last)}" + ) + else: + ret = [escape_re_range_char(c) for c in s] + + return "".join(ret) + + +def _flatten(ll: list) -> list: + ret = [] + for i in ll: + if isinstance(i, list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret + + +def replaced_by_pep8(compat_name: str, fn: C) -> C: + # In a future version, uncomment the code in the internal _inner() functions + # to begin emitting DeprecationWarnings. + + # Unwrap staticmethod/classmethod + fn = getattr(fn, "__func__", fn) + + # (Presence of 'self' arg in signature is used by explain_exception() methods, so we take + # some extra steps to add it if present in decorated function.) 
+ if "self" == list(inspect.signature(fn).parameters)[0]: + + @wraps(fn) + def _inner(self, *args, **kwargs): + # warnings.warn( + # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2 + # ) + return fn(self, *args, **kwargs) + + else: + + @wraps(fn) + def _inner(*args, **kwargs): + # warnings.warn( + # f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2 + # ) + return fn(*args, **kwargs) + + _inner.__doc__ = f"""Deprecated - use :class:`{fn.__name__}`""" + _inner.__name__ = compat_name + _inner.__annotations__ = fn.__annotations__ + if isinstance(fn, types.FunctionType): + _inner.__kwdefaults__ = fn.__kwdefaults__ + elif isinstance(fn, type) and hasattr(fn, "__init__"): + _inner.__kwdefaults__ = fn.__init__.__kwdefaults__ + else: + _inner.__kwdefaults__ = None + _inner.__qualname__ = fn.__qualname__ + return cast(C, _inner) diff --git a/specification/Makefile b/specification/Makefile index 118dda23..226be4b7 100644 --- a/specification/Makefile +++ b/specification/Makefile @@ -4,16 +4,16 @@ SHELL = /usr/bin/env bash -QUIET ?= @ +QUIET ?= @ VERYQUIET ?= @ PYTHON ?= python3 ASCIIDOC ?= asciidoctor -RM = rm -f -RMRF = rm -rf -MKDIR = mkdir -p -CP = cp -p -MV = mv -ECHO = @echo +RM = rm -f +RMRF = rm -rf +MKDIR = mkdir -p +CP = cp -p +MV = mv +ECHO = @echo # Use GENXR_OPTIONS to add arguments like -time to scripts/genxr.py invocations GENXR_ARGS += $(GENXR_OPTIONS) -registry $(REGISTRY) @@ -24,25 +24,26 @@ else GENXR_ARGS += -q endif -ifneq (,$(strip $(STRICT))) -ASCIIDOC := $(ASCIIDOC) --failure-level ERROR -endif - ifneq (,$(strip $(VERY_STRICT))) -ASCIIDOC := $(ASCIIDOC) --failure-level WARN +ADOC_FAILURE_LEVEL := --failure-level INFO --verbose +else +ADOC_FAILURE_LEVEL := --failure-level ERROR endif -SPECREVISION = 1.0.34 +VERSIONS := XR_VERSION_1_0 XR_VERSION_1_1 XR_LOADER_VERSION_1_0 +VERSIONOPTIONS := $(foreach version,$(VERSIONS),-feature $(version)) + +SPECREVISION = 1.1.36 REVISION_COMPONENTS = $(subst ., ,$(SPECREVISION)) MAJORMINORVER = $(word 1,$(REVISION_COMPONENTS)).$(word 2,$(REVISION_COMPONENTS)) # Target directory for output files. -OUTDIR ?= $(GENDIR)/out/$(MAJORMINORVER) +OUTDIR ?= $(GENDIR)/out/$(MAJORMINORVER) # Target directory for all generated files. # This can be overridden. -GENDIR = $(CURDIR)/generated -REFPATH = $(GENDIR)/refpage +GENDIR := $(CURDIR)/generated +REFPATH := $(GENDIR)/refpage # Path to directory containing handcoded extension appendices APPENDICES= $(CURDIR)/sources/chapters/extensions @@ -57,8 +58,6 @@ PYAPIMAP := $(GENDIR)/apimap.py RBAPIMAP := $(GENDIR)/apimap.rb METADIR := $(GENDIR)/meta -VK_REF_PAGE_ROOT := https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/ - MAKE_RELATIVE = $(patsubst $(CURDIR)/%,%,$(1)) # Gets built automatically @@ -113,25 +112,26 @@ default: # Store our build configuration in a file, and force re-generation if it changes. # Most common thing that changes is the extension list. -APITITLE ?= OpenXR +APITITLE := EXTS := $(sort $(EXTENSIONS)) +VERSIONS := $(sort $(VERSIONS)) CONFIG_STAMP_FN := $(GENDIR)/config-stamp # This is the stuff stored/checked -STAMP_DATA := $(EXTS) "$(APITITLE)" +STAMP_DATA := $(EXTS) $(VERSIONS) "$(APITITLE)" # Depend on this target to force checking the config for changes. config_stamp: $(QUIET)$(MKDIR) $(dir $(CONFIG_STAMP_FN)) - $(QUIET)if ! $(PYTHON) $(SCRIPTS)/check_stamp.py "$(CONFIG_STAMP_FN)" $(STAMP_DATA); then $(MAKE) clean_generated; fi + $(QUIET)if ! 
$(PYTHON) $(SCRIPTS)/check_stamp.py "$(CONFIG_STAMP_FN)" $(STAMP_DATA); then $(MAKE) clean_generated; $(MAKE) attribs; fi .PHONY: config_stamp ################################################ ## OpenXR header file targets HEADER_DIR := $(OUTDIR)/openxr -HEADER := $(HEADER_DIR)/openxr.h +HEADER := $(HEADER_DIR)/openxr.h PLATHEAD := $(HEADER_DIR)/openxr_platform.h LOADERHEAD := $(HEADER_DIR)/openxr_loader_negotiation.h REFLECTHEADERS:= $(HEADER_DIR)/openxr_reflection.h \ @@ -163,7 +163,7 @@ header-test: header $(TESTSRC) # Some nominal targets are just "stamp" files generated. REGISTRY := registry/xr.xml -GENXR := $(SCRIPTS)/genxr.py +GENXR := $(SCRIPTS)/genxr.py BASIC_GENERATED_DEPENDS := \ $(REGISTRY) \ $(GENXR) \ @@ -180,6 +180,7 @@ GENSTAMPS := \ $(GENDIR)/api/apiinc \ $(GENDIR)/validity/validinc \ $(GENDIR)/hostsynctable/hostsyncinc \ + $(GENDIR)/interfaces/interfaceinc \ $(METADIR)/extinc \ # The actual generated index @@ -190,7 +191,7 @@ $(GENDEPENDS) $(GENHEADERS): $(BASIC_GENERATED_DEPENDS) $(ECHO) "[genxr] $(REGISTRY) -> $@" @if [ "x$(STAMP_NOTE)" != "x" ]; then echo " $(STAMP_NOTE)"; fi $(QUIET)$(MKDIR) "$(@D)" - $(QUIET)$(PYTHON) $(GENXR) $(GENXR_ARGS) $(EXTOPTIONS) -o "$(@D)" $(@F) + $(QUIET)$(PYTHON) $(GENXR) $(GENXR_ARGS) $(VERSIONOPTIONS) $(EXTOPTIONS) -o "$(@D)" $(@F) # Print an extra note for stamp files $(GENSTAMPS): STAMP_NOTE = (and additional files in $(@D)) @@ -204,20 +205,20 @@ $(GENHEADERS): $(SCRIPTS)/cgenerator.py $(REFLECTHEADERS): $(SCRIPTS)/creflectiongenerator.py $(SCRIPTS)/jinja_helpers.py $(wildcard $(SCRIPTS)/template_*) # The actual generated files depend on their stamp file. -GENAPI = $(wildcard $(GENDIR)/api/*/[A-Za-z]*.txt) +GENAPI = $(wildcard $(GENDIR)/api/*/[A-Za-z]*.adoc) $(GENAPI): $(GENDIR)/api/apiinc apiinc: $(GENDIR)/api/apiinc -GENVALIDITY = $(wildcard $(GENDIR)/validity/*/[A-Za-z]*.txt) +GENVALIDITY = $(wildcard $(GENDIR)/validity/*/[A-Za-z]*.adoc) $(GENVALIDITY): $(GENDIR)/validity/validinc validinc: $(GENDIR)/validity/validinc GENSYNC := \ - $(GENDIR)/hostsynctable/implicit.txt \ - $(GENDIR)/hostsynctable/parameterlists.txt \ - $(GENDIR)/hostsynctable/parameters.txt + $(GENDIR)/hostsynctable/implicit.adoc \ + $(GENDIR)/hostsynctable/parameterlists.adoc \ + $(GENDIR)/hostsynctable/parameters.adoc $(GENSYNC): $(GENDIR)/hostsynctable/hostsyncinc hostsyncinc: $(GENDIR)/hostsynctable/hostsyncinc @@ -227,10 +228,37 @@ $(GENMETA): $(METADIR)/extinc extinc: $(METADIR)/extinc +GENINTERFACE = $(wildcard $(GENDIR)/interfaces/[A-Za-z]*.adoc) +$(GENINTERFACE): $(GENDIR)/interfaces/interfaceinc +interfaceinc: $(GENDIR)/interfaces/interfaceinc + +ATTRIBFILE := $(GENDIR)/specattribs.adoc + +# This generates a single file containing asciidoc attributes for each +# core version and extension in the spec being built. +# For use with Antora, it also includes a couple of document attributes +# otherwise passed on the asciidoctor command line. +# These should not use the asciidoctor attribute names (e.g. revnumber, +# revdate), so use the Makefile variable names instead (e.g. +# SPECREVISION, SPECDATE). 
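
For reference, the generated `$(ATTRIBFILE)` produced by the rule below should look roughly like this (the extension name is one plausible entry, and the date/remark values are placeholders, not actual build output):

```
:XR_LOADER_VERSION_1_0:
:XR_VERSION_1_0:
:XR_VERSION_1_1:
:XR_KHR_vulkan_enable:
:SPECREVISION: 1.1.36
:SPECDATE: <build date>
:SPECREMARK: <build remark>
:APITITLE:
```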
+ +attribs: $(ATTRIBFILE) + +$(ATTRIBFILE): + $(ECHO) "[make] config -> $@" + $(QUIET)$(MKDIR) "$(@D)" + $(QUIET)for attrib in $(VERSIONS) $(EXTS) ; do \ + echo ":$${attrib}:" ; \ + done > $@ + $(QUIET)(echo ":SPECREVISION: $(SPECREVISION)" ; \ + echo ":SPECDATE: $(SPECDATE)" ; \ + echo ":SPECREMARK: $(SPECREMARK)" ; \ + echo ":APITITLE: $(APITITLE)") >> $@ + # The actual generated include files -GENINCLUDE = $(GENAPI) $(GENVALIDITY) $(GENSYNC) $(GENMETA) +GENINCLUDE = $(GENAPI) $(GENVALIDITY) $(GENSYNC) $(GENMETA) $(ATTRIBFILE) .PHONY: generated -generated: $(GENDEPENDS) +generated: $(GENDEPENDS) $(GENINCLUDE) ################################################ # OpenXR Style Guide @@ -248,7 +276,7 @@ ASCIIDOCTOR_TARGETS += $(STYLEGUIDE) # Target-specific variables and deps customizing the AsciiDoctor rule $(STYLEGUIDE): SPECSRC=$(STYLESRC) -$(STYLEGUIDE): LOGFILE=$(OUTDIR)/adoc_styleguide_stderr.txt +$(STYLEGUIDE): ADOCOPTS += $(ADOCHTMLOPTS) $(STYLEGUIDE): $(STYLESRC) $(STYLEFILES) $(GENDIR)/validity/validinc $(GENDIR)/api/apiinc $(RBAPIMAP) @@ -266,7 +294,7 @@ ASCIIDOCTOR_TARGETS += $(LOADERGUIDE) # Target-specific variables and deps customizing the AsciiDoctor rule $(LOADERGUIDE): SPECSRC=$(LOADERSRC) -$(LOADERGUIDE): LOGFILE=$(OUTDIR)/adoc_loader_stderr.txt +$(LOADERGUIDE): ADOCOPTS += $(ADOCHTMLOPTS) $(LOADERGUIDE): $(LOADERSRC) $(LOADERFILES) $(RBAPIMAP) @@ -282,7 +310,7 @@ extprocess: $(EXTPROCESSGUIDE) ASCIIDOCTOR_TARGETS += $(EXTPROCESSGUIDE) $(EXTPROCESSGUIDE): SPECSRC=$(EXTPROCESSSRC) -$(EXTPROCESSGUIDE): LOGFILE=$(OUTDIR)/adoc_extprocess_stderr.txt +$(EXTPROCESSGUIDE): ADOCOPTS += $(ADOCHTMLOPTS) $(EXTPROCESSGUIDE): $(EXTPROCESSSRC) ################################################ @@ -300,10 +328,9 @@ html: $(HTMLSPEC) ASCIIDOCTOR_TARGETS += $(HTMLSPEC) # Target-specific variables and deps customizing the AsciiDoctor rule -$(HTMLSPEC): LOGFILE=$(OUTDIR)/adoc_html_stderr.txt -$(HTMLSPEC): ATTRIBOPTS+=-a sectanchors +$(HTMLSPEC): ATTRIBOPTS += -a sectanchors +$(HTMLSPEC): ADOCOPTS += $(ADOCHTMLOPTS) $(HTMLSPEC): $(COMMONDOCS) -$(HTMLSPEC): POSTPROCESS=$(QUIET)$(PYTHON) $(SCRIPTS)/genanchorlinks.py $@ $@ ## PDF PDFSPEC := $(OUTDIR)/$(SPEC_FILENAME_STEM).pdf @@ -316,7 +343,6 @@ pdfA4: $(PDFA4SPEC) ASCIIDOCTOR_TARGETS += $(PDFSPEC) $(PDFA4SPEC) # Target-specific variables and deps customizing the AsciiDoctor rule -$(PDFSPEC) $(PDFA4SPEC): LOGFILE=$(OUTDIR)/adoc_pdf_stderr.txt $(PDFSPEC) $(PDFA4SPEC): BACKEND_ARGS=--backend pdf --require asciidoctor-pdf -a compress -r ./scripts/pdf-index-customizer.rb $(PDFSPEC): PAGESIZE=LETTER $(PDFA4SPEC): PAGESIZE=A4 @@ -325,7 +351,6 @@ $(PDFSPEC) $(PDFA4SPEC): $(COMMONDOCS) ################################################ ## Shared asciidoctor rule -EXTATTRIBS := $(foreach ext,$(EXTS),-a $(ext)) EXTOPTIONS := $(foreach ext,$(EXTS),-extension $(ext)) # Generate Asciidoc attributes for spec revision remark. 
@@ -343,19 +368,32 @@ endif GITREMARK ?= from git branch: $(GITBRANCH) ATTRIBOPTS = -a revnumber="$(SPECREVISION)" \ - -a revremark="$(SPECREMARK)" \ - -a apititle="$(APITITLE)" \ - -a config=$(CURDIR)/config \ - -a pdf-page-size=$(PAGESIZE) \ - -a pdf-stylesdir=config \ - -a pdf-style=pdf \ - -a generated=$(abspath $(GENDIR)) \ - -a appendices=$(APPENDICES) \ - $(EXTATTRIBS) - -# Look in $(GENERATED) for explicitly required non-extension Ruby, such + -a revremark="$(SPECREMARK)" \ + -a apititle="$(APITITLE)" \ + -a config=$(CURDIR)/config \ + -a chapters=$(CURDIR)/sources/chapters \ + -a pdf-page-size=$(PAGESIZE) \ + -a pdf-stylesdir=config \ + -a pdf-style=pdf \ + -a generated=$(abspath $(GENDIR)) \ + -a appendices=$(APPENDICES) \ + $(foreach version,$(VERSIONS),-a $(version)) \ + $(EXTATTRIBS) + +# Look in $(GENDIR) for explicitly required non-extension Ruby, such # as apimap.rb -ADOCOPTS = --doctype book -a data-uri -I$(GENDIR) -r $(CURDIR)/scripts/spec-macros.rb $(ATTRIBOPTS) +ADOCOPTS = --doctype book \ + -a data-uri \ + -I$(GENDIR) \ + --require $(CURDIR)/scripts/spec-macros.rb \ + $(ADOC_FAILURE_LEVEL) \ + $(ATTRIBOPTS) + +ADOCHTMLOPTS := \ + --require $(CURDIR)/scripts/rouge-extend-css.rb \ + --require $(CURDIR)/scripts/genanchorlinks.rb \ + --require $(CURDIR)/scripts/nonbreaking-ext-titles.rb \ + ifneq (,$(strip $(RELEASE))) # No dates or internal commit hashes in release builds for reproducibility @@ -377,20 +415,18 @@ ifneq (,$(strip $(KHRONOS_SPEC_LICENSED))) ATTRIBOPTS += -a include-dedication-photo endif +CSS_FILENAME := khronos.css + # Default to html5 -BACKEND_ARGS=--backend html5 +BACKEND_ARGS := --backend html5 \ + -a stylesdir=$(CURDIR)/config \ + -a stylesheet=$(CSS_FILENAME) # AsciiDoctor rule - customized by the places where these are described $(ASCIIDOCTOR_TARGETS): $(ECHO) "[asciidoctor] $(SPECSRC) -> $(call MAKE_RELATIVE,$@)" $(QUIET)$(MKDIR) "$(@D)" - $(QUIET)$(ASCIIDOC) $(ADOCOPTS) $(BACKEND_ARGS) --out-file $@ $(SPECSRC) 2>&1 | tee $(LOGFILE) - $(QUIET)if [ -s $(LOGFILE) ]; then \ - echo "Failure: $(LOGFILE) exists and is not empty!"; \ - false; \ - else \ - rm $(LOGFILE); \ - fi + $(QUIET)$(ASCIIDOC) $(ADOCOPTS) $(BACKEND_ARGS) --out-file $@ $(SPECSRC) $(POSTPROCESS) @@ -398,14 +434,15 @@ $(ASCIIDOCTOR_TARGETS): # Reference "man" pages extracted from spec MANHTMLDIR = $(OUTDIR)/man/html -KHRSOURCES = $(wildcard $(REFPATH)/*KHR.txt) -MACROSOURCES = $(wildcard $(REFPATH)/XR_*[A-Z][A-Z].txt) -VENSOURCES = $(filter-out $(KHRSOURCES) $(MACROSOURCES),$(wildcard $(REFPATH)/*[A-Z][A-Z].txt)) -CORESOURCES = $(filter-out $(KHRSOURCES) $(VENSOURCES),$(wildcard $(REFPATH)/[Xx][Rr]*.txt $(REFPATH)/PFN*.txt)) +KHRSOURCES = $(wildcard $(REFPATH)/*KHR.adoc) +MACROSOURCES = $(wildcard $(REFPATH)/XR_*[A-Z][A-Z].adoc) +VENSOURCES = $(filter-out $(KHRSOURCES) $(MACROSOURCES),$(wildcard $(REFPATH)/*[A-Z][A-Z].adoc)) +CORESOURCES = $(filter-out $(KHRSOURCES) $(VENSOURCES),$(wildcard $(REFPATH)/[Xx][Rr]*.adoc $(REFPATH)/PFN*.adoc)) MANSOURCES = $(CORESOURCES) $(VENSOURCES) $(KHRSOURCES) MANGENERATED = $(wildcard $(REFPATH)/*) -MANHTML = $(MANSOURCES:$(REFPATH)/%.txt=$(MANHTMLDIR)/%.html) -MANDEPS = $(GENINCLUDE) $(GENDEPENDS) +MANHTML = $(MANSOURCES:$(REFPATH)/%.adoc=$(MANHTMLDIR)/%.html) +MANCSSDIR := $(MANHTMLDIR)/css +MANDEPS = $(GENINCLUDE) $(GENDEPENDS) $(MANCSSDIR)/$(CSS_FILENAME) HTML_SPEC_RELATIVE ?= ../../html/$(SPEC_FILENAME_STEM).html # Asciidoctor options to build refpages @@ -416,12 +453,15 @@ HTML_SPEC_RELATIVE ?= ../../html/$(SPEC_FILENAME_STEM).html # refprefix 
includes the refpage (not spec) extension metadata. # isrefpage is for refpage-specific content # html_spec_relative is where to find the full specification -ADOCREFOPTS = -a stylesheet=khronos.css \ - -a stylesdir=$(CURDIR)/config \ - -a refprefix='refpage.' \ +ADOCREFOPTS = -a refprefix='refpage.' \ -a isrefpage \ -a html_spec_relative='$(HTML_SPEC_RELATIVE)' \ - -a imagesdir=$(CURDIR)/sources + -a imagesdir=$(CURDIR)/sources \ + -a source-highlighter=rouge \ + -a rouge-style=github \ + -a linkcss \ + -a copycss=$(MANCSSDIR)/$(CSS_FILENAME) \ + -a stylesdir=css # Pure makefile lowercase function, generated by a script. make_lower = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1))))))))))))))))))))))))))) @@ -433,6 +473,12 @@ make_extension_source = sources/chapters/extensions/$(word 1,$(subst _, ,$(1)))/ # Call make_extension_source on every enabled extension, after lowercasing and stripping the leading XR prefix. EXTENSION_SOURCES := $(foreach ext,$(patsubst xr_%,%,$(EXTENSIONS_LOWER)),$(call make_extension_source,$(ext))) +# Manually copy the CSS file. Asciidoctor does not always do this, it seems. +$(MANCSSDIR)/$(CSS_FILENAME): $(CURDIR)/config/$(CSS_FILENAME) + $(ECHO) "[cp] $(call MAKE_RELATIVE,$<) -> $(call MAKE_RELATIVE,$@)" + $(QUIET)$(MKDIR) "$(@D)" + $(QUIET)$(CP) "$<" "$@" + # Generation of refpage asciidoctor sources by extraction from the # specification. # @@ -446,30 +492,30 @@ EXTENSION_SOURCES := $(foreach ext,$(patsubst xr_%,%,$(EXTENSIONS_LOWER)),$(call # Should pass in $(EXTOPTIONS) to determine which pages to generate. # For now, all core and extension refpages are extracted by genRef.py. # -# Treating the all-in-one ref page source apispec.txt as the "stamp" for genRef. +# Treating the all-in-one ref page source apispec.adoc as the "stamp" for genRef. GENREF = $(SCRIPTS)/genRef.py LOGFILE = $(REFPATH)/refpage.log -refpages: $(REFPATH)/apispec.txt -$(REFPATH)/apispec.txt: $(SPECFILES) $(EXTENSION_SOURCES) $(GENREF) $(SCRIPTS)/reflib.py $(PYAPIMAP) +refpages: $(REFPATH)/apispec.adoc +$(REFPATH)/apispec.adoc: $(SPECFILES) $(EXTENSION_SOURCES) $(GENREF) $(SCRIPTS)/reflib.py $(PYAPIMAP) $(ECHO) "[genRef.py] $(REGISTRY) and spec -> $@" $(ECHO) " (and additional files in $(@D))" $(QUIET)$(MKDIR) "$(REFPATH)" $(QUIET)$(PYTHON) $(GENREF) -genpath $(abspath $(GENDIR)) -basedir $(REFPATH) \ -log $(LOGFILE) \ -registry $(REGISTRY) $(EXTOPTIONS) $(SPECFILES) $(EXTENSION_SOURCES) - $(QUIET)grep "ERROR:" $(LOGFILE) + $(QUIET)grep "ERROR:" $(LOGFILE) || true # These targets are HTML5 refpages # # The recursive $(MAKE) is an apparently unavoidable hack, since the # actual list of man page sources isn't known until after -# $(REFPATH)/apispec.txt is generated. $(GENDEPENDS) is generated before +# $(REFPATH)/apispec.adoc is generated. $(GENDEPENDS) is generated before # running the recursive make, so it does not trigger twice # $(SUBMAKEOPTIONS) suppresses the redundant "Entering / leaving" # messages make normally prints out, similarly to suppressing make # command output logging in the individual refpage actions below. 
SUBMAKEOPTIONS = --no-print-directory -manhtmlpages: $(REFPATH)/apispec.txt $(GENDEPENDS) +manhtmlpages: $(REFPATH)/apispec.adoc $(GENDEPENDS) $(QUIET)$(MAKE) $(SUBMAKEOPTIONS) -e buildmanpages # Build the individual refpages, then the symbolic links from aliases @@ -483,19 +529,19 @@ manaliases: $(PYAPIMAP) # This is the single-page ref page. # 'doctype-manpage' allows use of the "book" style but still enable the # refpage-only portions. -$(MANHTMLDIR)/openxr.html: $(REFPATH)/apispec.txt $(MANDEPS) +$(MANHTMLDIR)/openxr.html: $(REFPATH)/apispec.adoc $(MANDEPS) $(ECHO) "[asciidoctor] $(call MAKE_RELATIVE,$<) -> $(call MAKE_RELATIVE,$@)" $(QUIET)$(MKDIR) "$(@D)" - $(QUIET)$(ASCIIDOC) -b html5 $(ADOCOPTS) $(ADOCREFOPTS) -a doctype-manpage -d book -o $@ $< + $(QUIET)$(ASCIIDOC) $(BACKEND_ARGS) $(ADOCOPTS) $(ADOCREFOPTS) -a doctype-manpage -d book -o $@ $< # The refpage build process normally generates far too much output, so # use VERYQUIET instead of QUIET # 'cross-file-links' causes the *link: macros to link to other refpages, # instead of internal anchors. -$(MANHTML): $(MANHTMLDIR)/%.html: $(REFPATH)/%.txt $(MANDEPS) +$(MANHTML): $(MANHTMLDIR)/%.html: $(REFPATH)/%.adoc $(MANDEPS) $(ECHO) "[asciidoctor] $(call MAKE_RELATIVE,$<) -> $(call MAKE_RELATIVE,$@)" $(QUIET)$(MKDIR) "$(@D)" - $(VERYQUIET)$(ASCIIDOC) -b html5 $(ADOCOPTS) $(ADOCHTMLOPTS) $(ADOCREFOPTS) \ + $(VERYQUIET)$(ASCIIDOC) $(BACKEND_ARGS) $(ADOCOPTS) $(ADOCHTMLOPTS) $(ADOCREFOPTS) \ -a cross-file-links -d manpage -o $@ $< # Defer for now - there is no latexmath in the XR spec, unlike Vulkan @@ -594,10 +640,10 @@ REGISTRYOUTDIR = $(GENDIR)/out/registry-release/specs/$(MAJORMINORVER)$(RELEASE_ $(REGISTRYOUTDIR): $(QUIET)$(MKDIR) "$@" -$(REGISTRYOUTDIR)/pdf $(REGISTRYOUTDIR)/html $(REGISTRYOUTDIR)/man $(REGISTRYOUTDIR)/headers: $(REGISTRYOUTDIR) +$(REGISTRYOUTDIR)/pdf $(REGISTRYOUTDIR)/html $(REGISTRYOUTDIR)/man $(REGISTRYOUTDIR)/headers: | $(REGISTRYOUTDIR) $(QUIET)$(MKDIR) "$@" -$(REGISTRYOUTDIR)/headers/openxr: $(REGISTRYOUTDIR)/headers +$(REGISTRYOUTDIR)/headers/openxr: | $(REGISTRYOUTDIR)/headers $(QUIET)$(MKDIR) "$@" release-htmlpdf: html pdf $(REGISTRYOUTDIR)/pdf $(REGISTRYOUTDIR)/html @@ -607,7 +653,7 @@ release-htmlpdf: html pdf $(REGISTRYOUTDIR)/pdf $(REGISTRYOUTDIR)/html .PHONY: release-htmlpdf -release: release-htmlpdf manhtmlpages loader styleguide extprocess $(REGISTRYOUTDIR) $(REGISTRYOUTDIR)/man +release: release-htmlpdf manhtmlpages loader styleguide extprocess | $(REGISTRYOUTDIR) $(REGISTRYOUTDIR)/man $(QUIET)$(CP) $(OUTDIR)/styleguide.html $(OUTDIR)/extprocess.html $(OUTDIR)/loader.html $(REGISTRYOUTDIR) $(QUIET)$(CP) -R $(MANHTMLDIR) $(REGISTRYOUTDIR)/man/html .PHONY: release @@ -624,11 +670,22 @@ DIRT = $(PYDIRT) $(MANDIRT) ERRS \#* clean_dirt: $(RM) $(DIRT) +# Generated directories and files to remove +CLEAN_GEN_PATHS := \ + $(PYAPIMAP) \ + $(RBAPIMAP) \ + $(GENDIR)/index.adoc \ + $(GENDIR)/api \ + $(GENDIR)/validity \ + $(GENDIR)/hostsynctable \ + $(METADIR) \ + $(REFPATH) \ + $(ATTRIBFILE) + # Clean intermediate generated files # Don't remove OUTDIR, since it contains the config stamp and final output targets clean_generated: - $(RMRF) $(PYAPIMAP) $(RBAPIMAP) $(GENDIR)/index.adoc - $(RMRF) $(GENDIR)/api $(GENDIR)/validity $(GENDIR)/hostsynctable $(METADIR) $(REFPATH) + $(RMRF) $(CLEAN_GEN_PATHS) # Clean generated targets as well as intermediates. 
clean clobber: clean_dirt clean_generated diff --git a/specification/README.md b/specification/README.md index bdab33d7..8cd8479c 100644 --- a/specification/README.md +++ b/specification/README.md @@ -86,7 +86,7 @@ These are invoked as follows: These targets generate a variety of output documents in the directory specified by the Makefile variable `$(OUTDIR)` (by default, -`generated/out/1.0/`). +`generated/out/1.1/`). It is recommended to build these targets using a "helper" script from above, unless you want to only build the core spec without any extensions. @@ -210,8 +210,10 @@ environment managers below. Please read the remainder of this document (other than platform-specific parts you don't use) completely before trying to install. -* Asciidoctor (`asciidoctor`, version: 2.0.10 or compatible) -* Asciidoctor PDF (`asciidoctor-pdf`, version: 1.5.0 or compatible) +* Asciidoctor (`asciidoctor`, version: 2.0.10 or compatible, apt package + `asciidoctor`) +* Rouge (`rouge`, apt package `ruby-rouge`) +* Asciidoctor PDF (`asciidoctor-pdf`, version: 1.5.0 through 1.6.2) **Note:** > Asciidoctor-pdf versions before `1.5.0.alpha15` have issues with multi-page @@ -219,8 +221,8 @@ valid usage blocks, in that the background only renders for the first page. `alpha.15` fixes this issue (as well as a few others); do not use prior versions. -Only the `asciidoctor` gem (and its dependencies) is needed if you don't intend -to build PDF versions of the spec and supporting documents. +Only the `asciidoctor` and `rouge` gems (and their dependencies) are needed if +you do not intend to build PDF versions of the spec and supporting documents. **Note:** > While it's easier to install just the toolchain components for HTML builds, @@ -245,7 +247,6 @@ gem in the user's directory. The Ubuntu 16.04.6 default Ruby install (version 2.3.1) seems to be up-to-date enough to run all the required gems. Just follow the Debian-derived Linux instructions below. - **Notes:** * If you're already using [rvm](https://rvm.io) or @@ -274,18 +275,18 @@ and Yum (SuSE) will have different requirements. 
# Absolute bare minimum for only these makefile targets: # header html manhtmlpages extprocess styleguide loader sudo apt install make git ruby python3 -gem install --user asciidoctor +gem install --user "asciidoctor:~>2.0.10" rouge # More complete, for building these makefile targets: # header html pdf pdfA4 manhtmlpages styleguide loader header-test: build-examples check-spec-links release sudo apt -y install build-essential python3 git libxml2-dev ttf-lyx ghostscript ruby \ python3-termcolor python3-tabulate python3-networkx -gem install --user "asciidoctor:~>2.0.10" "asciidoctor-pdf:~>1.5.0" +gem install --user "asciidoctor:~>2.0.10" rouge "asciidoctor-pdf:1.6.2" # Full build: supports all makefile targets, including the "all" target sudo apt -y install build-essential python3 git libxml2-dev ttf-lyx ghostscript ruby \ trang jing python3-termcolor python3-tabulate python3-networkx -gem install --user "asciidoctor:~>2.0.10" "asciidoctor-pdf:~>1.5.0" +gem install --user "asciidoctor:~>2.0.10" rouge "asciidoctor-pdf:1.6.2" ``` Ubuntu 20.04+, Debian Bullseye, and Debian Buster Backports all have new enough @@ -295,7 +296,7 @@ be replaced with something like this (adding `-t buster-backports` if you're sti on Buster): ``` -sudo apt install asciidoctor +sudo apt install asciidoctor ruby-rouge # or this, if you also want to build PDFs: sudo apt install asciidoctor ruby-asciidoctor-pdf @@ -318,7 +319,7 @@ Something like this will work: gem install --user "asciidoctor:~>2.0.10" # Adding PDF support -gem install --user "asciidoctor:~>2.0.10" "asciidoctor-pdf:~>1.5.0" +gem install --user "asciidoctor:~>2.0.10" "asciidoctor-pdf:1.6.2" ``` After the `gem install --user`, you may see a message that a directory isn't on @@ -358,7 +359,7 @@ gem uninstall asciidoctor-pdf \ asciidoctor-mathematical \ asciidoctor \ rake \ - coderay \ + rouge \ json-schema \ mathematical \ ruby-enum @@ -371,7 +372,7 @@ sudo gem uninstall asciidoctor-pdf \ asciidoctor-mathematical \ asciidoctor \ rake \ - coderay \ + rouge \ json-schema \ mathematical \ ruby-enum @@ -390,7 +391,7 @@ But, just in case, I asked to install them again. ```sh sudo gem install asciidoctor \ rake \ - coderay \ + rouge \ json-schema \ mathematical \ ruby-enum \ diff --git a/specification/registry/xr.xml b/specification/registry/xr.xml index b6053cc5..c552f0b5 100644 --- a/specification/registry/xr.xml +++ b/specification/registry/xr.xml @@ -133,7 +133,7 @@ maintained in the default branch of the Khronos OpenXR GitHub project. updates them automatically by processing a line at a time. --> // OpenXR current version number. -#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 34) +#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 1, 36) + + typedef XrFlags64 XrInstanceCreateFlags; typedef XrFlags64 XrSessionCreateFlags; @@ -403,6 +412,10 @@ maintained in the default branch of the Khronos OpenXR GitHub project. typedef XrFlags64 XrLocalizationMapErrorFlagsML; + + typedef XrFlags64 XrEnvironmentDepthProviderCreateFlagsMETA; + typedef XrFlags64 XrEnvironmentDepthSwapchainCreateFlagsMETA; + XR_DEFINE_HANDLE(XrInstance) @@ -464,6 +477,13 @@ maintained in the default branch of the Khronos OpenXR GitHub project. XR_DEFINE_HANDLE(XrMarkerDetectorML) + + XR_DEFINE_OPAQUE_64(XrFutureEXT) + + + XR_DEFINE_HANDLE(XrEnvironmentDepthProviderMETA) + XR_DEFINE_HANDLE(XrEnvironmentDepthSwapchainMETA) + @@ -665,6 +685,13 @@ maintained in the default branch of the Khronos OpenXR GitHub project. 
+ + + + + + + float x @@ -705,11 +732,9 @@ maintained in the default branch of the Khronos OpenXR GitHub project. float width float height - - float width - float height - float depth - + + + XrOffset2Df offset XrExtent2Df extent @@ -2103,19 +2128,23 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( const void* next XrMeshComputeLodMSFT lod + XrVector3f center float radius + XrPosef pose XrVector3f extents + XrPosef pose - XrFovf fov - float farDistance + XrFovf fov + float farDistance + XrSpace space XrTime time @@ -2643,20 +2672,19 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - - float width - float height - float depth - + + float x float y float z + XrOffset3DfFB offset XrExtent3DfFB extent + XrStructureType type const void* next @@ -3017,9 +3045,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - - uint8_t data[XR_UUID_SIZE_EXT] - + @@ -3553,6 +3579,49 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( XrMarkerML marker XrPosef poseInMarkerSpace + + float r + float g + float b + + + + float width + float height + float depth + + + + XrPosef center + float radius + + + + XrPosef center + XrExtent3Df extents + + + + XrPosef pose + XrFovf fov + float nearZ + float farZ + + + + uint8_t data[XR_UUID_SIZE] + + + + + + + + + + + + @@ -3583,6 +3652,129 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( XrBool32 isUserPresent + + + + XrStructureType type + void* next + XrResult futureResult + + + + XrStructureType type + void* next + XrResult futureResult + + + + XrStructureType type + const void* next + XrFutureEXT future + + + + XrStructureType type + const void* next + XrFutureEXT future + + + + XrStructureType type + void* next + XrFutureStateEXT state + + + + + + + XrStructureType type + const void* next + XrEnvironmentDepthProviderCreateFlagsMETA createFlags + + + XrStructureType type + const void* next + XrEnvironmentDepthSwapchainCreateFlagsMETA createFlags + + + XrStructureType type + void* next + uint32_t width + uint32_t height + + + XrStructureType type + const void* next + XrSpace space + XrTime displayTime + + + XrStructureType type + const void* next + XrFovf fov + XrPosef pose + + + XrStructureType type + const void* next + uint32_t swapchainIndex + float nearZ + float farZ + XrEnvironmentDepthImageViewMETA views[2] + + + XrStructureType type + const void* next + XrBool32 enabled + + + XrStructureType type + void* next + XrBool32 supportsEnvironmentDepth + XrBool32 supportsHandRemoval + + + + + + XrStructureType type + const void* next + XrSpace baseSpace + XrTime time + uint32_t spaceCount + const XrSpace* spaces + + + + + XrStructureType type + void* next + uint32_t locationCount + XrSpaceLocationData* locations + + + + + XrSpaceLocationFlags locationFlags + XrPosef pose + + + + + XrStructureType type + void* next + uint32_t velocityCount + XrSpaceVelocityData* velocities + + + + + XrSpaceVelocityFlags velocityFlags + XrVector3f linearVelocity + XrVector3f angularVelocity + + @@ -3607,6 +3799,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( + + @@ -4853,6 +5046,21 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( + + + + + + + + + + + + + + + @@ -4875,6 +5083,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( + @@ -4896,7 +5105,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( uint32_t* propertyCountOutput XrExtensionProperties* properties - + XrResult xrCreateInstance const XrInstanceCreateInfo* createInfo XrInstance* instance @@ -6594,6 +6803,84 @@ 
typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( XrSpace* space + + + XrResult xrPollFutureEXT + XrInstance instance + const XrFuturePollInfoEXT* pollInfo + XrFuturePollResultEXT* pollResult + + + + XrResult xrCancelFutureEXT + XrInstance instance + const XrFutureCancelInfoEXT* cancelInfo + + + + + + XrResult xrCreateEnvironmentDepthProviderMETA + XrSession session + const XrEnvironmentDepthProviderCreateInfoMETA* createInfo + XrEnvironmentDepthProviderMETA* environmentDepthProvider + + + XrResult xrDestroyEnvironmentDepthProviderMETA + XrEnvironmentDepthProviderMETA environmentDepthProvider + + + XrResult xrStartEnvironmentDepthProviderMETA + XrEnvironmentDepthProviderMETA environmentDepthProvider + + + XrResult xrStopEnvironmentDepthProviderMETA + XrEnvironmentDepthProviderMETA environmentDepthProvider + + + XrResult xrCreateEnvironmentDepthSwapchainMETA + XrEnvironmentDepthProviderMETA environmentDepthProvider + const XrEnvironmentDepthSwapchainCreateInfoMETA* createInfo + XrEnvironmentDepthSwapchainMETA* swapchain + + + XrResult xrDestroyEnvironmentDepthSwapchainMETA + XrEnvironmentDepthSwapchainMETA swapchain + + + XrResult xrEnumerateEnvironmentDepthSwapchainImagesMETA + XrEnvironmentDepthSwapchainMETA swapchain + uint32_t imageCapacityInput + uint32_t* imageCountOutput + XrSwapchainImageBaseHeader* images + + + XrResult xrGetEnvironmentDepthSwapchainStateMETA + XrEnvironmentDepthSwapchainMETA swapchain + XrEnvironmentDepthSwapchainStateMETA* state + + + XrResult xrAcquireEnvironmentDepthImageMETA + XrEnvironmentDepthProviderMETA environmentDepthProvider + const XrEnvironmentDepthImageAcquireInfoMETA* acquireInfo + XrEnvironmentDepthImageMETA* environmentDepthImage + + + XrResult xrSetEnvironmentDepthHandRemovalMETA + XrEnvironmentDepthProviderMETA environmentDepthProvider + const XrEnvironmentDepthHandRemovalSetInfoMETA* setInfo + + + + + + + XrResult xrLocateSpaces + XrSession session + const XrSpacesLocateInfo* locateInfo + XrSpaceLocations* spaceLocations + + @@ -6852,7 +7139,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - @@ -7054,6 +7340,167 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -7247,6 +7694,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( + @@ -7394,6 +7842,412 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -7457,7 +8311,7 @@ typedef XrResult (XRAPI_PTR 
*PFN_xrCreateApiLayerInstance)( - + @@ -7730,11 +8584,11 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - - - + + + @@ -7795,7 +8649,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -7869,12 +8723,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -7914,7 +8768,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -7960,7 +8814,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -7998,7 +8852,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8101,7 +8955,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8110,12 +8964,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8123,7 +8977,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8215,7 +9069,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8289,7 +9143,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8359,7 +9213,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8414,14 +9268,14 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8434,12 +9288,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8447,14 +9301,14 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8462,12 +9316,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8475,7 +9329,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8564,7 +9418,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8609,14 +9463,14 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8624,12 +9478,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8649,7 +9503,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8658,7 +9512,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8691,13 +9545,13 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8705,12 +9559,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8724,12 +9578,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -8768,7 +9622,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8784,7 +9638,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8795,7 +9649,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8851,7 +9705,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8871,7 +9725,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -8951,7 +9805,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9029,7 +9883,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9045,7 +9899,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9151,13 +10005,13 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -9166,12 +10020,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -9267,7 +10121,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9354,7 +10208,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9411,7 +10265,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9440,7 +10294,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9494,7 +10348,7 @@ typedef XrResult (XRAPI_PTR 
*PFN_xrCreateApiLayerInstance)( - + @@ -9533,7 +10387,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9567,7 +10421,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9578,7 +10432,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9587,7 +10441,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9596,7 +10450,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9642,7 +10496,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9705,7 +10559,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9735,9 +10589,9 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -9931,7 +10785,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -9944,7 +10798,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10029,7 +10883,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10399,7 +11253,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10623,7 +11477,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10665,7 +11519,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10689,7 +11543,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10855,11 +11709,51 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -10911,7 +11805,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -10941,7 +11835,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -11502,7 +12396,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -11575,7 +12469,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -11583,7 +12477,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -11606,7 +12500,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -11617,7 +12511,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -11920,11 +12814,11 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - - - + + + @@ -11935,7 +12829,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -12266,10 +13160,36 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -12286,10 +13206,21 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - - + + + + + + + + + + + + + @@ -12467,7 +13398,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + @@ -12476,12 +13407,12 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + - + @@ -13973,11 +14904,126 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)( - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/specification/scripts/__init__.py.docs b/specification/scripts/__init__.py.docs index ce7877b6..373e26e0 100644 --- a/specification/scripts/__init__.py.docs +++ b/specification/scripts/__init__.py.docs @@ -1,3 +1,6 @@ +# Copyright 2019-2024 The Khronos Group Inc. +# +# SPDX-License-Identifier: Apache-2.0 """Scripts for building the OpenXR specification and artifacts. 
@@ -5,9 +8,5 @@ See also `src.scripts` for scripts that help build the loader and layers,
 many of which build on these scripts.
 """
-# Copyright (c) 2013-2024, The Khronos Group Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-
 # This is only used during doc builds, hence the weird file extension.
 # It messes up scripts at other times.
diff --git a/specification/scripts/cgenerator.py b/specification/scripts/cgenerator.py
index 316945e5..6dea2957 100644
--- a/specification/scripts/cgenerator.py
+++ b/specification/scripts/cgenerator.py
@@ -252,7 +252,7 @@ def endFeature(self):
             raise MissingGeneratorOptionsError()
         if self.genOpts.conventions is None:
             raise MissingGeneratorOptionsConventionsError()
-        is_core = self.featureName and self.featureName.startswith(self.conventions.api_prefix + 'VERSION_')
+        is_core = self.featureName and self.featureName.startswith(f"{self.conventions.api_prefix}VERSION_")
         if self.genOpts.conventions.writeFeature(self.featureName, self.featureExtraProtect, self.genOpts.filename):
             self.newline()
             if self.genOpts.protectFeature:
@@ -265,6 +265,8 @@ def endFeature(self):
                 write('#ifdef', self.featureExtraProtect, file=self.outFile)
                 self.newline()
 
+            # Generate warning of possible use in IDEs
+            write(f'// {self.featureName} is a preprocessor guard. Do not pass it to API calls.', file=self.outFile)
             write('#define', self.featureName, '1', file=self.outFile)
             for section in self.TYPE_SECTIONS:
                 contents = self.sections[section]
@@ -284,26 +286,26 @@ def endFeature(self):
                           self.genOpts.protectExtensionProtoStr, file=self.outFile)
                 write('\n'.join(self.sections['command']), end='', file=self.outFile)
                 if self.genOpts.protectExtensionProto and not is_core:
-                    write('#endif' +
-                          self._endProtectComment(protect_directive=self.genOpts.protectExtensionProto,
-                                                  protect_str=self.genOpts.protectExtensionProtoStr),
+                    comment = self._endProtectComment(protect_directive=self.genOpts.protectExtensionProto,
+                                                      protect_str=self.genOpts.protectExtensionProtoStr)
+                    write(f"#endif{comment}",
                           file=self.outFile)
                 if self.genOpts.protectProto:
-                    write('#endif' +
-                          self._endProtectComment(protect_directive=self.genOpts.protectProto,
-                                                  protect_str=self.genOpts.protectProtoStr),
+                    comment = self._endProtectComment(protect_directive=self.genOpts.protectProto,
+                                                      protect_str=self.genOpts.protectProtoStr)
+                    write(f"#endif{comment}",
                           file=self.outFile)
             else:
                 self.newline()
 
         if self.featureExtraProtect is not None:
-            write('#endif' +
-                  self._endProtectComment(protect_str=self.featureExtraProtect),
+            comment = self._endProtectComment(protect_str=self.featureExtraProtect)
+            write(f"#endif{comment}",
                   file=self.outFile)
 
         if self.genOpts.protectFeature:
-            write('#endif' +
-                  self._endProtectComment(protect_str=self.featureName),
+            comment = self._endProtectComment(protect_str=self.featureName)
+            write(f"#endif{comment}",
                   file=self.outFile)
 
         # Finish processing in superclass
         OutputGenerator.endFeature(self)
@@ -345,7 +347,7 @@ def genType(self, typeinfo, name, alias):
         # OpenXR: this section was not under 'else:' previously, just fell through
         if alias:
             # If the type is an alias, just emit a typedef declaration
-            body = 'typedef ' + alias + ' ' + name + ';\n'
+            body = f"typedef {alias} {name};\n"
         else:
             # Replace <apientry /> tags with an APIENTRY-style string
             # (from self.genOpts). Copy other text through unchanged.
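
A quick aside, not part of the patch: a minimal sketch of what the new guard
comment added in endFeature() above produces in a generated header. The feature
name is hypothetical, and the real generator writes to its output file rather
than printing.

# Emulate the two write() calls added to endFeature(), with a made-up name.
feature_name = "XR_KHR_example"  # hypothetical feature name
print(f"// {feature_name} is a preprocessor guard. Do not pass it to API calls.")
print(f"#define {feature_name} 1")
# Output:
# // XR_KHR_example is a preprocessor guard. Do not pass it to API calls.
# #define XR_KHR_example 1
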
@@ -356,6 +358,8 @@ def genType(self, typeinfo, name, alias):
                     body += self.genOpts.apientry + noneStr(elem.tail)
                 else:
                     body += noneStr(elem.text) + noneStr(elem.tail)
+        if category == 'define' and self.misracppstyle():
+            body = body.replace("(uint32_t)", "static_cast<uint32_t>")
         if body:
             # Add extra newline after multi-line entries.
             if '\n' in body[0:-1]:
@@ -376,13 +380,13 @@ def genProtectString(self, protect_str):
 
         if ',' in protect_str:
             protect_list = protect_str.split(',')
-            protect_defs = ('defined(%s)' % d for d in protect_list)
+            protect_defs = (f'defined({d})' for d in protect_list)
             protect_def_str = ' && '.join(protect_defs)
-            protect_if_str = '#if %s\n' % protect_def_str
-            protect_end_str = '#endif // %s\n' % protect_def_str
+            protect_if_str = f'#if {protect_def_str}\n'
+            protect_end_str = f'#endif // {protect_def_str}\n'
         else:
-            protect_if_str = '#ifdef %s\n' % protect_str
-            protect_end_str = '#endif // %s\n' % protect_str
+            protect_if_str = f'#ifdef {protect_str}\n'
+            protect_end_str = f'#endif // {protect_str}\n'
 
         return (protect_if_str, protect_end_str)
@@ -425,7 +429,7 @@ def genStruct(self, typeinfo, typeName, alias):
         typeElem = typeinfo.elem
 
         if alias:
-            body = 'typedef ' + alias + ' ' + typeName + ';\n'
+            body = f"typedef {alias} {typeName};\n"
         else:
             body = ''
             (protect_begin, protect_end) = self.genProtectString(typeElem.get('protect'))
@@ -434,23 +438,23 @@ def genStruct(self, typeinfo, typeName, alias):
 
             if self.genOpts.genStructExtendsComment:
                 structextends = typeElem.get('structextends')
-                body += '// ' + typeName + ' extends ' + structextends + '\n' if structextends else ''
+                body += f"// {typeName} extends {structextends}\n" if structextends else ''
 
-            body += 'typedef ' + typeElem.get('category')
+            body += f"typedef {typeElem.get('category')}"
 
             # This is an OpenXR-specific alternative where aliasing refers
             # to an inheritance hierarchy of types rather than C-level type
             # aliases.
             if self.genOpts.genAliasMacro and self.typeMayAlias(typeName):
-                body += ' ' + self.genOpts.aliasMacro
+                body += f" {self.genOpts.aliasMacro}"
 
-            body += ' ' + typeName + ' {\n'
+            body += f" {typeName} {{\n"
 
             targetLen = self.getMaxCParamTypeLength(typeinfo)
             for member in typeElem.findall('.//member'):
                 body += self.makeCParamDecl(member, targetLen + 4)
                 body += ';\n'
-            body += '} ' + typeName + ';\n'
+            body += f"}} {typeName};\n"
 
             if protect_end:
                 body += protect_end
@@ -476,13 +480,13 @@ def genGroup(self, groupinfo, groupName, alias=None):
         if alias:
             # If the group name is aliased, just emit a typedef declaration
             # for the alias.
-            body = 'typedef ' + alias + ' ' + groupName + ';\n'
+            body = f"typedef {alias} {groupName};\n"
             self.appendSection(section, body)
         else:
             if self.genOpts is None:
                 raise MissingGeneratorOptionsError()
             (section, body) = self.buildEnumCDecl(self.genOpts.genEnumBeginEndRange, groupinfo, groupName)
-            self.appendSection(section, '\n' + body)
+            self.appendSection(section, f"\n{body}")
 
     def genEnum(self, enuminfo, name, alias):
         """Generate the C declaration for a constant (a single value).
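
Restating the genProtectString() logic above as a standalone sketch may help:
a comma-separated protect string becomes a combined #if with one defined()
test per name, while a single name becomes a plain #ifdef. The protect names
below are real OpenXR guard macros used only as sample input.

def gen_protect_string(protect_str):
    # Standalone copy of the method's logic, minus the class plumbing.
    if ',' in protect_str:
        defs = ' && '.join(f'defined({d})' for d in protect_str.split(','))
        return (f'#if {defs}\n', f'#endif // {defs}\n')
    return (f'#ifdef {protect_str}\n', f'#endif // {protect_str}\n')

begin, end = gen_protect_string('XR_USE_PLATFORM_ANDROID,XR_USE_GRAPHICS_API_OPENGL_ES')
print(begin, end, sep='')
# #if defined(XR_USE_PLATFORM_ANDROID) && defined(XR_USE_GRAPHICS_API_OPENGL_ES)
# #endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_USE_GRAPHICS_API_OPENGL_ES)
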
@@ -508,7 +512,7 @@ def genCmd(self, cmdinfo, name, alias): prefix = '' decls = self.makeCDecls(cmdinfo.elem) - self.appendSection('command', prefix + decls[0] + '\n') + self.appendSection('command', f"{prefix + decls[0]}\n") if self.genOpts.genFuncPointers: self.appendSection('commandPointer', decls[1]) diff --git a/specification/scripts/check_spec_links.py b/specification/scripts/check_spec_links.py index d4baa3ca..b8936740 100755 --- a/specification/scripts/check_spec_links.py +++ b/specification/scripts/check_spec_links.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 # +# Copyright 2018-2024, The Khronos Group Inc. # Copyright (c) 2018-2019 Collabora, Ltd. # # SPDX-License-Identifier: Apache-2.0 @@ -18,6 +19,7 @@ from spec_tools.macro_checker_file import BlockType, MacroCheckerFile from spec_tools.main import checkerMain from spec_tools.shared import MessageId +from apiconventions import APIConventions ### # "Configuration" constants @@ -30,6 +32,9 @@ 'XRAPI_CALL', 'XRAPI_PTR', 'XR_NO_STDINT_H', + 'XR_VERSION_1_0', + 'XR_LOADER_VERSION_1_0', + 'XR_VERSION_1_1', ) # TODO move permissions into XML eventually @@ -151,11 +156,10 @@ def allowEnumXrefs(self): """ return True - def makeMacroChecker(enabled_messages): """Create a correctly-configured MacroChecker instance.""" entity_db = XREntityDatabase() - return MacroChecker(enabled_messages, entity_db, XRMacroCheckerFile, ROOT) + return MacroChecker(enabled_messages, entity_db, XRMacroCheckerFile, ROOT, APIConventions) if __name__ == '__main__': diff --git a/specification/scripts/comment_convert.py b/specification/scripts/comment_convert.py index 518eb580..c6c3e387 100755 --- a/specification/scripts/comment_convert.py +++ b/specification/scripts/comment_convert.py @@ -74,7 +74,7 @@ def dump_converted_comment_lines(self, indent): self.output_line(line) self.trailing_empty_lines = [] - indent = indent + ' ' + indent = f"{indent} " def extract(line): match = COMMENT_RE.match(line) diff --git a/specification/scripts/creflectiongenerator.py b/specification/scripts/creflectiongenerator.py index ac05aeed..d0a2a076 100644 --- a/specification/scripts/creflectiongenerator.py +++ b/specification/scripts/creflectiongenerator.py @@ -50,7 +50,7 @@ def protect_value(self) -> bool: def protect_string(self) -> Optional[str]: """The preprocessor expression to test for protection, or None""" if self.protect: - return " && ".join("defined({})".format(x) for x in self.protect) + return " && ".join(f"defined({x})" for x in self.protect) class BitmaskData: @@ -87,7 +87,7 @@ def __init__(self, *args, **kwargs): def beginFile(self, genOpts): OutputGenerator.beginFile(self, genOpts) - self.template = JinjaTemplate(self.env, "template_{}".format(genOpts.filename)) + self.template = JinjaTemplate(self.env, f"template_{genOpts.filename}") def _get_structs_for_protect(self, protect=None): """ @@ -230,7 +230,7 @@ def genGroup(self, groupinfo, groupName, alias=None): expandSuffix = '' expandSuffixMatch = re.search(r'[A-Z][A-Z]+$', groupName) if expandSuffixMatch: - expandSuffix = '_' + expandSuffixMatch.group() + expandSuffix = f"_{expandSuffixMatch.group()}" # Strip off the suffix from the prefix expandPrefix = expandName.rsplit(expandSuffix, 1)[0] diff --git a/specification/scripts/docgenerator.py b/specification/scripts/docgenerator.py index c1aa4f12..72960e49 100644 --- a/specification/scripts/docgenerator.py +++ b/specification/scripts/docgenerator.py @@ -6,15 +6,17 @@ from pathlib import Path from typing import List, Optional +from dataclasses import dataclass from 
generator import GeneratorOptions, OutputGenerator, noneStr, write +from parse_dependency import dependencyLanguageComment _ENUM_TABLE_PREFIX = """ [cols=",",options="header",] -|======================================================================= +|==== |Enum |Description""" -_TABLE_SUFFIX = """|=======================================================================""" +_TABLE_SUFFIX = """|====""" _ENUM_BLOCK_PREFIX = """.Enumerant Descriptions ****""" @@ -24,6 +26,16 @@ _BLOCK_SUFFIX = """****""" + +@dataclass +class _Enumerant: + name: str + value: int + comment: str + extname: Optional[str] = None + deprecated: Optional[str] = None + + def orgLevelKey(name): # Sort key for organization levels of features / extensions # From highest to lowest, core versions, KHR extensions, EXT extensions, @@ -35,35 +47,24 @@ def orgLevelKey(name): 'VK_KHR_', 'VK_EXT_') - i = 0 - for prefix in prefixes: + for index, prefix in enumerate(prefixes): if name.startswith(prefix): - return i - i += 1 + return index # Everything else (e.g. vendor extensions) is least important - return i - + return len(prefixes) -def orgLevelKey(name): - # Sort key for organization levels of features / extensions - # From highest to lowest, core versions, KHR extensions, EXT extensions, - # and vendor extensions - prefixes = ( - 'VK_VERSION_', - 'VKSC_VERSION_', - 'VK_KHR_', - 'VK_EXT_') +def _deprecated_enum_note(data: _Enumerant): + if data.deprecated == "true": + return "_(deprecated)_ " + if data.deprecated == "ignored": + return "__(deprecated -- ignored)__ " + if data.deprecated is None: + return "" - i = 0 - for prefix in prefixes: - if name.startswith(prefix): - return i - i += 1 + raise RuntimeWarning("Unhandled 'deprecated' attribute for an enumerant value") - # Everything else (e.g. vendor extensions) is least important - return i class DocGeneratorOptions(GeneratorOptions): """DocGeneratorOptions - subclass of GeneratorOptions for @@ -179,7 +180,7 @@ def beginFile(self, genOpts): # This should be a separate conventions property rather than an # inferred type name pattern for different APIs. - self.result_type = genOpts.conventions.type_prefix + "Result" + self.result_type = f"{genOpts.conventions.type_prefix}Result" def endFile(self): OutputGenerator.endFile(self) @@ -213,12 +214,28 @@ def genRequirements(self, name, mustBeFound = True): # To simplify this, sort the (base,dependency) requirements # and put them in a set to ensure they are unique. features = set() + # 'dependency' may be a boolean expression of extension names for (base,dependency) in self.apidict.requiredBy[name]: if dependency is not None: - l = sorted( - sorted((base, dependency)), - key=orgLevelKey) - features.add(' with '.join(l)) + # 'dependency' may be a boolean expression of extension + # names, in which case the sorting will not work well. + + # First, convert it from asciidoctor markup to language. + depLanguage = dependencyLanguageComment(dependency) + + # If they are the same, the dependency is only a + # single extension, and sorting them works. + # Otherwise, skip it. 
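+                # For example (names hypothetical): a dependency of
+                # "XR_KHR_a" comes back from dependencyLanguageComment()
+                # unchanged and joins the sortable path, while a compound
+                # expression like "XR_KHR_a+XR_KHR_b" comes back as readable
+                # language and takes the "base with language" fallback below.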
+ if depLanguage == dependency: + deps = sorted( + sorted((base, dependency)), + key=orgLevelKey) + depString = ' with '.join(deps) + else: + # An expression with multiple extensions + depString = f'{base} with {depLanguage}' + + features.add(depString) else: features.add(base) # Sort the overall dependencies so core versions are first @@ -227,8 +244,10 @@ def genRequirements(self, name, mustBeFound = True): key=orgLevelKey)) return f'// Provided by {provider}\n' else: - if mustBeFound: - self.logMsg('warn', 'genRequirements: API {} not found'.format(name)) + # TODO disabled in OpenXR, re-enable when we either explicitly require each entity + # or improve dependency tracking. + # if mustBeFound: + # self.logMsg('warn', f'genRequirements: API {name} not found') return '' else: # No API dictionary available, return nothing @@ -241,30 +260,31 @@ def writeInclude(self, directory, basename, contents): - basename - base name of the file - contents - contents of the file (Asciidoc boilerplate aside)""" # Create subdirectory, if needed + assert self.genOpts directory = Path(self.genOpts.directory) / directory self.makeDir(directory) # Create file - filename = directory / (basename + '.txt') + filename = directory / f"{basename}{self.file_suffix}" self.logMsg('diag', '# Generating include file:', str(filename)) fp = open(filename, 'w', encoding='utf-8') # Asciidoc anchor write(self.genOpts.conventions.warning_comment, file=fp) - write('[[{0}]]'.format(basename), file=fp) + write(f'[[{basename}]]', file=fp) if self.genOpts.conventions.generate_index_terms: if basename.startswith(self.conventions.command_prefix): - index_term = basename + " (function)" + index_term = f"{basename} (function)" elif basename.startswith(self.conventions.type_prefix): - index_term = basename + " (type)" + index_term = f"{basename} (type)" elif basename.startswith(self.conventions.api_prefix): - index_term = basename + " (define)" + index_term = f"{basename} (define)" else: index_term = basename - write('indexterm:[{}]'.format(index_term), file=fp) + write(f'indexterm:[{index_term}]', file=fp) - write('[source,c++]', file=fp) + write(f'[source,{self.conventions.docgen_language}]', file=fp) write('----', file=fp) write(contents, file=fp) write('----', file=fp) @@ -272,14 +292,14 @@ def writeInclude(self, directory, basename, contents): if self.genOpts.secondaryInclude: # Create secondary no cross-reference include file - filename = directory / (basename + '.no-xref.txt') + filename = directory / f'{basename}.no-xref{self.file_suffix}' self.logMsg('diag', '# Generating include file:', filename) fp = open(filename, 'w', encoding='utf-8') # Asciidoc anchor write(self.genOpts.conventions.warning_comment, file=fp) write('// Include this no-xref version without cross reference id for multiple includes of same file', file=fp) - write('[source,c++]', file=fp) + write(f'[source,{self.conventions.docgen_language}]', file=fp) write('----', file=fp) write(contents, file=fp) write('----', file=fp) @@ -287,10 +307,11 @@ def writeInclude(self, directory, basename, contents): def writeEnumTable(self, basename, values): """Output a table of enumerants.""" + assert self.genOpts directory = Path(self.genOpts.directory) / 'enums' self.makeDir(directory) - filename = str(directory / '{}.comments.txt'.format(basename)) + filename = directory / f"{basename}.comments{self.file_suffix}" self.logMsg('diag', '# Generating include file:', filename) with open(filename, 'w', encoding='utf-8') as fp: @@ -298,8 +319,7 @@ def writeEnumTable(self, basename, 
values): write(_ENUM_TABLE_PREFIX, file=fp) for data in values: - write("|ename:{}".format(data['name']), file=fp) - write("|{}".format(data['comment']), file=fp) + write(self._make_enumerant_table_row(data), file=fp) write(_TABLE_SUFFIX, file=fp) @@ -312,32 +332,45 @@ def writeBox(self, filename, prefix, items): write(prefix, file=fp) for item in items: - write("* {}".format(item), file=fp) + write(f"* {item}", file=fp) write(_BLOCK_SUFFIX, file=fp) def writeEnumBox(self, basename, values): """Output a box of enumerants.""" + assert self.genOpts directory = Path(self.genOpts.directory) / 'enums' self.makeDir(directory) - filename = str(directory / '{}.comments-box.txt'.format(basename)) - self.writeBox(filename, _ENUM_BLOCK_PREFIX, - ("ename:{} -- {}".format(data['name'], data['comment']) - for data in values)) + filename = directory / f'{basename}.comments-box{self.file_suffix}' + self.writeBox( + filename, + _ENUM_BLOCK_PREFIX, + ( + self._make_enumerant_list_item(data) + for data in values + ), + ) def writeFlagBox(self, basename, values): """Output a box of flag bit comments.""" + assert self.genOpts directory = Path(self.genOpts.directory) / 'enums' self.makeDir(directory) - filename = str(directory / '{}.comments.txt'.format(basename)) - self.writeBox(filename, _FLAG_BLOCK_PREFIX, - ("ename:{} -- {}".format(data['name'], data['comment']) - for data in values)) + filename = directory / f'{basename}.comments{self.file_suffix}' + self.writeBox( + filename, + _FLAG_BLOCK_PREFIX, + ( + self._make_enumerant_list_item(data) + for data in values + ), + ) def genType(self, typeinfo, name, alias): """Generate type.""" + assert self.genOpts OutputGenerator.genType(self, typeinfo, name, alias) typeElem = typeinfo.elem # If the type is a struct type, traverse the embedded tags @@ -350,13 +383,12 @@ def genType(self, typeinfo, name, alias): self.genStruct(typeinfo, name, alias) elif category not in OutputGenerator.categoryToPath: # If there is no path, do not write output - self.logMsg('diag', 'NOT writing include for {} category {}'.format( - name, category)) + self.logMsg('diag', f'NOT writing include for {name} category {category}') else: body = self.genRequirements(name) if alias: # If the type is an alias, just emit a typedef declaration - body += 'typedef ' + alias + ' ' + name + ';\n' + body += f"typedef {alias} {name};\n" self.writeInclude(OutputGenerator.categoryToPath[category], name, body) else: @@ -376,7 +408,7 @@ def genType(self, typeinfo, name, alias): if body: self.writeInclude(OutputGenerator.categoryToPath[category], - name, body + '\n') + name, f"{body}\n") else: self.logMsg('diag', 'NOT writing empty include file for type', name) @@ -387,81 +419,119 @@ def genStructBody(self, typeinfo, typeName): Factored out to allow aliased types to also generate the original type. """ typeElem = typeinfo.elem - body = 'typedef ' + typeElem.get('category') + ' ' + typeName + ' {\n' + body = f"typedef {typeElem.get('category')} {typeName} {{\n" targetLen = self.getMaxCParamTypeLength(typeinfo) for member in typeElem.findall('.//member'): body += self.makeCParamDecl(member, targetLen + 4) body += ';\n' - body += '} ' + typeName + ';' + body += f"}} {typeName};" return body def genStruct(self, typeinfo, typeName, alias): """Generate struct.""" + assert self.registry OutputGenerator.genStruct(self, typeinfo, typeName, alias) body = self.genRequirements(typeName) if alias: if self.conventions.duplicate_aliased_structs: # TODO maybe move this outside the conditional? This would be a visual change. 
- body += '// {} is an alias for {}\n'.format(typeName, alias) + body += f'// {typeName} is an alias for {alias}\n' alias_info = self.registry.typedict[alias] body += self.genStructBody(alias_info, alias) body += '\n\n' - body += 'typedef ' + alias + ' ' + typeName + ';\n' + body += f"typedef {alias} {typeName};\n" else: body += self.genStructBody(typeinfo, typeName) self.writeInclude('structs', typeName, body) - def genEnumTable(self, groupinfo, groupName): - """Generate tables of enumerant values and short descriptions from - the XML.""" - - values = [] - got_comment = False - missing_comments = [] - for elem in groupinfo.elem.findall('enum'): - if not elem.get('required'): - continue - name = elem.get('name') - data = { - 'name': name, - } + def _maybe_return_enumerant_object_for_table( + self, elems, elem, missing_comments: List[str] + ) -> Optional[_Enumerant]: + assert self.genOpts + if not elem.get("required"): + return + name = elem.get("name") - (numVal, _) = self.enumToValue(elem, True) - data['value'] = numVal + (num_val, _) = self.enumToValue(elem, True, parent_for_alias_dereference=elems) - extname = elem.get('extname') + extname = elem.get("extname") - added_by_extension_to_core = (extname is not None and self.in_core) - if added_by_extension_to_core and not self.genOpts.extEnumerantAdditions: - # We are skipping such values - continue + added_by_extension_to_core = extname is not None and self.in_core + if added_by_extension_to_core and not self.genOpts.extEnumerantAdditions: + # We are skipping such values + return - comment = elem.get('comment') - if comment: - got_comment = True - elif name.endswith('_UNKNOWN') and numVal == 0: + comment = elem.get("comment") + if comment is None: + if name.endswith("_UNKNOWN") and num_val == 0: # This is a placeholder for 0-initialization to be clearly invalid. # Just skip this silently - continue - else: - # Skip but record this in case it is an odd-one-out missing - # a comment. - missing_comments.append(name) - continue + return + # Skip but record this in case it is an odd-one-out missing + # a comment. 
+ missing_comments.append(name) + return + + assert num_val is not None + + return _Enumerant( + name=name, + value=num_val, + comment=comment, + extname=extname, + deprecated=elem.get("deprecated"), + ) + + def _make_enumerant_extension_note(self, data: _Enumerant) -> Optional[str]: + assert self.genOpts + if data.extname is not None and self.genOpts.extEnumerantFormatString: + formatted_ext = self.conventions.formatExtension(data.extname) + return self.genOpts.extEnumerantFormatString.format(formatted_ext) + + def _make_enumerant_list_item(self, data: _Enumerant) -> str: + ext_note = self._make_enumerant_extension_note(data) + parts = [ + f"ename:{data.name}", + _deprecated_enum_note(data), + "--", + data.comment, + ext_note, + ] + + # Filter out None values before joining to avoid excess space + return " ".join(part for part in parts if part is not None) + + def _make_enumerant_table_row(self, data: _Enumerant) -> str: + ext_note = self._make_enumerant_extension_note(data) + parts = [ + f"|ename:{data.name}", + "|", + _deprecated_enum_note(data), + data.comment, + ext_note, + ] + + # Filter out None values before joining to avoid excess space + return " ".join(part for part in parts if part is not None) - if added_by_extension_to_core and self.genOpts.extEnumerantFormatString: - # Add a note to the comment - comment += self.genOpts.extEnumerantFormatString.format( - self.conventions.formatExtension(extname)) + def genEnumTable(self, groupinfo, groupName): + """Generate tables of enumerant values and short descriptions from + the XML.""" - data['comment'] = comment - values.append(data) + assert self.genOpts + + values = [] + missing_comments = [] + for elem in groupinfo.elem.findall("enum"): + maybe_data = self._maybe_return_enumerant_object_for_table(groupinfo.elem, elem, missing_comments) + if maybe_data: + values.append(maybe_data) - if got_comment: + if values: # If any had a comment, output it. if missing_comments: @@ -472,29 +542,30 @@ def genEnumTable(self, groupinfo, groupName): group_type = groupinfo.elem.get('type') if groupName == self.result_type: # Split this into success and failure - self.writeEnumTable(groupName + '.success', - (data for data in values - if data['value'] >= 0)) - self.writeEnumTable(groupName + '.error', - (data for data in values - if data['value'] < 0)) + self.writeEnumTable(f"{groupName}.success", + (data for data in values + if data.value >= 0)) + self.writeEnumTable(f"{groupName}.error", + (data for data in values + if data.value < 0)) elif group_type == 'bitmask': self.writeFlagBox(groupName, values) elif group_type == 'enum': self.writeEnumTable(groupName, values) self.writeEnumBox(groupName, values) else: - raise RuntimeError("Unrecognized enums type: " + str(group_type)) + raise RuntimeError(f"Unrecognized enums type: {str(group_type)}") def genGroup(self, groupinfo, groupName, alias): """Generate group (e.g. C "enum" type).""" + assert self.genOpts OutputGenerator.genGroup(self, groupinfo, groupName, alias) body = self.genRequirements(groupName) if alias: # If the group name is aliased, just emit a typedef declaration # for the alias. 
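
As an aside, here is a minimal, self-contained sketch of the table-row markup
that the new _Enumerant helpers above emit for a deprecated enumerant. The
enumerant is hypothetical, and the dataclass and note helper are restated
locally (and slightly simplified) rather than imported from the generator.

from dataclasses import dataclass
from typing import Optional

@dataclass
class Enumerant:                      # mirrors _Enumerant above
    name: str
    value: int
    comment: str
    extname: Optional[str] = None
    deprecated: Optional[str] = None

def deprecated_note(data):            # mirrors _deprecated_enum_note above
    if data.deprecated == "true":
        return "_(deprecated)_ "
    if data.deprecated == "ignored":
        return "__(deprecated -- ignored)__ "
    return ""

data = Enumerant("XR_EXAMPLE_BIT", 1, "An example flag.", deprecated="true")
parts = [f"|ename:{data.name}", "|", deprecated_note(data), data.comment]
print(" ".join(part for part in parts if part is not None))
# |ename:XR_EXAMPLE_BIT | _(deprecated)_  An example flag.
# (The doubled space mirrors the trailing space in the deprecation note.)
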
- body += 'typedef ' + alias + ' ' + groupName + ';\n' + body += f"typedef {alias} {groupName};\n" else: expand = self.genOpts.expandEnumerants (_, enumbody) = self.buildEnumCDecl(expand, groupinfo, groupName) diff --git a/specification/scripts/extdependency.py b/specification/scripts/extdependency.py index e2ad0109..11856728 100755 --- a/specification/scripts/extdependency.py +++ b/specification/scripts/extdependency.py @@ -12,6 +12,7 @@ from pathlib import Path from apiconventions import APIConventions +from parse_dependency import dependencyNames class DiGraph: """A directed graph. @@ -97,39 +98,55 @@ def __init__(self, extensions supported for that API are considered. """ + conventions = APIConventions() if registry_path is None: - registry_path = APIConventions().registry_path + registry_path = conventions.registry_path if api_name is None: - api_name = APIConventions().xml_api_name + api_name = conventions.xml_api_name self.allExts = set() self.khrExts = set() + self.ratifiedExts = set() self.graph = DiGraph() self.extensions = {} self.tree = etree.parse(registry_path) # Loop over all supported extensions, creating a digraph of the - # extension dependencies in the 'requires' attribute, which is a - # comma-separated list of extension names. Also track lists of - # all extensions and all KHR extensions. + # extension dependencies in the 'depends' attribute, which is a + # boolean expression of core version and extension names. + # A static dependency tree can be constructed only by treating all + # extension names in the expression as dependencies, even though + # that may not be true if it is of form (ext OR ext). + # For the purpose these dependencies are used for - generating + # specifications with required dependencies included automatically - + # this will suffice. + # Separately tracks lists of all extensions and all KHR extensions, + # which are common specification targets. for elem in self.tree.findall('extensions/extension'): name = elem.get('name') supported = elem.get('supported') + ratified = elem.get('ratified', '') - # This works for the present form of the 'supported' attribute, - # which is a comma-separate list of XML API names if api_name in supported.split(','): self.allExts.add(name) if 'KHR' in name: self.khrExts.add(name) - deps = elem.get('requires') - if deps: - for dep in deps.split(','): - self.graph.add_edge(name, dep) - else: - self.graph.add_node(name) + if api_name in ratified.split(','): + self.ratifiedExts.add(name) + + self.graph.add_node(name) + + depends = elem.get('depends') + if depends: + # Walk a list of the leaf nodes (version and extension + # names) in the boolean expression. + for dep in dependencyNames(depends): + # Filter out version names, which are explicitly + # specified when building a specification. + if not conventions.is_api_version_name(dep): + self.graph.add_edge(name, dep) else: # Skip unsupported extensions pass @@ -142,6 +159,10 @@ def khrExtensions(self): """Returns a set of all KHR extensions in the graph""" return self.khrExts + def ratifiedExtensions(self): + """Returns a set of all ratified extensions in the graph""" + return self.ratifiedExts + def children(self, extension): """Returns a set of the dependencies of an extension. 
Throws an exception if the extension is not in the graph.""" @@ -158,9 +179,9 @@ def children(self, extension): parser.add_argument('-registry', action='store', default=APIConventions().registry_path, - help='Use specified registry file instead of ' + APIConventions().registry_path) + help=f"Use specified registry file instead of {APIConventions().registry_path}") parser.add_argument('-loops', action='store', - default=20, type=int, + default=10, type=int, help='Number of timing loops to run') parser.add_argument('-test', action='store', default=None, @@ -168,6 +189,10 @@ def children(self, extension): args = parser.parse_args() + deps = ApiDependencies(args.registry) + print('KHR exts =', sorted(deps.khrExtensions())) + print('Ratified exts =', sorted(deps.ratifiedExtensions())) + import time startTime = time.process_time() @@ -177,4 +202,4 @@ def children(self, extension): endTime = time.process_time() deltaT = endTime - startTime - print('Total time = {} time/loop = {}'.format(deltaT, deltaT / args.loops)) + print(f'Total time = {deltaT} time/loop = {deltaT / args.loops}') diff --git a/specification/scripts/extensionmetadocgenerator.py b/specification/scripts/extensionmetadocgenerator.py index 001cf864..fa034ed7 100644 --- a/specification/scripts/extensionmetadocgenerator.py +++ b/specification/scripts/extensionmetadocgenerator.py @@ -10,8 +10,8 @@ from pathlib import Path from functools import total_ordering -from typing import cast from generator import GeneratorOptions, OutputGenerator, regSortFeatures, write +from parse_dependency import dependencyMarkup, dependencyNames class ExtensionMetaDocGeneratorOptions(GeneratorOptions): """ExtensionMetaDocGeneratorOptions - subclass of GeneratorOptions. @@ -25,26 +25,32 @@ class Extension: def __init__(self, generator, # needed for logging and API conventions filename, + interface, name, number, ext_type, - requires, - requiresCore, + depends, contact, promotedTo, deprecatedBy, obsoletedBy, provisional, revision, - specialuse ): + specialuse, + ratified + ): + """Object encapsulating information from an XML tag. + Most of the parameters / members are XML tag values. + 'interface' is the actual XML element.""" + self.generator = generator self.conventions = generator.genOpts.conventions self.filename = filename + self.interface = interface self.name = name self.number = number self.ext_type = ext_type - self.requires = requires - self.requiresCore = requiresCore + self.depends = depends self.contact = contact self.promotedTo = promotedTo self.deprecatedBy = deprecatedBy @@ -52,19 +58,25 @@ def __init__(self, self.provisional = provisional self.revision = revision self.specialuse = specialuse + self.ratified = ratified self.deprecationType = None self.supercedingAPIVersion = None self.supercedingExtension = None + # This is a set containing names of extensions (if any) promoted + # *to* this extension. + # It is filled in after all the Extension objects are created, + # since it requires a reverse mapping step. + self.promotedFrom = set() if self.promotedTo is not None and self.deprecatedBy is not None and self.obsoletedBy is not None: - self.generator.logMsg('warn', 'All \'promotedto\', \'deprecatedby\' and \'obsoletedby\' attributes used on extension ' + self.name + '! Ignoring \'promotedto\' and \'deprecatedby\'.') + self.generator.logMsg('warn', f"All 'promotedto', 'deprecatedby' and 'obsoletedby' attributes used on extension {self.name}! 
Ignoring 'promotedto' and 'deprecatedby'.") elif self.promotedTo is not None and self.deprecatedBy is not None: - self.generator.logMsg('warn', 'Both \'promotedto\' and \'deprecatedby\' attributes used on extension ' + self.name + '! Ignoring \'deprecatedby\'.') + self.generator.logMsg('warn', f"Both 'promotedto' and 'deprecatedby' attributes used on extension {self.name}! Ignoring 'deprecatedby'.") elif self.promotedTo is not None and self.obsoletedBy is not None: - self.generator.logMsg('warn', 'Both \'promotedto\' and \'obsoletedby\' attributes used on extension ' + self.name + '! Ignoring \'promotedto\'.') + self.generator.logMsg('warn', f"Both 'promotedto' and 'obsoletedby' attributes used on extension {self.name}! Ignoring 'promotedto'.") elif self.deprecatedBy is not None and self.obsoletedBy is not None: - self.generator.logMsg('warn', 'Both \'deprecatedby\' and \'obsoletedby\' attributes used on extension ' + self.name + '! Ignoring \'deprecatedby\'.') + self.generator.logMsg('warn', f"Both 'deprecatedby' and 'obsoletedby' attributes used on extension {self.name}! Ignoring 'deprecatedby'.") supercededBy = None if self.promotedTo is not None: @@ -82,10 +94,10 @@ def __init__(self, pass # supercedingAPIVersion, supercedingExtension is None elif supercededBy.startswith(self.conventions.api_version_prefix): self.supercedingAPIVersion = supercededBy - elif supercededBy.startswith(self.conventions.api_prefix): + elif supercededBy.startswith(self.conventions.extension_name_prefix): self.supercedingExtension = supercededBy else: - self.generator.logMsg('error', 'Unrecognized ' + self.deprecationType + ' attribute value \'' + supercededBy + '\'!') + self.generator.logMsg('error', f"Unrecognized {self.deprecationType} attribute value '{supercededBy}'!") def __str__(self): return self.name @@ -119,7 +131,7 @@ def typeToStr(self): return 'Device extension' if self.ext_type is not None: - self.generator.logMsg('warn', 'The type attribute of ' + self.name + ' extension is neither \'instance\' nor \'device\'. That is invalid (at the time this script was written).') + self.generator.logMsg('warn', f"The type attribute of {self.name} extension is neither 'instance' nor 'device'. That is invalid (at the time this script was written).") else: # should be unreachable self.generator.logMsg('error', 'Logic error in typeToStr(): Missing type attribute!') return None @@ -136,72 +148,82 @@ def specLink(self, xrefName, xrefText, isRefpage = False): if isRefpage: # Always link into API spec specURL = self.conventions.specURL('api') - return 'link:{}#{}[{}^]'.format(specURL, xrefName, xrefText) + return f'link:{specURL}#{xrefName}[{xrefText}^]' else: - return '<<' + xrefName + ', ' + xrefText + '>>' + return f"<<{xrefName}, {xrefText}>>" def conditionalLinkCoreAPI(self, apiVersion, linkSuffix, isRefpage): - versionMatch = re.match(self.conventions.api_version_prefix + r'(\d+)_(\d+)', apiVersion) + versionMatch = re.match(f"{self.conventions.api_version_prefix}(\\d+)_(\\d+)", apiVersion) major = versionMatch.group(1) minor = versionMatch.group(2) - dottedVersion = major + '.' 
+ minor + dottedVersion = f"{major}.{minor}" - xrefName = 'versions-' + dottedVersion + linkSuffix - xrefText = self.conventions.api_name() + ' ' + dottedVersion + xrefName = f"versions-{dottedVersion}{linkSuffix}" + xrefText = f"{self.conventions.api_name()} {dottedVersion}" - doc = 'ifdef::' + apiVersion + '[]\n' - doc += ' ' + self.specLink(xrefName, xrefText, isRefpage) + '\n' - doc += 'endif::' + apiVersion + '[]\n' - doc += 'ifndef::' + apiVersion + '[]\n' - doc += ' ' + self.conventions.api_name() + ' ' + dottedVersion + '\n' - doc += 'endif::' + apiVersion + '[]\n' + doc = f"ifdef::{apiVersion}[]\n" + doc += f" {self.specLink(xrefName, xrefText, isRefpage)}\n" + doc += f"endif::{apiVersion}[]\n" + doc += f"ifndef::{apiVersion}[]\n" + doc += f" {self.conventions.api_name()} {dottedVersion}\n" + doc += f"endif::{apiVersion}[]\n" return doc def conditionalLinkExt(self, extName, indent = ' '): - doc = 'ifdef::' + extName + '[]\n' - doc += indent + self.conventions.formatExtension(extName) + '\n' - doc += 'endif::' + extName + '[]\n' - doc += 'ifndef::' + extName + '[]\n' - doc += indent + '`' + extName + '`\n' - doc += 'endif::' + extName + '[]\n' + doc = f"ifdef::{extName}[]\n" + doc += f"{indent}{self.conventions.formatExtension(extName)}\n" + doc += f"endif::{extName}[]\n" + doc += f"ifndef::{extName}[]\n" + doc += f"{indent}`{extName}`\n" + doc += f"endif::{extName}[]\n" return doc - def resolveDeprecationChain(self, extensionsList, succeededBy, isRefpage, file): - ext = next(x for x in extensionsList if x.name == succeededBy) + def resolveDeprecationChain(self, extensions, succeededBy, isRefpage, file): + if succeededBy not in extensions: + write(f' ** *NOTE* The extension `{succeededBy}` is not supported for the API specification being generated', file=file) + self.generator.logMsg( + 'warn', f'resolveDeprecationChain: {self.name} defines a superseding interface {succeededBy} which is not in the supported extensions list') + return + + ext = extensions[succeededBy] if ext.deprecationType: if ext.deprecationType == 'promotion': if ext.supercedingAPIVersion: - write(' ** Which in turn was _promoted_ to\n' + ext.conditionalLinkCoreAPI(ext.supercedingAPIVersion, '-promotions', isRefpage), file=file) + write(f" ** Which in turn was _promoted_ to\n{ext.conditionalLinkCoreAPI(ext.supercedingAPIVersion, '-promotions', isRefpage)}", file=file) else: # ext.supercedingExtension - write(' ** Which in turn was _promoted_ to extension\n' + ext.conditionalLinkExt(ext.supercedingExtension), file=file) - ext.resolveDeprecationChain(extensionsList, ext.supercedingExtension, file) + write(f" ** Which in turn was _promoted_ to extension\n{ext.conditionalLinkExt(ext.supercedingExtension)}", file=file) + ext.resolveDeprecationChain(extensions, ext.supercedingExtension, file) elif ext.deprecationType == 'deprecation': if ext.supercedingAPIVersion: - write(' ** Which in turn was _deprecated_ by\n' + ext.conditionalLinkCoreAPI(ext.supercedingAPIVersion, '-new-feature', isRefpage), file=file) + write(f" ** Which in turn was _deprecated_ by\n{ext.conditionalLinkCoreAPI(ext.supercedingAPIVersion, '-new-feature', isRefpage)}", file=file) elif ext.supercedingExtension: - write(' ** Which in turn was _deprecated_ by\n' + ext.conditionalLinkExt(ext.supercedingExtension) + ' extension', file=file) - ext.resolveDeprecationChain(extensionsList, ext.supercedingExtension, file) + write(f" ** Which in turn was _deprecated_ by\n{ext.conditionalLinkExt(ext.supercedingExtension)} extension", file=file) + 
ext.resolveDeprecationChain(extensions, ext.supercedingExtension, file) else: write(' ** Which in turn was _deprecated_ without replacement', file=file) elif ext.deprecationType == 'obsoletion': if ext.supercedingAPIVersion: - write(' ** Which in turn was _obsoleted_ by\n' + ext.conditionalLinkCoreAPI(ext.supercedingAPIVersion, '-new-feature', isRefpage), file=file) + write(f" ** Which in turn was _obsoleted_ by\n{ext.conditionalLinkCoreAPI(ext.supercedingAPIVersion, '-new-feature', isRefpage)}", file=file) elif ext.supercedingExtension: - write(' ** Which in turn was _obsoleted_ by\n' + ext.conditionalLinkExt(ext.supercedingExtension) + ' extension', file=file) - ext.resolveDeprecationChain(extensionsList, ext.supercedingExtension, file) + write(f" ** Which in turn was _obsoleted_ by\n{ext.conditionalLinkExt(ext.supercedingExtension)} extension", file=file) + ext.resolveDeprecationChain(extensions, ext.supercedingExtension, file) else: write(' ** Which in turn was _obsoleted_ without replacement', file=file) else: # should be unreachable self.generator.logMsg('error', 'Logic error in resolveDeprecationChain(): deprecationType is neither \'promotion\', \'deprecation\' nor \'obsoletion\'!') - def writeTag(self, tag, value, isRefpage, fp): """Write a tag and (if non-None) a tag value to a file. + If the value is None, just write the tag. + + If the tag is None, just write the value (used for adding a value + to a just-written tag). + - tag - string tag name - value - tag value, or None - isRefpage - controls style in which the tag is marked up @@ -209,64 +231,78 @@ def writeTag(self, tag, value, isRefpage, fp): if isRefpage: # Use subsection headers for the tag name - tagPrefix = '== ' + # Because we do not know what preceded this, add whitespace + tagPrefix = '\n== ' tagSuffix = '' else: - # Use an bolded item list for the tag name + # Use a bolded item list for the tag name tagPrefix = '*' tagSuffix = '*::' - write(tagPrefix + tag + tagSuffix, file=fp) + if tag is not None: + write(tagPrefix + tag + tagSuffix, file=fp) if value is not None: write(value, file=fp) if isRefpage: write('', file=fp) - def makeMetafile(self, extensionsList, isRefpage = False): + def makeMetafile(self, extensions, SPV_deps, isRefpage = False): """Generate a file containing extension metainformation in asciidoctor markup form. - - extensionsList - list of extensions spec is being generated against + - extensions - dictionary of Extension objects for extensions spec + is being generated against + - SPV_deps - dictionary of SPIR-V extension names required for each + extension and version name - isRefpage - True if generating a refpage include, False if generating a specification extension appendix include""" if isRefpage: - filename = self.filename.with_name('refpage.' 
+ self.filename.name) + filename = self.filename.with_name(f"refpage.{self.filename.name}") else: filename = self.filename fp = self.generator.newFile(filename) if not isRefpage: - write('[[' + self.name + ']]', file=fp) - write('=== ' + self.name, file=fp) + write(f"[[{self.name}]]", file=fp) + write(f"=== {self.name}", file=fp) write('', file=fp) - self.writeTag('Name String', '`' + self.name + '`', isRefpage, fp) - self.writeTag('Extension Type', self.typeToStr(), isRefpage, fp) + self.writeTag('Name String', f"`{self.name}`", isRefpage, fp) + if self.conventions.write_extension_type: + self.writeTag('Extension Type', self.typeToStr(), isRefpage, fp) - self.writeTag('Registered Extension Number', self.number, isRefpage, fp) - self.writeTag('Revision', self.revision, isRefpage, fp) + if self.conventions.write_extension_number: + self.writeTag('Registered Extension Number', self.number, isRefpage, fp) + if self.conventions.write_extension_revision: + self.writeTag('Revision', self.revision, isRefpage, fp) + + # if self.conventions.xml_api_name in self.ratified.split(','): + # ratstatus = 'Ratified' + # else: + # ratstatus = 'Not ratified' + # self.writeTag('Ratification Status', ratstatus, isRefpage, fp) # Only API extension dependencies are coded in XML, others are explicit self.writeTag('Extension and Version Dependencies', None, isRefpage, fp) - write(' * Requires support for {} {}'.format( - self.conventions.api_name(), self.requiresCore), file=fp) - - if self.requires: - # Exact meaning of 'requires' depends on extension type. - if self.ext_type == 'instance': - enableQualifier = '' + # Transform the boolean 'depends' expression into equivalent + # human-readable asciidoc markup. + if self.depends is not None: + if isRefpage: + separator = '' else: - # self.ext_type == 'device': - enableQualifier = ' for any device-level functionality' + separator = '+' + write(separator + '\n--\n' + + dependencyMarkup(self.depends) + + '--', file=fp) + else: + # Do not specify the base API redundantly, but put something + # here to avoid formatting trouble. + self.writeTag(None, 'None', isRefpage, fp) - for dep in self.requires.split(','): - write(' * Requires {} to be enabled{}'.format( - self.conventions.formatExtension(dep), enableQualifier), - file=fp) if self.provisional == 'true' and self.conventions.provisional_extension_warning: write(' * *This is a _provisional_ extension and must: be used with caution.', file=fp) write(' See the ' + @@ -276,29 +312,58 @@ def makeMetafile(self, extensionsList, isRefpage = False): ' of provisional header files for enablement and stability details.*', file=fp) write('', file=fp) + # Determine version and extension interactions from 'depends' + # attributes of tags. 
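+        # (These 'depends' attributes live on <require> tags, matched by the
+        # 'require[@depends]' query below; each value is a boolean expression
+        # of version and extension names.)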
+ interacts = set() + for elem in self.interface.findall('require[@depends]'): + names = dependencyNames(elem.get('depends')) + interacts |= names + + if len(interacts) > 0: + self.writeTag('API Interactions', None, isRefpage, fp) + + def versionKey(name): + """Sort _VERSION_ names before extension names""" + return '_VERSION_' not in name + + names = sorted(sorted(interacts), key=versionKey) + for name in names: + if "_VERSION_" in name: + write(f"* Interacts with {self.conventions.formatVersion(name)}", file=fp) + else: + write(f"* Interacts with {self.conventions.formatExtension(name)}", file=fp) + + if self.name in SPV_deps: + self.writeTag('SPIR-V Dependencies', None, isRefpage, fp) + + for spvname in sorted(SPV_deps[self.name]): + write(f' * {self.conventions.formatSPIRVlink(spvname)}', file=fp) + + write('', file=fp) + if self.deprecationType: - self.writeTag('Deprecation state', None, isRefpage, fp) + self.writeTag('Deprecation State', None, isRefpage, fp) if self.deprecationType == 'promotion': if self.supercedingAPIVersion: - write(' * _Promoted_ to\n' + self.conditionalLinkCoreAPI(self.supercedingAPIVersion, '-promotions', isRefpage), file=fp) + write(f" * _Promoted_ to\n{self.conditionalLinkCoreAPI(self.supercedingAPIVersion, '-promotions', isRefpage)}", file=fp) else: # ext.supercedingExtension - write(' * _Promoted_ to\n' + self.conditionalLinkExt(self.supercedingExtension) + ' extension', file=fp) - self.resolveDeprecationChain(extensionsList, self.supercedingExtension, isRefpage, fp) + write(f" * _Promoted_ to\n{self.conditionalLinkExt(self.supercedingExtension)} extension", file=fp) + self.resolveDeprecationChain(extensions, self.supercedingExtension, isRefpage, fp) elif self.deprecationType == 'deprecation': if self.supercedingAPIVersion: - write(' * _Deprecated_ by\n' + self.conditionalLinkCoreAPI(self.supercedingAPIVersion, '-new-features', isRefpage), file=fp) + write(f" * _Deprecated_ by\n{self.conditionalLinkCoreAPI(self.supercedingAPIVersion, '-new-features', isRefpage)}", file=fp) elif self.supercedingExtension: - write(' * _Deprecated_ by\n' + self.conditionalLinkExt(self.supercedingExtension) + ' extension' , file=fp) - self.resolveDeprecationChain(extensionsList, self.supercedingExtension, isRefpage, fp) + write(f" * _Deprecated_ by\n{self.conditionalLinkExt(self.supercedingExtension)} extension" , file=fp) + self.resolveDeprecationChain(extensions, self.supercedingExtension, isRefpage, fp) else: write(' * _Deprecated_ without replacement' , file=fp) elif self.deprecationType == 'obsoletion': if self.supercedingAPIVersion: - write(' * _Obsoleted_ by\n' + self.conditionalLinkCoreAPI(self.supercedingAPIVersion, '-new-features', isRefpage), file=fp) + write(f" * _Obsoleted_ by\n{self.conditionalLinkCoreAPI(self.supercedingAPIVersion, '-new-features', isRefpage)}", file=fp) elif self.supercedingExtension: - write(' * _Obsoleted_ by\n' + self.conditionalLinkExt(self.supercedingExtension) + ' extension' , file=fp) - self.resolveDeprecationChain(extensionsList, self.supercedingExtension, isRefpage, fp) + write(f" * _Obsoleted_ by\n{self.conditionalLinkExt(self.supercedingExtension)} extension" , file=fp) + self.resolveDeprecationChain(extensions, self.supercedingExtension, isRefpage, fp) else: # TODO: Does not make sense to retroactively ban use of extensions from 1.0. # Needs some tweaks to the semantics and this message, when such extension(s) occur. 
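
For orientation, a standalone sketch of how the 'API Interactions' list above
is assembled: leaf names are collected from each require element's 'depends'
expression, then sorted so version names come first. The depends strings and
the '+' (AND) syntax here are illustrative, and dependencyNames() is stubbed
rather than imported from parse_dependency.

def dependency_names(depends):
    # Stub standing in for parse_dependency.dependencyNames(), which parses
    # a real boolean expression; this naive split only handles the simple
    # forms used in this example.
    return set(depends.replace('+', ',').split(','))

interacts = set()
for depends in ('XR_VERSION_1_1',
                'XR_KHR_vulkan_enable+XR_KHR_composition_layer_depth'):
    interacts |= dependency_names(depends)

# Versions sort ahead of extensions, mirroring versionKey() above.
for name in sorted(sorted(interacts), key=lambda n: '_VERSION_' not in n):
    print(f'* Interacts with {name}')
# * Interacts with XR_VERSION_1_1
# * Interacts with XR_KHR_composition_layer_depth
# * Interacts with XR_KHR_vulkan_enable
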
@@ -322,7 +387,7 @@ def makeMetafile(self, extensionsList, isRefpage = False): write('* {}'.format( self.specLink( xrefName = self.conventions.special_use_section_anchor, - xrefText = '{' + use + '}', + xrefText = f"{{{use}}}", isRefpage = isRefpage)), file=fp) write('', file=fp) @@ -335,29 +400,59 @@ def makeMetafile(self, extensionsList, isRefpage = False): name = ' '.join(contactWords[:-1]) handle = contactWords[-1] if handle.startswith('gitlab:'): - prettyHandle = 'icon:gitlab[alt=GitLab, role="red"]' + handle.replace('gitlab:@', '') + prettyHandle = f"icon:gitlab[alt=GitLab, role=\"red\"]{handle.replace('gitlab:@', '')}" elif handle.startswith('@'): - issuePlaceholderText = '[' + self.name + '] ' + handle - issuePlaceholderText += '%0A<>' - trackerLink = 'link:++https://github.com/KhronosGroup/Vulkan-Docs/issues/new?body=' + issuePlaceholderText + '++' - prettyHandle = trackerLink + '[icon:github[alt=GitHub,role="black"]' + handle[1:] + ',window=_blank,opts=nofollow]' + issuePlaceholderText = f'[{self.name}] {handle}' + issuePlaceholderText += f'%0A*Here describe the issue or question you have about the {self.name} extension*' + trackerLink = f'link:++https://github.com/KhronosGroup/Vulkan-Docs/issues/new?body={issuePlaceholderText}++' + prettyHandle = f'{trackerLink}[icon:github[alt=GitHub,role="black"]{handle[1:]},window=_blank,opts=nofollow]' else: prettyHandle = handle - write(' * ' + name + ' ' + prettyHandle, file=fp) + write(f" * {name} {prettyHandle}", file=fp) write('', file=fp) # Check if a proposal document for this extension exists in the # current repository, and link to the same document (parameterized # by a URL prefix attribute) if it does. # The assumption is that a proposal document for an extension - # VK_name will be located in 'proposals/VK_name.asciidoc' relative + # VK_name will be located in 'proposals/VK_name.adoc' relative # to the repository root, and that this script will be invoked from # the repository root. - path = 'proposals/{}.asciidoc'.format(self.name) - if os.path.exists(path) and os.access(path, os.R_OK): - self.writeTag('Extension Proposal', - 'link:{{specRepositoryURL}}/{}[{}]'.format(path, self.name), isRefpage, fp) + # If a proposal for this extension does not exist, look for + # proposals for the extensions it is promoted from. + + def checkProposal(extname): + """Check if a proposal document for an extension exists, + returning the path to that proposal or None otherwise.""" + + path = f'proposals/{extname}.adoc' + if os.path.exists(path) and os.access(path, os.R_OK): + return path + else: + return None + + # List of [ extname, proposal link ] + proposals = [] + + path = checkProposal(self.name) + if path is not None: + proposals.append([self.name, path]) + else: + for name in self.promotedFrom: + path = checkProposal(name) + if path is not None: + proposals.append([name, path]) + + if len(proposals) > 0: + tag = 'Extension Proposal' + for (name, path) in sorted(proposals): + self.writeTag(tag, + f'link:{{specRepositoryURL}}/{path}[{name}]', + isRefpage, fp) + # Setting tag = None so additional values will not get + # additional tag headers. 
+ tag = None # If this is metadata to be included in a refpage, adjust the # leveloffset to account for the relative structure of the extension @@ -377,8 +472,7 @@ class ExtensionMetaDocOutputGenerator(OutputGenerator): - number extension number (optional) - contact name and GitHub login or email address (optional) - type 'instance' | 'device' (optional) - - requires list of comma-separated required API extensions (optional) - - requiresCore required core version of API (optional) + - depends boolean expression of core version and extension names this depends on (optional) - promotedTo extension or API version it was promoted to - deprecatedBy extension or API version which deprecated this extension, or empty string if deprecated without replacement @@ -388,12 +482,15 @@ class ExtensionMetaDocOutputGenerator(OutputGenerator): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.extensions = [] + self.extensions = {} # List of strings containing all vendor tags self.vendor_tags = [] self.file_suffix = '' + # SPIR-V dependencies, generated in beginFile() + self.SPV_deps = {} def newFile(self, filename): + assert self.genOpts self.logMsg('diag', '# Generating include file:', filename) fp = open(filename, 'w', encoding='utf-8') write(self.genOpts.conventions.warning_comment, file=fp) @@ -401,7 +498,8 @@ def newFile(self, filename): def beginFile(self, genOpts): OutputGenerator.beginFile(self, genOpts) - + assert self.genOpts + assert self.registry self.directory = Path(self.genOpts.directory) self.file_suffix = self.genOpts.conventions.file_suffix @@ -411,29 +509,51 @@ def beginFile(self, genOpts): for tag in root.findall('tags/tag'): self.vendor_tags.append(tag.get('name')) + # If there are elements in the XML, generate a + # reverse map from API version and extension names to the SPV + # extensions they depend on. + + def add_dep(SPV_deps, name, spvname): + """Add spvname as a dependency of name. 
+ name may be an API or extension name.""" + + if name not in SPV_deps: + SPV_deps[name] = set() + SPV_deps[name].add(spvname) + + for spvext in root.findall('spirvextensions/spirvextension'): + spvname = spvext.get('name') + for elem in spvext.findall('enable'): + if elem.get('version'): + version_name = elem.get('version') + add_dep(self.SPV_deps, version_name, spvname) + elif elem.get('extension'): + ext_name = elem.get('extension') + add_dep(self.SPV_deps, ext_name, spvname) + # Create subdirectory, if needed self.makeDir(self.directory) def conditionalExt(self, extName, content, ifdef = None, condition = None): doc = '' - innerdoc = 'ifdef::' + extName + '[]\n' - innerdoc += content + '\n' - innerdoc += 'endif::' + extName + '[]\n' + innerdoc = f"ifdef::{extName}[]\n" + innerdoc += f"{content}\n" + innerdoc += f"endif::{extName}[]\n" if ifdef: if ifdef == 'ifndef': if condition: - doc += 'ifndef::' + condition + '[]\n' + doc += f"ifndef::{condition}[]\n" doc += innerdoc - doc += 'endif::' + condition + '[]\n' + doc += f"endif::{condition}[]\n" else: # no condition is as if condition is defined; "nothing" is always defined :p pass # so no output elif ifdef == 'ifdef': if condition: - doc += 'ifdef::' + condition + '+' + extName + '[]\n' - doc += content + '\n' # does not include innerdoc; the ifdef was merged with the one above - doc += 'endif::' + condition + '+' + extName + '[]\n' + doc += f"ifdef::{condition}+{extName}[]\n" + doc += f"{content}\n" # does not include innerdoc; the ifdef was merged with the one above + doc += f"endif::{condition}+{extName}[]\n" else: # no condition is as if condition is defined; "nothing" is always defined :p doc += innerdoc else: # should be unreachable @@ -447,55 +567,64 @@ def makeExtensionInclude(self, extname): return self.conventions.extension_include_string(extname) def endFile(self): - self.extensions.sort() + # Determine the extension an extension is promoted from, if any. + # This is used when attempting to locate a proposal document in + # makeMetafile() below. + for (extname, ext) in self.extensions.items(): + promotedTo = ext.promotedTo + if promotedTo is not None: + if promotedTo in self.extensions: + #print(f'{promotedTo} is promoted from {extname}') + self.extensions[promotedTo].promotedFrom.add(extname) + #print(f'setting self.extensions[{promotedTo}].promotedFrom = {self.extensions[promotedTo].promotedFrom}') + elif not self.conventions.is_api_version_name(promotedTo): + self.logMsg('warn', f'{extname} is promoted to {promotedTo} which is not in the extension map') # Generate metadoc extension files, in refpage and non-refpage form - for ext in self.extensions: - ext.makeMetafile(self.extensions, isRefpage = False) + for ext in self.extensions.values(): + ext.makeMetafile(self.extensions, self.SPV_deps, isRefpage = False) if self.conventions.write_refpage_include: - ext.makeMetafile(self.extensions, isRefpage = True) + ext.makeMetafile(self.extensions, self.SPV_deps, isRefpage = True) + + # Key to sort extensions alphabetically within 'KHR', 'EXT', vendor + # extension prefixes. 
+ def makeSortKey(extname): + name = extname.lower() + prefixes = self.conventions.extension_index_prefixes + for i, prefix in enumerate(prefixes): + if extname.startswith(prefix): + return (i, name) + return (len(prefixes), name) # Generate list of promoted extensions promotedExtensions = {} - for ext in self.extensions: + for ext in self.extensions.values(): if ext.deprecationType == 'promotion' and ext.supercedingAPIVersion: - promotedExtensions.setdefault(ext.supercedingAPIVersion, []).append(ext) + promotedExtensions.setdefault(ext.supercedingAPIVersion, []).append(ext.name) for coreVersion, extensions in promotedExtensions.items(): - promoted_extensions_fp = self.newFile(self.directory / ('promoted_extensions_' + coreVersion + self.file_suffix)) + promoted_extensions_fp = self.newFile(self.directory / f"promoted_extensions_{coreVersion}{self.file_suffix}") - for ext in extensions: + for extname in sorted(extensions, key=makeSortKey): + ext = self.extensions[extname] indent = '' - write(' * {blank}\n+\n' + ext.conditionalLinkExt(ext.name, indent), file=promoted_extensions_fp) + write(f" * {{blank}}\n+\n{ext.conditionalLinkExt(extname, indent)}", file=promoted_extensions_fp) promoted_extensions_fp.close() - # Re-sort to match earlier behavior - # TODO: Remove this extra sort when re-arranging section order OK. - - def makeSortKey(ext): - name = ext.name.lower() - prefixes = self.conventions.extension_index_prefixes - for i, prefix in enumerate(prefixes): - if ext.name.startswith(prefix): - return (i, name) - return (len(prefixes), name) - - self.extensions.sort(key=makeSortKey) - # Generate include directives for the extensions appendix, grouping # extensions by status (current, deprecated, provisional, etc.) - with self.newFile(self.directory / ('current_extensions_appendix' + self.file_suffix)) as current_extensions_appendix_fp, \ - self.newFile(self.directory / ('deprecated_extensions_appendix' + self.file_suffix)) as deprecated_extensions_appendix_fp, \ - self.newFile(self.directory / ('current_extension_appendices' + self.file_suffix)) as current_extension_appendices_fp, \ - self.newFile(self.directory / ('current_extension_appendices_toc' + self.file_suffix)) as current_extension_appendices_toc_fp, \ - self.newFile(self.directory / ('deprecated_extension_appendices' + self.file_suffix)) as deprecated_extension_appendices_fp, \ - self.newFile(self.directory / ('deprecated_extension_appendices_toc' + self.file_suffix)) as deprecated_extension_appendices_toc_fp, \ - self.newFile(self.directory / ('deprecated_extensions_guard_macro' + self.file_suffix)) as deprecated_extensions_guard_macro_fp, \ - self.newFile(self.directory / ('provisional_extensions_appendix' + self.file_suffix)) as provisional_extensions_appendix_fp, \ - self.newFile(self.directory / ('provisional_extension_appendices' + self.file_suffix)) as provisional_extension_appendices_fp, \ - self.newFile(self.directory / ('provisional_extension_appendices_toc' + self.file_suffix)) as provisional_extension_appendices_toc_fp, \ - self.newFile(self.directory / ('provisional_extensions_guard_macro' + self.file_suffix)) as provisional_extensions_guard_macro_fp: + with self.newFile(self.directory / f"current_extensions_appendix{self.file_suffix}") as current_extensions_appendix_fp, \ + self.newFile(self.directory / f"deprecated_extensions_appendix{self.file_suffix}") as deprecated_extensions_appendix_fp, \ + self.newFile(self.directory / f"current_extension_appendices{self.file_suffix}") as current_extension_appendices_fp, \ + 
self.newFile(self.directory / f"current_extension_appendices_toc{self.file_suffix}") as current_extension_appendices_toc_fp, \ + self.newFile(self.directory / f"deprecated_extension_appendices{self.file_suffix}") as deprecated_extension_appendices_fp, \ + self.newFile(self.directory / f"deprecated_extension_appendices_toc{self.file_suffix}") as deprecated_extension_appendices_toc_fp, \ + self.newFile(self.directory / f"deprecated_extensions_guard_macro{self.file_suffix}") as deprecated_extensions_guard_macro_fp, \ + self.newFile(self.directory / f"provisional_extensions_appendix{self.file_suffix}") as provisional_extensions_appendix_fp, \ + self.newFile(self.directory / f"provisional_extension_appendices{self.file_suffix}") as provisional_extension_appendices_fp, \ + self.newFile(self.directory / f"provisional_extension_appendices_toc{self.file_suffix}") as provisional_extension_appendices_toc_fp, \ + self.newFile(self.directory / f"provisional_extensions_guard_macro{self.file_suffix}") as provisional_extensions_guard_macro_fp: # Note: there is a hardwired assumption in creating the # include:: directives below that all of these files are located @@ -503,8 +632,12 @@ def makeSortKey(ext): # This is difficult to change, and it is very unlikely changing # it will be needed. + # Do not include the lengthy '*extension_appendices_toc' indices + # in the Antora site build, since all the extensions are already + # indexed on the right navigation sidebar. + write('', file=current_extensions_appendix_fp) - write('include::{generated}/meta/deprecated_extensions_guard_macro' + self.file_suffix + '[]', file=current_extensions_appendix_fp) + write(f"include::{{generated}}/meta/deprecated_extensions_guard_macro{self.file_suffix}[]", file=current_extensions_appendix_fp) write('', file=current_extensions_appendix_fp) write('ifndef::HAS_DEPRECATED_EXTENSIONS[]', file=current_extensions_appendix_fp) write('[[extension-appendices-list]]', file=current_extensions_appendix_fp) @@ -515,19 +648,23 @@ def makeSortKey(ext): write('== List of Current Extensions', file=current_extensions_appendix_fp) write('endif::HAS_DEPRECATED_EXTENSIONS[]', file=current_extensions_appendix_fp) write('', file=current_extensions_appendix_fp) - write('include::{generated}/meta/current_extension_appendices_toc' + self.file_suffix + '[]', file=current_extensions_appendix_fp) + write('ifndef::site-gen-antora[]', file=current_extensions_appendix_fp) + write(f"include::{{generated}}/meta/current_extension_appendices_toc{self.file_suffix}[]", file=current_extensions_appendix_fp) + write('endif::site-gen-antora[]', file=current_extensions_appendix_fp) write('\n<<<\n', file=current_extensions_appendix_fp) - write('include::{generated}/meta/current_extension_appendices' + self.file_suffix + '[]', file=current_extensions_appendix_fp) + write(f"include::{{generated}}/meta/current_extension_appendices{self.file_suffix}[]", file=current_extensions_appendix_fp) write('', file=deprecated_extensions_appendix_fp) - write('include::{generated}/meta/deprecated_extensions_guard_macro' + self.file_suffix + '[]', file=deprecated_extensions_appendix_fp) + write(f"include::{{generated}}/meta/deprecated_extensions_guard_macro{self.file_suffix}[]", file=deprecated_extensions_appendix_fp) write('', file=deprecated_extensions_appendix_fp) write('ifdef::HAS_DEPRECATED_EXTENSIONS[]', file=deprecated_extensions_appendix_fp) write('[[deprecated-extension-appendices-list]]', file=deprecated_extensions_appendix_fp) write('== List of Deprecated Extensions', 
file=deprecated_extensions_appendix_fp) - write('include::{generated}/meta/deprecated_extension_appendices_toc' + self.file_suffix + '[]', file=deprecated_extensions_appendix_fp) + write('ifndef::site-gen-antora[]', file=deprecated_extensions_appendix_fp) + write(f"include::{{generated}}/meta/deprecated_extension_appendices_toc{self.file_suffix}[]", file=deprecated_extensions_appendix_fp) + write('endif::site-gen-antora[]', file=deprecated_extensions_appendix_fp) write('\n<<<\n', file=deprecated_extensions_appendix_fp) - write('include::{generated}/meta/deprecated_extension_appendices' + self.file_suffix + '[]', file=deprecated_extensions_appendix_fp) + write(f"include::{{generated}}/meta/deprecated_extension_appendices{self.file_suffix}[]", file=deprecated_extensions_appendix_fp) write('endif::HAS_DEPRECATED_EXTENSIONS[]', file=deprecated_extensions_appendix_fp) # add include guards to allow multiple includes @@ -537,19 +674,25 @@ def makeSortKey(ext): write(':PROVISIONAL_EXTENSIONS_GUARD_MACRO_INCLUDE_GUARD:\n', file=provisional_extensions_guard_macro_fp) write('', file=provisional_extensions_appendix_fp) - write('include::{generated}/meta/provisional_extensions_guard_macro' + self.file_suffix + '[]', file=provisional_extensions_appendix_fp) + write(f"include::{{generated}}/meta/provisional_extensions_guard_macro{self.file_suffix}[]", file=provisional_extensions_appendix_fp) write('', file=provisional_extensions_appendix_fp) write('ifdef::HAS_PROVISIONAL_EXTENSIONS[]', file=provisional_extensions_appendix_fp) write('[[provisional-extension-appendices-list]]', file=provisional_extensions_appendix_fp) write('== List of Provisional Extensions', file=provisional_extensions_appendix_fp) - write('include::{generated}/meta/provisional_extension_appendices_toc' + self.file_suffix + '[]', file=provisional_extensions_appendix_fp) + write('ifndef::site-gen-antora[]', file=provisional_extensions_appendix_fp) + write(f"include::{{generated}}/meta/provisional_extension_appendices_toc{self.file_suffix}[]", file=provisional_extensions_appendix_fp) + write('endif::site-gen-antora[]', file=provisional_extensions_appendix_fp) write('\n<<<\n', file=provisional_extensions_appendix_fp) - write('include::{generated}/meta/provisional_extension_appendices' + self.file_suffix + '[]', file=provisional_extensions_appendix_fp) + write(f"include::{{generated}}/meta/provisional_extension_appendices{self.file_suffix}[]", file=provisional_extensions_appendix_fp) write('endif::HAS_PROVISIONAL_EXTENSIONS[]', file=provisional_extensions_appendix_fp) - for ext in self.extensions: + # Emit extensions in author ID order + sorted_keys = sorted(self.extensions.keys(), key=makeSortKey) + for name in sorted_keys: + ext = self.extensions[name] + include = self.makeExtensionInclude(ext.name) - link = ' * ' + self.conventions.formatExtension(ext.name) + link = f" * {self.conventions.formatExtension(ext.name)}" # If something is provisional and deprecated, it's deprecated. 
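
# Sketch (not from the patch) of the bucketing rule stated above: being
# deprecated takes precedence over the provisional flag, mirroring the
# branches below.
def _demo_bucket(provisional, deprecation_type):
    if deprecation_type is not None:
        return "deprecated"
    if provisional == 'true':
        return "provisional"
    return "current"

assert _demo_bucket('true', 'promotion') == "deprecated"
assert _demo_bucket('true', None) == "provisional"
assert _demo_bucket('false', None) == "current"
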
if ext.provisional == 'true' and ext.deprecationType is None: @@ -583,42 +726,48 @@ def beginFeature(self, interface, emit): self.logMsg('diag', 'beginFeature: ignoring non-extension feature', self.featureName) return - # These attributes must exist name = self.featureName - number = self.getAttrib(interface, 'number') - ext_type = self.getAttrib(interface, 'type') - revision = self.getSpecVersion(interface, name) + + # These attributes may be required to exist, depending on the API + number = self.getAttrib(interface, 'number', + self.conventions.write_extension_number) + ext_type = self.getAttrib(interface, 'type', + self.conventions.write_extension_type) + if self.conventions.write_extension_revision: + revision = self.getSpecVersion(interface, name) + else: + revision = None # These attributes are optional OPTIONAL = False - requires = self.getAttrib(interface, 'requires', OPTIONAL) - requiresCore = self.getAttrib(interface, 'requiresCore', OPTIONAL, '1.0') # TODO update this line with update_version.py + depends = self.getAttrib(interface, 'depends', OPTIONAL, "XR_VERSION_1_0") # TODO should default to base API version 1.0? contact = self.getAttrib(interface, 'contact', OPTIONAL) promotedTo = self.getAttrib(interface, 'promotedto', OPTIONAL) deprecatedBy = self.getAttrib(interface, 'deprecatedby', OPTIONAL) obsoletedBy = self.getAttrib(interface, 'obsoletedby', OPTIONAL) provisional = self.getAttrib(interface, 'provisional', OPTIONAL, 'false') specialuse = self.getAttrib(interface, 'specialuse', OPTIONAL) + ratified = self.getAttrib(interface, 'ratified', OPTIONAL, '') - filename = self.directory / (name + self.file_suffix) + filename = self.directory / f"{name}{self.file_suffix}" extdata = Extension( generator = self, filename = filename, + interface = interface, name = name, number = number, ext_type = ext_type, - requires = requires, - requiresCore = requiresCore, + depends = depends, contact = contact, promotedTo = promotedTo, deprecatedBy = deprecatedBy, obsoletedBy = obsoletedBy, provisional = provisional, revision = revision, - specialuse = specialuse) - self.extensions.append(extdata) - + specialuse = specialuse, + ratified = ratified) + self.extensions[name] = extdata def endFeature(self): # Finish processing in superclass @@ -634,7 +783,7 @@ def getAttrib(self, elem, attribute, required=True, default=None): attrib = elem.get(attribute, default) if required and (attrib is None): name = elem.get('name', 'UNKNOWN') - self.logMsg('error', 'While processing \'' + self.featureName + ', <' + elem.tag + '> \'' + name + '\' does not contain required attribute \'' + attribute + '\'') + self.logMsg('error', f"While processing '{self.featureName}, <{elem.tag}> '{name}' does not contain required attribute '{attribute}'") return attrib def numbersToWords(self, name): @@ -642,13 +791,13 @@ def numbersToWords(self, name): # temporarily replace allowlist items for i, w in enumerate(allowlist): - name = re.sub(w, '{' + str(i) + '}', name) + name = re.sub(w, f"{{{str(i)}}}", name) name = re.sub(r'(?<=[A-Z])(\d+)(?![A-Z])', r'_\g<1>', name) # undo allowlist substitution for i, w in enumerate(allowlist): - name = re.sub('\\{' + str(i) + '}', w, name) + name = re.sub(f"\\{{{str(i)}}}", w, name) return name @@ -660,7 +809,7 @@ def getSpecVersion(self, elem, extname, default=None): - extname - extension name from the 'name' attribute - default - default value if SPEC_VERSION token not present""" # The literal enumerant name to match - versioningEnumName = self.numbersToWords(extname.upper()) + 
'_SPEC_VERSION' + versioningEnumName = f"{self.numbersToWords(extname.upper())}_SPEC_VERSION" for enum in elem.findall('./require/enum'): enumName = self.getAttrib(enum, 'name') @@ -671,8 +820,8 @@ def getSpecVersion(self, elem, extname, default=None): for enum in elem.findall('./require/enum'): enumName = self.getAttrib(enum, 'name') if enumName.find('SPEC_VERSION') != -1: - self.logMsg('diag', 'Missing ' + versioningEnumName + '! Potential misnamed candidate ' + enumName + '.') + self.logMsg('diag', f"Missing {versioningEnumName}! Potential misnamed candidate {enumName}.") return self.getAttrib(enum, 'value') - self.logMsg('error', 'Missing ' + versioningEnumName + '!') + self.logMsg('error', f"Missing {versioningEnumName}!") return default diff --git a/specification/scripts/extract_code.py b/specification/scripts/extract_code.py index e95f4fb7..fe8db478 100644 --- a/specification/scripts/extract_code.py +++ b/specification/scripts/extract_code.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 # +# Copyright 2018-2024, The Khronos Group Inc. # Copyright (c) 2018 Collabora, Ltd. # # SPDX-License-Identifier: Apache-2.0 @@ -91,7 +92,7 @@ def make_numbered_filename(self, language): def print_message(self, s): if not self.quiet: - print('{}:{}: {}'.format(self.filename, self.line_number, s)) + print(f'{self.filename}:{self.line_number}: {s}') def process_start_of_code_block(self): prev_line = self.get_preceding_line() @@ -137,7 +138,7 @@ def process_end_of_code_block(self): if len(code_lines) < self.MIN_LINES: self.print_message( - 'Not extracting code snippet - only {} lines.'.format(len(code_lines))) + f'Not extracting code snippet - only {len(code_lines)} lines.') return out_filename = self.make_numbered_filename(self.language) @@ -152,7 +153,7 @@ def process_end_of_code_block(self): with out_filename.open('w', encoding='utf-8') as f: f.write('#include "common_include.h"\n') if include_file.exists(): - f.write('#include "{}"\n\n'.format(include_file.name)) + f.write(f'#include "{include_file.name}"\n\n') self.deps.append((out_filename, include_file)) f.write('void func() {\n') f.write(''.join(code_lines)) @@ -161,8 +162,7 @@ def process_end_of_code_block(self): def process_code_block_line(self): if self.code_lines is not None: if self.output_line_numbers: - self.code_lines.append('# {} "{}"\n'.format( - self.line_number, self.filename)) + self.code_lines.append(f'# {self.line_number} "{self.filename}\"\n') self.code_lines.append(self.line) def process_line(self, line_num, line): @@ -219,7 +219,7 @@ def output_makefile(self, makefile): for fn in self.all_generated if fn.suffix == '.c') generated_cpp_string = ' \\\n'.join(str(fn) for fn in self.all_generated if fn.suffix == '.cpp') - deps_string = '\n'.join('{}: {} $(CODEDIR)/common_include.h'.format(fn.with_suffix('.o'), dep) + deps_string = '\n'.join(f"{fn.with_suffix('.o')}: {dep} $(CODEDIR)/common_include.h" for fn, dep in self.deps) extra_arg = '' if self.output_line_numbers: @@ -263,7 +263,7 @@ def output_makefile(self, makefile): .PHONY: gen {deps} -""".format(out=(ROOT / 'specification' / 'generated' / 'out' / '1.0').relative_to(Path('.').resolve()), +""".format(out=(ROOT / 'specification' / 'generated' / 'out' / '1.1').relative_to(Path('.').resolve()), codedir=CODEDIR.relative_to(Path('.').resolve()), c=generated_c_string, cpp=generated_cpp_string, @@ -287,8 +287,7 @@ def output_makefile(self, makefile): compiler = '[cc] ' origin_str = '{} {} extracted from {}:{}'.format(compiler, generated.name.ljust(width), origin_file, origin_line) - 
f.write('{obj}: ORIGIN := {originstr}\n'.format( - obj=generated.with_suffix('.o'), originstr=origin_str)) + f.write(f"{generated.with_suffix('.o')}: ORIGIN := {origin_str}\n") if __name__ == "__main__": diff --git a/specification/scripts/genRef.py b/specification/scripts/genRef.py index 0f9b477b..f0320a2f 100644 --- a/specification/scripts/genRef.py +++ b/specification/scripts/genRef.py @@ -18,6 +18,7 @@ printPageInfo, setLogFile) from reg import Registry from generator import GeneratorOptions +from parse_dependency import dependencyNames from apiconventions import APIConventions @@ -38,6 +39,7 @@ # Other refpage types - SPIR-V builtins, API feature blocks, etc. - which do # not have structured content. refpage_other_types = ( + 'feature', 'freeform', ) @@ -100,7 +102,7 @@ def printFooter(fp, leveloffset=0): print('ifdef::doctype-manpage[]', f'{prefix} Copyright', '', - 'include::{config}/copyright-ccby' + conventions.file_suffix + '[]', + f"include::{{config}}/copyright-ccby{conventions.file_suffix}[]", 'endif::doctype-manpage[]', '', 'ifndef::doctype-manpage[]', @@ -116,24 +118,24 @@ def macroPrefix(name): If the name is not recognized, use the generic link macro 'reflink:'.""" if name in api.basetypes: - return 'basetype:' + name + return f"basetype:{name}" if name in api.defines: - return 'dlink:' + name + return f"dlink:{name}" if name in api.enums: - return 'elink:' + name + return f"elink:{name}" if name in api.flags: - return 'elink:' + name + return f"elink:{name}" if name in api.funcpointers: - return 'tlink:' + name + return f"tlink:{name}" if name in api.handles: - return 'slink:' + name + return f"slink:{name}" if name in api.protos: - return 'flink:' + name + return f"flink:{name}" if name in api.structs: - return 'slink:' + name + return f"slink:{name}" if name == 'TBD': return 'No cross-references are available' - return 'reflink:' + name + return f"reflink:{name}" def seeAlsoList(apiName, explicitRefs=None, apiAliases=[]): @@ -170,12 +172,16 @@ def seeAlsoList(apiName, explicitRefs=None, apiAliases=[]): for (base,dependency) in api.requiredBy[name]: refs.add(base) if dependency is not None: - refs.add(dependency) + # 'dependency' may be a boolean expression of extension + # names. + # Extract them for use in cross-references. 
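
# Sketch (not from the patch) of what the name extraction below sees, using
# the parse_dependency module added later in this patch; the extension names
# are hypothetical.
from parse_dependency import dependencyNames

assert dependencyNames("XR_KHR_demo_a+(XR_KHR_demo_b,XR_VERSION_1_1)") == {
    "XR_KHR_demo_a", "XR_KHR_demo_b", "XR_VERSION_1_1"}
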
+ for extname in dependencyNames(dependency): + refs.add(extname) if len(refs) == 0: return None else: - return ', '.join(macroPrefix(name) for name in sorted(refs)) + '\n' + return f"{', '.join(macroPrefix(name) for name in sorted(refs))}\n" def remapIncludes(lines, baseDir, specDir): @@ -198,10 +204,10 @@ def remapIncludes(lines, baseDir, specDir): if path[0] != '{': # Relative path to include file from here - incPath = specDir + '/' + path + incPath = f"{specDir}/{path}" # Remap to be relative to baseDir newPath = os.path.relpath(incPath, baseDir) - newLine = 'include::' + newPath + '[]\n' + newLine = f"include::{newPath}[]\n" logDiag('remapIncludes: remapping', line, '->', newLine) newLines.append(newLine) else: @@ -228,12 +234,15 @@ def refPageShell(pageName, pageDesc, fp, head_content = None, sections=None, tai print(':data-uri:', ':icons: font', + ':attribute-missing: warn', conventions.extra_refpage_headers, '', sep='\n', file=fp) - s = '{}({})'.format(pageName, man_section) - print('= ' + s, + s = f'{pageName}({man_section})' + print(f"= {s}", + '', + conventions.extra_refpage_body, '', sep='\n', file=fp) if pageDesc.strip() == '': @@ -241,7 +250,7 @@ def refPageShell(pageName, pageDesc, fp, head_content = None, sections=None, tai logWarn('refPageHead: no short description provided for', pageName) print('== Name', - '{} - {}'.format(pageName, pageDesc), + f'{pageName} - {pageDesc}', '', sep='\n', file=fp) @@ -252,7 +261,7 @@ def refPageShell(pageName, pageDesc, fp, head_content = None, sections=None, tai if sections is not None: for title, content in sections.items(): - print('== {}'.format(title), + print(f'== {title}', '', content, '', @@ -336,7 +345,7 @@ def refPageTail(pageName, )) else: notes.extend(( - 'This page is extracted from the ' + specName + ' Specification. ', + f"This page is extracted from the {specName} Specification. ", 'Fixes and changes should be made to the Specification, ' 'not directly.', )) @@ -416,7 +425,7 @@ def emitPage(baseDir, specDir, pi, file): - specDir - directory extracted page source came from - pi - pageInfo for this page relative to file - file - list of strings making up the file, indexed by pi""" - pageName = baseDir + '/' + pi.name + '.txt' + pageName = f'{baseDir}/{pi.name}{conventions.file_suffix}' # Add a dictionary entry for this page global genDict @@ -437,8 +446,12 @@ def emitPage(baseDir, specDir, pi, file): logWarn('emitPage:', pageName, 'INCLUDE is None, no page generated') return - # Specification text - lines = remapIncludes(file[pi.begin:pi.include + 1], baseDir, specDir) + # Specification text from beginning to just before the parameter + # section. This covers the description, the prototype, the version + # note, and any additional version note text. If a parameter section + # is absent then go a line beyond the include. 
+ remap_end = pi.include + 1 if pi.param is None else pi.param + lines = remapIncludes(file[pi.begin:remap_end], baseDir, specDir) specText = ''.join(lines) if pi.param is not None: @@ -499,7 +512,7 @@ def autoGenEnumsPage(baseDir, pi, file): - baseDir - base directory to emit page into - pi - pageInfo for this page relative to file - file - list of strings making up the file, indexed by pi""" - pageName = baseDir + '/' + pi.name + '.txt' + pageName = f'{baseDir}/{pi.name}{conventions.file_suffix}' fp = open(pageName, 'w', encoding='utf-8') # Add a dictionary entry for this page @@ -525,7 +538,7 @@ def autoGenEnumsPage(baseDir, pi, file): 'For more information, see:\n\n', embedRef, ' * The See Also section for other reference pages using this type.\n', - ' * The ' + apiName + ' Specification.\n')) + f" * The {apiName} Specification.\n")) refPageHead(pi.name, pi.desc, @@ -552,7 +565,7 @@ def autoGenFlagsPage(baseDir, flagName): - baseDir - base directory to emit page into - flagName - API *Flags name""" - pageName = baseDir + '/' + flagName + '.txt' + pageName = f'{baseDir}/{flagName}{conventions.file_suffix}' fp = open(pageName, 'w', encoding='utf-8') # Add a dictionary entry for this page @@ -566,24 +579,24 @@ def autoGenFlagsPage(baseDir, flagName): name = matches.group('name') author = matches.group('author') logDiag('autoGenFlagsPage: split name into', name, 'Flags', author) - flagBits = name + 'FlagBits' + author - desc = 'Bitmask of ' + flagBits + flagBits = f"{name}FlagBits{author}" + desc = f"Bitmask of {flagBits}" else: logWarn('autoGenFlagsPage:', pageName, 'does not end in "Flags{author ID}". Cannot infer FlagBits type.') flagBits = None - desc = 'Unknown ' + apiName + ' flags type' + desc = f"Unknown {apiName} flags type" # Description text if flagBits is not None: txt = ''.join(( - 'etext:' + flagName, - ' is a mask of zero or more elink:' + flagBits + '.\n', + f"etext:{flagName}", + f" is a mask of zero or more elink:{flagBits}.\n", 'It is used as a member and/or parameter of the structures and commands\n', 'in the See Also section below.\n')) else: txt = ''.join(( - 'etext:' + flagName, - ' is an unknown ' + apiName + ' type, assumed to be a bitmask.\n')) + f"etext:{flagName}", + f" is an unknown {apiName} type, assumed to be a bitmask.\n")) refPageHead(flagName, desc, @@ -607,7 +620,7 @@ def autoGenHandlePage(baseDir, handleName): - handleName - API handle name""" # @@ Need to determine creation function & add handles/ include for the # @@ interface in generator.py. - pageName = baseDir + '/' + handleName + '.txt' + pageName = f'{baseDir}/{handleName}{conventions.file_suffix}' fp = open(pageName, 'w', encoding='utf-8') # Add a dictionary entry for this page @@ -616,13 +629,13 @@ def autoGenHandlePage(baseDir, handleName): logDiag('autoGenHandlePage:', pageName) # Short description - desc = apiName + ' object handle' + desc = f"{apiName} object handle" descText = ''.join(( - 'sname:' + handleName, + f"sname:{handleName}", ' is an object handle type, referring to an object used\n', - 'by the ' + apiName + ' implementation. These handles are created or allocated\n', - 'by the @@ TBD @@ function, and used by other ' + apiName + ' structures\n', + f"by the {apiName} implementation. 
These handles are created or allocated\n", + f"by the @@ TBD @@ function, and used by other {apiName} structures\n", 'and commands in the See Also section below.\n')) refPageHead(handleName, @@ -654,7 +667,7 @@ def genRef(specFile, baseDir): specDir = os.path.dirname(os.path.abspath(specFile)) pageMap = findRefs(file, specFile) - logDiag(specFile + ': found', len(pageMap.keys()), 'potential pages') + logDiag(f"{specFile}: found", len(pageMap.keys()), 'potential pages') sys.stderr.flush() @@ -692,7 +705,7 @@ def genRef(specFile, baseDir): printPageInfo(pi, file) if pi.Warning: - logDiag('genRef:', pi.name + ':', pi.Warning) + logDiag('genRef:', f"{pi.name}:", pi.Warning) if pi.extractPage: emitPage(baseDir, specDir, pi, file) @@ -712,7 +725,7 @@ def genRef(specFile, baseDir): def genSinglePageRef(baseDir): - """Generate baseDir/apispec.txt, the single-page version of the ref pages. + """Generate the single-page version of the ref pages. This assumes there is a page for everything in the api module dictionaries. Extensions (KHR, EXT, etc.) are currently skipped""" @@ -721,7 +734,7 @@ def genSinglePageRef(baseDir): printCopyrightSourceComments(head) - print('= ' + apiName + ' API Reference Pages', + print(f"= {apiName} API Reference Pages", ':data-uri:', ':icons: font', ':doctype: book', @@ -730,27 +743,28 @@ def genSinglePageRef(baseDir): ':data-uri:', ':toc2:', ':toclevels: 2', + ':attribute-missing: warn', '', sep='\n', file=head) print('== Copyright', file=head) print('', file=head) - print('include::{config}/copyright-ccby' + conventions.file_suffix + '[]', file=head) + print(f"include::{{config}}/copyright-ccby{conventions.file_suffix}[]", file=head) print('', file=head) # Inject the table of contents. Asciidoc really ought to be generating # this for us. sections = [ - [api.protos, 'protos', apiName + ' Commands'], + [api.protos, 'protos', f"{apiName} Commands"], [api.handles, 'handles', 'Object Handles'], [api.structs, 'structs', 'Structures'], [api.enums, 'enums', 'Enumerations'], [api.flags, 'flags', 'Flags'], [api.funcpointers, 'funcpointers', 'Function Pointer Types'], - [api.basetypes, 'basetypes', apiName + ' Scalar types'], + [api.basetypes, 'basetypes', f"{apiName} Scalar types"], [api.defines, 'defines', 'C Macro Definitions'], - [extensions, 'extensions', apiName + ' Extensions'] + [extensions, 'extensions', f"{apiName} Extensions"] ] # Accumulate body of page @@ -758,9 +772,9 @@ def genSinglePageRef(baseDir): for (apiDict, label, title) in sections: # Add section title/anchor header to body - anchor = '[[' + label + ',' + title + ']]' + anchor = f"[[{label},{title}]]" print(anchor, - '== ' + title, + f"== {title}", '', ':leveloffset: 2', '', @@ -787,7 +801,7 @@ def genSinglePageRef(baseDir): # Now, all are emitted. 
continue else: - print('include::' + refPage + '.txt[]', file=body) + print(f'include::{refPage}{conventions.file_suffix}[]', file=body) else: # Alternatively, we could (probably should) link to the # aliased refpage @@ -795,10 +809,10 @@ def genSinglePageRef(baseDir): 'in single-page reference', 'because it is an alias of', api.alias[refPage]) - print('\n' + ':leveloffset: 0' + '\n', file=body) + print(f"\n:leveloffset: 0\n", file=body) # Write head and body to the output file - pageName = baseDir + '/apispec.txt' + pageName = f'{baseDir}/apispec{conventions.file_suffix}' fp = open(pageName, 'w', encoding='utf-8') print(head.getvalue(), file=fp, end='') @@ -848,7 +862,10 @@ def genExtension(baseDir, extpath, name, info): continue if req_name not in genDict: - logWarn('ERROR: {} (in extension {}) does not have a ref page.'.format(req_name, name)) + if req_name in api.alias: + logWarn(f'WARN: {req_name} (in extension {name}) is an alias, so does not have a ref page') + else: + logWarn(f'ERROR: {req_name} (in extension {name}) does not have a ref page.') declares.append(req_name) @@ -856,7 +873,7 @@ def genExtension(baseDir, extpath, name, info): tail_content = None if extpath is not None: try: - appPath = extpath + '/' + conventions.extension_file_path(name) + appPath = f"{extpath}/{conventions.extension_file_path(name)}" appfp = open(appPath, 'r', encoding='utf-8') appbody = appfp.read() appfp.close() @@ -877,7 +894,7 @@ def genExtension(baseDir, extpath, name, info): tail_content = makeExtensionInclude(name) # Write the extension refpage - pageName = baseDir + '/' + name + '.txt' + pageName = f'{baseDir}/{name}{conventions.file_suffix}' logDiag('genExtension:', pageName) fp = open(pageName, 'w', encoding='utf-8') @@ -886,7 +903,7 @@ def genExtension(baseDir, extpath, name, info): ref_page_sections['Specification'] = f'See link:{{html_spec_relative}}#{name}[{name}] in the main specification for complete information.' refPageShell(name, - "{} extension".format(ext_type), + f"{ext_type} extension", fp, appbody, sections=ref_page_sections, @@ -938,7 +955,7 @@ def genExtension(baseDir, extpath, name, info): help='Don\'t generate inferred ref pages automatically') parser.add_argument('files', metavar='filename', nargs='*', help='a filename to extract ref pages from') - parser.add_argument('--version', action='version', version='%(prog)s 1.0') + parser.add_argument('--version', action='version', version='%(prog)s 1.1') parser.add_argument('-extension', action='append', default=[], help='Specify an extension or extensions to add to targets') @@ -1026,8 +1043,8 @@ def genExtension(baseDir, extpath, name, info): (api.structs, 'Structures'), (api.protos, 'Prototypes'), (api.funcpointers, 'Function Pointers'), - (api.basetypes, apiName + ' Scalar Types'), - (extensions, apiName + ' Extensions'), + (api.basetypes, f"{apiName} Scalar Types"), + (extensions, f"{apiName} Extensions"), ] # Summarize pages that were not generated, for good or bad reasons @@ -1040,9 +1057,9 @@ def genExtension(baseDir, extpath, name, info): if page not in genDict: # Page was not generated - why not? 
if page in api.alias:
-                logWarn('(Benign, is an alias) Ref page for', title, page, 'is aliased into', api.alias[page])
+                logDiag('(Benign, is an alias) Ref page for', title, page, 'is aliased into', api.alias[page])
             elif page in api.flags and api.flags[page] is None:
-                logWarn('(Benign, no FlagBits defined) No ref page generated for ', title,
+                logDiag('(Benign, no FlagBits defined) No ref page generated for ', title,
                         page)
             else:
                 # Could introduce additional logic to detect
diff --git a/specification/scripts/genanchorlinks.py b/specification/scripts/genanchorlinks.py
deleted file mode 100644
index a5d7106f..00000000
--- a/specification/scripts/genanchorlinks.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright 2020-2024, The Khronos Group Inc.
-#
-# SPDX-License-Identifier: Apache-2.0

-# Script that adds href to anchors

-import os,sys,re

-def genAnchorLinks(in_file, out_file):
-    try:
-        with open(in_file, 'r', encoding='utf8') as f: data = f.read()
-    except FileNotFoundError:
-        print('Error: File %s does not exist.' % in_file)
-        sys.exit(2)

-    data = re.sub( r'(<a )(id="(VUID-[^"]+)")(>)', '\g<1>\g<2> href="#\g<3>"\g<4>', data)
-    with open(out_file, 'w', encoding='utf8') as f: data = f.write(data)

-if __name__ == '__main__':
-    if len(sys.argv) != 3:
-        print('Error: genanchorlinks.py requires two arguments.')
-        print('Usage: genanchorlinks.py infile.html outfile.html')
-        sys.exit(1)
-    genAnchorLinks(sys.argv[1], sys.argv[2])
diff --git a/specification/scripts/genanchorlinks.rb b/specification/scripts/genanchorlinks.rb
new file mode 100644
index 00000000..6280ff49
--- /dev/null
+++ b/specification/scripts/genanchorlinks.rb
@@ -0,0 +1,22 @@
+# Copyright 2023-2024 The Khronos Group Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+# Rewrite VUID anchors with 'href' attributes so they can be selected in a
+# browser.
+
+require 'asciidoctor/extensions' unless RUBY_ENGINE == 'opal'
+
+include ::Asciidoctor
+
+class AnchorLinkPostprocessor < Asciidoctor::Extensions::Postprocessor
+  def process document, output
+    if document.basebackend? 'html'
+      return output.gsub(/<a id="(VUID-[^"]+)">/, '<a id="\1" href="#\1">')
+    end
+    output
+  end
+end
+
+Asciidoctor::Extensions.register do
+  postprocessor AnchorLinkPostprocessor
+end
diff --git a/specification/scripts/generator.py b/specification/scripts/generator.py
index 226fc7b7..866a21c4 100644
--- a/specification/scripts/generator.py
+++ b/specification/scripts/generator.py
@@ -19,7 +19,7 @@
     from pathlib import Path
 except ImportError:
     # For limited python 2 compat as used by some Vulkan consumers
-    from pathlib2 import Path # type: ignore
+    from pathlib2 import Path  # type: ignore
 
 from spec_tools.util import getElemName, getElemType
 
@@ -66,9 +66,8 @@ def regSortCategoryKey(feature):
         return 0.5
     else:
         return 0
-    if (feature.category == 'ARB'
-            or feature.category == 'KHR'
-            or feature.category == 'OES'):
+
+    if feature.category.upper() in ('ARB', 'KHR', 'OES'):
         return 1
     return 2
 
@@ -119,7 +118,7 @@ class MissingGeneratorOptionsError(RuntimeError):
     def __init__(self, msg=None):
         full_msg = 'Missing generator options object self.genOpts'
         if msg:
-            full_msg += ': ' + msg
+            full_msg += f": {msg}"
         super().__init__(full_msg)
 
 
@@ -129,7 +128,7 @@ class MissingRegistryError(RuntimeError):
     def __init__(self, msg=None):
         full_msg = 'Missing Registry object self.registry'
         if msg:
-            full_msg += ': ' + msg
+            full_msg += f": {msg}"
         super().__init__(full_msg)
 
 
@@ -139,7 +138,7 @@ class MissingGeneratorOptionsConventionsError(RuntimeError):
     def __init__(self, msg=None):
         full_msg = 'Missing Conventions object self.genOpts.conventions'
         if msg:
-            full_msg += ': ' + msg
+            full_msg += f": {msg}"
         super().__init__(full_msg)
 
 
@@ -155,6 +154,7 @@ def __init__(self,
                  directory='.',
                  genpath=None,
                  apiname=None,
+                 mergeApiNames=None,
                  profile=None,
                  versions='.*',
                  emitversions='.*',
@@ -169,6 +169,7 @@ def __init__(self,
                  sortProcedure=regSortFeatures,
                  requireCommandAliases=False,
                  redefineEnumExtends=False,
+                 requireDepends=True,
                  ):
         """Constructor.
 
@@ -180,6 +181,8 @@ def __init__(self,
         - directory - directory in which to generate filename
         - genpath - path to previously generated files, such as apimap.py
         - apiname - string matching `<registry>` 'apiname' attribute, e.g. 'gl'.
+        - mergeApiNames - If not None, a comma separated list of API names
+          to merge into the API specified by 'apiname'
         - profile - string specifying API profile , e.g. 'core', or None.
         - versions - regex matching API versions to process interfaces for.
           Normally `'.*'` or `'[0-9][.][0-9]'` to match all defined versions.
@@ -209,6 +212,11 @@ def __init__(self,
           or being complete. Defaults to True.
         - sortProcedure - takes a list of FeatureInfo objects and sorts
           them in place to a preferred order in the generated output.
+        - requireCommandAliases - if True, treat command aliases
+          as required dependencies.
+        - requireDepends - whether to follow API dependencies when emitting
+          APIs.
+
         Default is
           - core API versions
           - Khronos (ARB/KHR/OES) extensions
 
@@ -239,6 +247,9 @@ def __init__(self,
         self.apiname = apiname
         "string matching `<registry>` 'apiname' attribute, e.g. 'gl'."
 
+        self.mergeApiNames = mergeApiNames
+        "comma separated list of API names to merge into the API specified by 'apiname'"
+
         self.profile = profile
         "string specifying API profile , e.g. 'core', or None."
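
# Sketch (not from the patch) of the anchor rewrite that the
# genanchorlinks.rb script above performs, assuming VUID anchors of the form
# <a id="VUID-...">; the sample VUID is hypothetical.
import re

def _demo_add_vuid_hrefs(html):
    # Give each VUID anchor a self-referencing href so the link can be
    # copied from a browser.
    return re.sub(r'<a id="(VUID-[^"]+)">', r'<a id="\1" href="#\1">', html)

assert _demo_add_vuid_hrefs('<a id="VUID-xrDemo-demo">') == \
    '<a id="VUID-xrDemo-demo" href="#VUID-xrDemo-demo">'
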
@@ -311,6 +322,9 @@ def __init__(self, generating a standalone header which won't include the actual enum type """ + self.requireDepends = requireDepends + """True if dependencies of API tags are transitively required.""" + def emptyRegex(self, pat): """Substitute a regular expression which matches no version or extension names for None or the empty string.""" @@ -346,7 +360,7 @@ def breakName(self, name, msg): ) if name in bad and True: - print('breakName {}: {}'.format(name, msg)) + print(f'breakName {name}: {msg}') pdb.set_trace() def __init__(self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout): @@ -380,6 +394,9 @@ def __init__(self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout) # derived generators. self.apidict = None + # File suffix for generated files, set in beginFile below. + self.file_suffix = '' + def logMsg(self, level, *args): """Write a message of different categories to different destinations. @@ -404,7 +421,7 @@ def logMsg(self, level, *args): write('DIAG:', *args, file=self.diagFile) else: raise UserWarning( - '*** FATAL ERROR in Generator.logMsg: unknown level:' + level) + f"*** FATAL ERROR in Generator.logMsg: unknown level:{level}") def enumToValue(self, elem, needsNum, bitwidth = 32, forceSuffix = False, parent_for_alias_dereference=None): @@ -457,20 +474,20 @@ def enumToValue(self, elem, needsNum, bitwidth = 32, # value += enuminfo.type if forceSuffix: if bitwidth == 64: - value = value + 'ULL' + value = f"{value}ULL" else: - value = value + 'U' + value = f"{value}U" self.logMsg('diag', 'Enum', name, '-> value [', numVal, ',', value, ']') return [numVal, value] if 'bitpos' in elem.keys(): value = elem.get('bitpos') bitpos = int(value, 0) numVal = 1 << bitpos - value = '0x%08x' % numVal + value = f'0x{numVal:08x}' if bitwidth == 64 or bitpos >= 32: - value = value + 'ULL' + value = f"{value}ULL" elif forceSuffix: - value = value + 'U' + value = f"{value}U" self.logMsg('diag', 'Enum', name, '-> bitpos [', numVal, ',', value, ']') return [numVal, value] if 'offset' in elem.keys(): @@ -546,9 +563,8 @@ def checkDuplicateEnums(self, enums): # still add this enum to the list. (name2, numVal2, strVal2) = valueMap[numVal] - msg = 'Two enums found with the same value: {} = {} = {}'.format( - name, name2.get('name'), strVal) - self.logMsg('error', msg) + self.logMsg('error', 'Two enums found with the same value: %s = %s = %s', + name, name2.get('name'), strVal) # Track this enum to detect followon duplicates nameMap[name] = [elem, numVal, strVal] @@ -588,7 +604,7 @@ def buildEnumCDecl_Bitmask(self, groupinfo, groupName): flagTypeName = groupinfo.flagType.elem.get('name') # Prefix - body = "// Flag bits for " + flagTypeName + "\n" + body = f"// Flag bits for {flagTypeName}\n" # Loop over the nested 'enum' tags. 
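
# Sketch (not from the patch) of the C text the loop below emits, with
# hypothetical flag names standing in for XML-derived data.
_flag_type = "XrDemoFlags64"
_body = f"// Flag bits for {_flag_type}\n"
for _name, _value, _alias_of in [("XR_DEMO_BIT_A", "0x00000001ULL", None),
                                 ("XR_DEMO_ALIAS_BIT", "0x00000001ULL", "XR_DEMO_BIT_A")]:
    _body += f"static const {_flag_type} {_name} = {_value};"
    if _alias_of is not None:
        _body += f" // alias of {_alias_of}"
    _body += "\n"
assert "// alias of XR_DEMO_BIT_A" in _body
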
for elem in groupElem.findall('enum'): @@ -598,9 +614,9 @@ def buildEnumCDecl_Bitmask(self, groupinfo, groupName): (_, strVal) = self.enumToValue(elem, True, parent_for_alias_dereference=groupElem) alias_of = elem.get('alias') name = elem.get('name') - body += "static const {} {} = {};".format(flagTypeName, name, strVal) + body += f"static const {flagTypeName} {name} = {strVal};" if alias_of is not None: - body += " // alias of {}".format(alias_of) + body += f" // alias of {alias_of}" body += "\n" # Postfix @@ -618,7 +634,7 @@ def buildEnumCDecl_Enum(self, expand, groupinfo, groupName): expandSuffix = '' expandSuffixMatch = re.search(r'[A-Z][A-Z]+$', groupName) if expandSuffixMatch: - expandSuffix = '_' + expandSuffixMatch.group() + expandSuffix = f"_{expandSuffixMatch.group()}" # Strip off the suffix from the prefix expandPrefix = expandName.rsplit(expandSuffix, 1)[0] @@ -667,14 +683,14 @@ def buildEnumCDecl_Enum(self, expand, groupinfo, groupName): protect = elem.get('protect') if protect is not None: - decl += '#ifdef {}\n'.format(protect) + decl += f'#ifdef {protect}\n' # Indent requirements comment, if there is one requirements = self.genRequirements(name, mustBeFound = False) if requirements != '': - requirements = ' ' + requirements + requirements = f" {requirements}" decl += requirements - decl += ' {} = {},'.format(name, strVal) + decl += f' {name} = {strVal},' if protect is not None: decl += '\n#endif' @@ -745,8 +761,8 @@ def buildConstantCDecl(self, enuminfo, name, alias): if typeStr != "float": number += 'U' strVal = "~" if invert else "" - strVal += "static_cast<" + typeStr + ">(" + number + ")" - body = 'static constexpr ' + typeStr.ljust(9) + name.ljust(33) + ' {' + strVal + '};' + strVal += f"static_cast<{typeStr}>({number})" + body = f"static constexpr {typeStr.ljust(9)}{name.ljust(33)} {{{strVal}}};" elif enuminfo.elem.get('type') and not alias: # Generate e.g.: #define x (~0ULL) typeStr = enuminfo.elem.get('type'); @@ -761,8 +777,8 @@ def buildConstantCDecl(self, enuminfo, name, alias): strVal = "~" if invert else "" strVal += number if paren: - strVal = "(" + strVal + ")"; - body = '#define ' + name.ljust(33) + ' ' + strVal; + strVal = f"({strVal})"; + body = f"#define {name.ljust(33)} {strVal}"; elif self.genOpts.redefineEnumExtends and enuminfo.elem.get('extends'): # tags with an extends field is usually # absorbed into the actual enum definition @@ -778,11 +794,11 @@ def buildConstantCDecl(self, enuminfo, name, alias): strVal = "~" if invert else "" strVal += number if paren: - strVal = "(" + strVal + ")" - strVal = "((" + typeStr + ") "+ strVal + ")" - body = '#define ' + name.ljust(33) + ' ' + strVal + strVal = f"({strVal})" + strVal = f"(({typeStr}) {strVal})" + body = f"#define {name.ljust(33)} {strVal}" else: - body = '#define ' + name.ljust(33) + ' ' + strVal + body = f"#define {name.ljust(33)} {strVal}" return body @@ -790,7 +806,7 @@ def makeDir(self, path: Path): """Create a directory, if not already done. 
Generally called from derived generators creating hierarchies.""" - self.logMsg('diag', 'OutputGenerator::makeDir(' + str(path) + ')') + self.logMsg('diag', f"OutputGenerator::makeDir({str(path)})") if path not in self.madeDirs: # This can get race conditions with multiple writers, see # https://stackoverflow.com/questions/273192/ @@ -810,6 +826,7 @@ def beginFile(self, genOpts): raise MissingGeneratorOptionsConventionsError() self.should_insert_may_alias_macro = \ self.genOpts.conventions.should_insert_may_alias_macro(self.genOpts) + self.file_suffix = self.genOpts.conventions.file_suffix # Try to import the API dictionary, apimap.py, if it exists. Nothing # in apimap.py cannot be extracted directly from the XML, and in the @@ -958,6 +975,30 @@ def genFormat(self, format, formatinfo, alias): Extend to generate as desired in your derived class.""" return + def genSyncStage(self, stageinfo): + """Generate interface for a sync stage element. + + - stageinfo - SyncStageInfo + + Extend to generate as desired in your derived class.""" + return + + def genSyncAccess(self, accessinfo): + """Generate interface for a sync stage element. + + - accessinfo - AccessInfo + + Extend to generate as desired in your derived class.""" + return + + def genSyncPipeline(self, pipelineinfo): + """Generate interface for a sync stage element. + + - pipelineinfo - SyncPipelineInfo + + Extend to generate as desired in your derived class.""" + return + def makeProtoName(self, name, tail): """Turn a `` `` into C-language prototype and typedef declarations for that name. @@ -972,7 +1013,7 @@ def makeTypedefName(self, name, tail): """Make the function-pointer typedef name for a command.""" if self.genOpts is None: raise MissingGeneratorOptionsError() - return '(' + self.genOpts.apientryp + 'PFN_' + name + tail + ')' + return f"({self.genOpts.apientryp}PFN_{name}{tail})" def makeCParamDecl(self, param, aligncol): """Return a string which is an indented, formatted @@ -1005,14 +1046,14 @@ def makeCParamDecl(self, param, aligncol): # This works around a problem where very long type names - # longer than the alignment column - would run into the tail # text. - paramdecl = paramdecl.ljust(aligncol - 1) + ' ' + paramdecl = f"{paramdecl.ljust(aligncol - 1)} " newLen = len(paramdecl) self.logMsg('diag', 'Adjust length of parameter decl from', oldLen, 'to', newLen, ':', paramdecl) if (self.misracppstyle() and prefix.find('const ') != -1): # Change pointer type order from e.g. "const void *" to "void const *". # If the string starts with 'const', reorder it to be after the first type. - paramdecl += prefix.replace('const ', '') + text + ' const' + tail + paramdecl += f"{prefix.replace('const ', '') + text} const{tail}" else: paramdecl += prefix + text + tail @@ -1039,7 +1080,7 @@ def getCParamTypeLength(self, param): # Allow for missing tag newLen = 0 - paramdecl = ' ' + noneStr(param.text) + paramdecl = f" {noneStr(param.text)}" for elem in param: text = noneStr(elem.text) tail = noneStr(elem.tail) @@ -1270,7 +1311,7 @@ def makeCDecls(self, cmd): # Change pointer type order from e.g. "const void *" to "void const *". # If the string starts with 'const', reorder it to be after the first type. 
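
# Sketch (not from the patch) of that reordering rule in isolation; the
# sample tokens are hypothetical.
def _demo_misra_const(prefix, type_text):
    # "const void *" style becomes "void const *" style.
    if prefix.find('const ') != -1:
        return f"{prefix.replace('const ', '')}{type_text} const "
    return prefix + type_text

assert _demo_misra_const('const ', 'void') == 'void const '
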
if (prefix.find('const ') != -1): - param += prefix.replace('const ', '') + t + ' const ' + param += f"{prefix.replace('const ', '') + t} const " else: param += prefix + t # Clear prefix for subsequent iterations diff --git a/specification/scripts/genxr.py b/specification/scripts/genxr.py index 74940eeb..c62a2d9c 100755 --- a/specification/scripts/genxr.py +++ b/specification/scripts/genxr.py @@ -17,6 +17,7 @@ from cgenerator import CGeneratorOptions, COutputGenerator from creflectiongenerator import CReflectionOutputGenerator from docgenerator import DocGeneratorOptions, DocOutputGenerator +from interfacedocgenerator import InterfaceDocGenerator from extensionmetadocgenerator import (ExtensionMetaDocGeneratorOptions, ExtensionMetaDocOutputGenerator) from generator import write @@ -52,7 +53,7 @@ def makeREstring(strings, default=None, strings_are_regex=False): if strings or default is None: if not strings_are_regex: strings = (re.escape(s) for s in strings) - return '^(' + '|'.join(strings) + ')$' + return f"^({'|'.join(strings)})$" return default @@ -177,7 +178,7 @@ def makeGenOpts(args): standalonePrefixString = standalonePrefixOverride # "XR_EXT_some_extension" becomes "ext_some_extension.h" - standaloneExtensionFileName = standaloneExtension[len('XR_'):].lower() + '.h' + standaloneExtensionFileName = f"{standaloneExtension[len('XR_'):].lower()}.h" genOpts['standalone_header'] = [ COutputGenerator, @@ -329,11 +330,11 @@ def make_reflection_options(fn): make_reflection_options('openxr_reflection_parent_structs.h'), ] - # OpenXR 1.0 - API include files for spec and ref pages + # OpenXR 1.1 - API include files for spec and ref pages # Overwrites include subdirectories in spec source tree # The generated include files do not include the calling convention # macros (apientry etc.), unlike the header files. - # Because the 1.0 core branch includes ref pages for extensions, + # Because the 1.1 main branch includes ref pages for extensions, # all the extension interfaces need to be generated, even though # none are used by the core spec itself. genOpts['apiinc'] = [ @@ -464,6 +465,25 @@ def make_reflection_options(fn): removeExtensions = None, emitExtensions = emitExtensionsPat) ] + # Version and extension interface docs for version/extension appendices + # Includes all extensions by default. 
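
# Each genOpts entry, like the one added below, pairs a generator class with
# an options object and is built by target name. A toy sketch (not from the
# patch) of that dispatch-table pattern, with hypothetical names:
class _DemoGenerator:
    def __init__(self, opts):
        self.opts = opts

    def run(self):
        return f"would generate {self.opts['filename']}"

_demo_targets = {'demoinc': (_DemoGenerator, {'filename': 'demoinc'})}
_cls, _opts = _demo_targets['demoinc']
assert _cls(_opts).run() == "would generate demoinc"
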
+ genOpts['interfaceinc'] = [ + InterfaceDocGenerator, + DocGeneratorOptions( + conventions = conventions, + filename = 'interfaceinc', + directory = directory, + genpath = None, + apiname = 'openxr', + profile = None, + versions = featuresPat, + emitversions = featuresPat, + defaultExtensions = None, + addExtensions = addExtensionsPat, + removeExtensions = removeExtensionsPat, + emitExtensions = emitExtensionsPat, + reparentEnums = False) + ] def genTarget(args): """Create an API generator and corresponding generator options based on @@ -616,7 +636,7 @@ def genTarget(args): else: startTimer(args.time) reg.apiGen() - endTimer(args.time, '* Time to generate ' + options.filename + ' =') + endTimer(args.time, f"* Time to generate {options.filename} =") if not args.quiet: logDiag('* Generated', options.filename) diff --git a/specification/scripts/hostsyncgenerator.py b/specification/scripts/hostsyncgenerator.py index 57e592f1..0eec535f 100644 --- a/specification/scripts/hostsyncgenerator.py +++ b/specification/scripts/hostsyncgenerator.py @@ -31,10 +31,10 @@ class HostSynchronizationOutputGenerator(OutputGenerator): } def makeParameterName(self, name): - return 'pname:' + name + return f"pname:{name}" def makeFLink(self, name): - return 'flink:' + name + return f"flink:{name}" def writeBlock(self, basename, title, contents): """Generate an include file. @@ -42,13 +42,14 @@ def writeBlock(self, basename, title, contents): - directory - subdirectory to put file in - basename - base name of the file - contents - contents of the file (Asciidoc boilerplate aside)""" + assert self.genOpts filename = Path(self.genOpts.directory) / basename self.logMsg('diag', '# Generating include file:', filename) with open(filename, 'w', encoding='utf-8') as fp: write(self.genOpts.conventions.warning_comment, file=fp) if contents: - write('.%s' % title, file=fp) + write(f'.{title}', file=fp) write('****', file=fp) write(contents, file=fp, end='') write('****', file=fp) @@ -58,13 +59,15 @@ def writeBlock(self, basename, title, contents): def writeInclude(self): "Generates the asciidoc include files.""" - self.writeBlock('parameters.txt', + assert self.genOpts + file_suffix = self.genOpts.conventions.file_suffix + self.writeBlock(f'parameters{file_suffix}', 'Externally Synchronized Parameters', self.threadsafety['parameters']) - self.writeBlock('parameterlists.txt', + self.writeBlock(f'parameterlists{file_suffix}', 'Externally Synchronized Parameter Lists', self.threadsafety['parameterlists']) - self.writeBlock('implicit.txt', + self.writeBlock(f'implicit{file_suffix}', 'Implicit Externally Synchronized Parameters', self.threadsafety['implicit']) @@ -73,7 +76,7 @@ def makeThreadSafetyBlocks(self, cmd, paramtext): protoname = cmd.find('proto/name').text # Find and add any parameters that are thread unsafe - explicitexternsyncparams = cmd.findall(paramtext + "[@externsync]") + explicitexternsyncparams = cmd.findall(f"{paramtext}[@externsync]") if explicitexternsyncparams is not None: for param in explicitexternsyncparams: self.makeThreadSafetyForParam(protoname, param) diff --git a/specification/scripts/indexgenerator.py b/specification/scripts/indexgenerator.py index a70de7e3..995f8679 100644 --- a/specification/scripts/indexgenerator.py +++ b/specification/scripts/indexgenerator.py @@ -41,16 +41,16 @@ def record_name(self, name_dict, name, extra_data=None): def output_name_dict(self, name_dict, title, prefix): anchor = title.lower().replace(' ', '-') - write('[[index-{}]]'.format(anchor), file=self.outFile) + 
write(f'[[index-{anchor}]]', file=self.outFile) - write('### ' + title, file=self.outFile) + write(f"### {title}", file=self.outFile) write('', file=self.outFile) for name in sorted(name_dict.keys()): - text = '* ' + prefix + name + text = f"* {prefix}{name}" extra_data = name_dict[name] if extra_data: - text += ' -- ' + extra_data + text += f" -- {extra_data}" write(text, file=self.outFile) write('', file=self.outFile) @@ -96,7 +96,7 @@ def genType(self, typeinfo, name, alias): elif category == 'bitmask': requiredEnum = typeElem.get('bitvalues') if requiredEnum is not None: - self.record_name(self.flags, name, "See also elink:{}".format(requiredEnum)) + self.record_name(self.flags, name, f"See also elink:{requiredEnum}") elif category == 'enum': self.record_name(self.enums, name) elif category == 'funcpointer': diff --git a/specification/scripts/interfacedocgenerator.py b/specification/scripts/interfacedocgenerator.py new file mode 100644 index 00000000..4b65024e --- /dev/null +++ b/specification/scripts/interfacedocgenerator.py @@ -0,0 +1,128 @@ +#!/usr/bin/python3 -i +# +# Copyright 2013-2024 The Khronos Group Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +from pathlib import Path +from generator import OutputGenerator, write +from parse_dependency import dependencyLanguageSpecMacros + +def interfaceDocSortKey(item): + if item == None: + return '\0' + return item.casefold() + +class InterfaceDocGenerator(OutputGenerator): + """InterfaceDocGenerator - subclass of OutputGenerator. + Generates AsciiDoc includes of the interfaces added by an API version + or extension.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.features = [] + + def beginFile(self, genOpts): + OutputGenerator.beginFile(self, genOpts) + assert self.genOpts + # Create subdirectory, if needed + self.makeDir(self.genOpts.directory) + + def beginFeature(self, interface, emit): + # Start processing in superclass + OutputGenerator.beginFeature(self, interface, emit) + + self.features.append( self.featureName ) + + def endFeature(self): + # Finish processing in superclass + OutputGenerator.endFeature(self) + + def writeNewInterfaces(self, feature, key, title, markup, fp): + dict = self.featureDictionary[feature][key] + + parentmarkup = markup + if key == 'enumconstant': + parentmarkup = 'elink:' + + if dict: + write(f"=== {title}", file=fp) + write('',file=fp) + + # Loop through required blocks, sorted so they start with "core" features + for required in sorted(dict, key = interfaceDocSortKey): + # 'required' may be a boolean expression of extension + # names. + # Currently this syntax is the same as asciidoc conditional + # syntax, but will eventually become more complex. + if required is not None: + # Rewrite with spec macros and xrefs applied to names + requiredlink = dependencyLanguageSpecMacros(required) + + # @@ A better approach would be to actually evaluate the + # logical expression at generation time. + # If the extensions required are not in the spec build, + # then do not include these requirements. + # This would support arbitrarily complex expressions, + # unlike asciidoc ifdef syntax. 
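
# Sketch (not from the patch) of that generation-time evaluation, using the
# parse_dependency module added in this patch; the extension names are
# hypothetical.
from parse_dependency import evaluateDependency

_supported = {"XR_VERSION_1_1", "XR_KHR_demo"}
assert evaluateDependency("XR_VERSION_1_1+(XR_KHR_demo,XR_EXT_demo)",
                          lambda name: name in _supported)
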
+ write(f"ifdef::{required}[]", file=fp) + write(f'If {requiredlink} is supported:', file=fp) + write('',file=fp) + + # Commands are relatively straightforward + if key == 'command': + for api in sorted(dict[required]): + write(f" * {markup}{api}", file=fp) + # Types and constants are potentially parented, so need to handle that + else: + # Loop through parents, sorted so they start with unparented items + for parent in sorted(dict[required], key = interfaceDocSortKey): + parentstring = '' + if parent: + parentstring = parentmarkup + f", {markup}".join(parent.split(',')) + write(f" * Extending {parentstring}:", file=fp) + for api in sorted(dict[required][parent]): + write(f" ** {markup}{api}", file=fp) + else: + for api in sorted(dict[required][parent]): + write(f" * {markup}{api}", file=fp) + + if required is not None: + write(f"endif::{required}[]", file=fp) + write('',file=fp) + + def makeInterfaceFile(self, feature): + """Generate a file containing feature interface documentation in + asciidoctor markup form. + + - feature - name of the feature being generated""" + + assert self.genOpts + fp = open( + Path(self.genOpts.directory) + / f"{feature}{self.genOpts.conventions.file_suffix}", + "w", + encoding="utf-8", + ) + + # Write out the lists of new interfaces added by the feature + self.writeNewInterfaces(feature, 'define', 'New Macros', 'dlink:', fp) + self.writeNewInterfaces(feature, 'basetype', 'New Base Types', 'basetype:',fp) + self.writeNewInterfaces(feature, 'handle', 'New Object Types', 'slink:', fp) + self.writeNewInterfaces(feature, 'command', 'New Commands', 'flink:', fp) + self.writeNewInterfaces(feature, 'struct', 'New Structures', 'slink:', fp) + self.writeNewInterfaces(feature, 'union', 'New Unions', 'slink:', fp) + self.writeNewInterfaces(feature, 'funcpointer', 'New Function Pointers','tlink:', fp) + self.writeNewInterfaces(feature, 'enum', 'New Enums', 'elink:', fp) + self.writeNewInterfaces(feature, 'bitmask', 'New Bitmasks', 'tlink:', fp) + self.writeNewInterfaces(feature, 'include', 'New Headers', 'code:', fp) + self.writeNewInterfaces(feature, 'enumconstant','New Enum Constants', 'ename:', fp) + + fp.close() + + def endFile(self): + # Generate metadoc feature files, in refpage and non-refpage form + for feature in self.features: + self.makeInterfaceFile(feature) + + OutputGenerator.endFile(self) diff --git a/specification/scripts/jinja_helpers.py b/specification/scripts/jinja_helpers.py index 67b6c678..de4a18bb 100644 --- a/specification/scripts/jinja_helpers.py +++ b/specification/scripts/jinja_helpers.py @@ -36,7 +36,7 @@ def _undecorate(name): def _quote_string(s): - return '"{}"'.format(s) + return f'"{s}"' def _base_name(name): @@ -49,13 +49,13 @@ def _collapse_whitespace(s): def _protect_begin(entity): if entity.protect_value: - return "#if {}".format(entity.protect_string) + return f"#if {entity.protect_string}" return "" def _protect_end(entity): if entity.protect_value: - return "#endif // {}".format(entity.protect_string) + return f"#endif // {entity.protect_string}" return "" def _remove_prefix(s: str, prefix: str): diff --git a/specification/scripts/nonbreaking-ext-titles.rb b/specification/scripts/nonbreaking-ext-titles.rb new file mode 100644 index 00000000..b9a327a1 --- /dev/null +++ b/specification/scripts/nonbreaking-ext-titles.rb @@ -0,0 +1,21 @@ +# Copyright 2023-2024 The Khronos Group Inc. +# SPDX-License-Identifier: Apache-2.0 + +# Adjust extension TOC entries to not break between section number and extension name. 
+
+require 'asciidoctor/extensions' unless RUBY_ENGINE == 'opal'
+
+include ::Asciidoctor
+
+class NonbreakingExtTitlesPostprocessor < Asciidoctor::Extensions::Postprocessor
+  def process document, output
+    if document.basebackend? 'html'
+      return output.gsub(/(?<secnum>[>][0-9]+\.[0-9]+\.) (?<extname>XR_[^<]+)/, '\k<secnum>&#160;\k<extname>')
+    end
+    output
+  end
+end
+
+Asciidoctor::Extensions.register do
+  postprocessor NonbreakingExtTitlesPostprocessor
+end
diff --git a/specification/scripts/parse_dependency.py b/specification/scripts/parse_dependency.py
new file mode 100755
index 00000000..08f04875
--- /dev/null
+++ b/specification/scripts/parse_dependency.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python3
+
+# Copyright 2022-2024 The Khronos Group Inc.
+# Copyright 2003-2019 Paul McGuire
+# SPDX-License-Identifier: MIT
+
+# apirequirements.py - parse 'depends' expressions in API XML
+# Supported methods:
+# dependency - the expression string
+#
+# evaluateDependency(dependency, isSupported) evaluates the expression,
+# returning a boolean result. isSupported takes an extension or version name
+# string and returns a boolean.
+#
+# dependencyLanguage(dependency) returns an English string equivalent
+# to the expression, suitable for header file comments.
+#
+# dependencyNames(dependency) returns a set of the extension and
+# version names in the expression.
+#
+# dependencyMarkup(dependency) returns a string containing asciidoctor
+# markup for English equivalent to the expression, suitable for extension
+# appendices.
+#
+# All may throw a ParseException if the expression cannot be parsed or is
+# not completely consumed by parsing.
+
+# Supported expressions at present:
+# - extension names
+# - '+' as AND connector
+# - ',' as OR connector
+# - parenthesization for grouping
+
+# Based on https://github.com/pyparsing/pyparsing/blob/master/examples/fourFn.py
+
+from pathlib import Path
+
+_GOT_PYPARSING = False
+try:
+    import pyparsing as pp
+    _GOT_PYPARSING = True
+except ImportError:
+    import sys
+    sys.path.append(
+        str(
+            Path(__file__).resolve().parent.parent.parent / "external" /
+            "python"))
+
+if not _GOT_PYPARSING:
+    import pyparsing as pp
+
+
+from pyparsing import (
+    Literal,
+    Word,
+    Group,
+    Forward,
+    alphas,
+    alphanums,
+    Regex,
+    ParseException,
+    CaselessKeyword,
+    Suppress,
+    delimitedList,
+    infixNotation,
+)
+import math
+import operator
+import re
+
+from apiconventions import APIConventions as APIConventions
+conventions = APIConventions()
+
+def markupPassthrough(name):
+    """Pass a name (leaf or operator) through without applying markup"""
+    return name
+
+def leafMarkupAsciidoc(name):
+    """Markup a leaf name as an asciidoc link to an API version or extension
+       anchor.
+ + - name - version or extension name""" + + return conventions.formatVersionOrExtension(name) + +def leafMarkupC(name): + """Markup a leaf name as a C expression, using conventions of the + Vulkan Validation Layers + + - name - version or extension name""" + + (apivariant, major, minor) = apiVersionNameMatch(name) + + if apivariant is not None: + return name + else: + return f'ext.{name}' + +opMarkupAsciidocMap = { '+' : 'and', ',' : 'or' } + +def opMarkupAsciidoc(op): + """Markup an operator as an asciidoc spec markup equivalent + + - op - operator ('+' or ',')""" + + return opMarkupAsciidocMap[op] + +opMarkupCMap = { '+' : '&&', ',' : '||' } + +def opMarkupC(op): + """Markup an operator as a C language equivalent + + - op - operator ('+' or ',')""" + + return opMarkupCMap[op] + + +# Unfortunately global to be used in pyparsing +exprStack = [] + +def push_first(toks): + """Push a token on the global stack + + - toks - first element is the token to push""" + + exprStack.append(toks[0]) + +# An identifier (version or extension name) +dependencyIdent = Word(alphanums + '_') + +# Infix expression for depends expressions +dependencyExpr = pp.infixNotation(dependencyIdent, + [ (pp.oneOf(', +'), 2, pp.opAssoc.LEFT), ]) + +# BNF grammar for depends expressions +_bnf = None +def dependencyBNF(): + """ + boolop :: '+' | ',' + extname :: Char(alphas) + atom :: extname | '(' expr ')' + expr :: atom [ boolop atom ]* + """ + global _bnf + if _bnf is None: + and_, or_ = map(Literal, '+,') + lpar, rpar = map(Suppress, '()') + boolop = and_ | or_ + + expr = Forward() + expr_list = delimitedList(Group(expr)) + atom = ( + boolop[...] + + ( + (dependencyIdent).setParseAction(push_first) + | Group(lpar + expr + rpar) + ) + ) + + expr <<= atom + (boolop + atom).setParseAction(push_first)[...] + _bnf = expr + return _bnf + + +# map operator symbols to corresponding arithmetic operations +_opn = { + '+': operator.and_, + ',': operator.or_, +} + +def evaluateStack(stack, isSupported): + """Evaluate an expression stack, returning a boolean result. + + - stack - the stack + - isSupported - function taking a version or extension name string and + returning True or False if that name is supported or not.""" + + op, num_args = stack.pop(), 0 + if isinstance(op, tuple): + op, num_args = op + + if op in '+,': + # Note: operands are pushed onto the stack in reverse order + op2 = evaluateStack(stack, isSupported) + op1 = evaluateStack(stack, isSupported) + return _opn[op](op1, op2) + elif op[0].isalpha(): + return isSupported(op) + else: + raise Exception(f'invalid op: {op}') + +def evaluateDependency(dependency, isSupported): + """Evaluate a dependency expression, returning a boolean result. 
+
+    - dependency - the expression
+    - isSupported - function taking a version or extension name string and
+      returning True or False if that name is supported or not."""
+
+    global exprStack
+    exprStack = []
+    results = dependencyBNF().parseString(dependency, parseAll=True)
+    val = evaluateStack(exprStack[:], isSupported)
+    return val
+
+def evalDependencyLanguage(stack, leafMarkup, opMarkup, parenthesize, root):
+    """Evaluate an expression stack, returning an English equivalent
+
+    - stack - the stack
+    - leafMarkup, opMarkup, parenthesize - same as dependencyLanguage
+    - root - True only if this is the outer (root) expression level"""
+
+    op, num_args = stack.pop(), 0
+    if isinstance(op, tuple):
+        op, num_args = op
+    if op in '+,':
+        # Could parenthesize, not needed yet
+        rhs = evalDependencyLanguage(stack, leafMarkup, opMarkup, parenthesize, root = False)
+        opname = opMarkup(op)
+        lhs = evalDependencyLanguage(stack, leafMarkup, opMarkup, parenthesize, root = False)
+        if parenthesize and not root:
+            return f'({lhs} {opname} {rhs})'
+        else:
+            return f'{lhs} {opname} {rhs}'
+    elif op[0].isalpha():
+        # This is an extension or feature name
+        return leafMarkup(op)
+    else:
+        raise Exception(f'invalid op: {op}')
+
+def dependencyLanguage(dependency, leafMarkup, opMarkup, parenthesize):
+    """Return an API dependency expression translated to a form suitable for
+       asciidoctor conditionals or header file comments.
+
+    - dependency - the expression
+    - leafMarkup - function taking an extension / version name and
+      returning an equivalent marked up version
+    - opMarkup - function taking an operator ('+' / ',') name and
+      returning an equivalent marked up version
+    - parenthesize - True if parentheses should be used in the resulting
+      expression, False otherwise"""
+
+    global exprStack
+    exprStack = []
+    results = dependencyBNF().parseString(dependency, parseAll=True)
+    return evalDependencyLanguage(exprStack, leafMarkup, opMarkup, parenthesize, root = True)
+
+# aka specmacros = False
+def dependencyLanguageComment(dependency):
+    """Return dependency expression translated to a form suitable for
+       comments in headers of emitted C code, as used by the
+       docgenerator."""
+    return dependencyLanguage(dependency, leafMarkup = markupPassthrough, opMarkup = opMarkupAsciidoc, parenthesize = True)
+
+# aka specmacros = True
+def dependencyLanguageSpecMacros(dependency):
+    """Return dependency expression translated to a form suitable for
+       asciidoctor markup in the specification, with names wrapped in
+       spec markup macros, as used by the interfacegenerator."""
+    return dependencyLanguage(dependency, leafMarkup = leafMarkupAsciidoc, opMarkup = opMarkupAsciidoc, parenthesize = False)
+
+def dependencyLanguageC(dependency):
+    """Return dependency expression translated to a form suitable for
+       use in C expressions"""
+    return dependencyLanguage(dependency, leafMarkup = leafMarkupC, opMarkup = opMarkupC, parenthesize = True)
+
+def evalDependencyNames(stack):
+    """Evaluate an expression stack, returning the set of extension and
+       feature names used in the expression.
+
+    - stack - the stack"""
+
+    op, num_args = stack.pop(), 0
+    if isinstance(op, tuple):
+        op, num_args = op
+    if op in '+,':
+        # Do not evaluate the operation. We only care about the names.
+        return evalDependencyNames(stack) | evalDependencyNames(stack)
+    elif op[0].isalpha():
+        return { op }
+    else:
+        raise Exception(f'invalid op: {op}')
+
+def dependencyNames(dependency):
+    """Return a set of the extension and version names in an API dependency
+       expression.
Used when determining transitive dependencies for spec + generation with specific extensions included. + + - dependency - the expression""" + + global exprStack + exprStack = [] + results = dependencyBNF().parseString(dependency, parseAll=True) + # print(f'names(): stack = {exprStack}') + return evalDependencyNames(exprStack) + +def markupTraverse(expr, level = 0, root = True): + """Recursively process a dependency in infix form, transforming it into + asciidoctor markup with expression nesting indicated by indentation + level. + + - expr - expression to process + - level - indentation level to render expression at + - root - True only on initial call""" + + if level > 0: + prefix = '{nbsp}{nbsp}' * level * 2 + ' ' + else: + prefix = '' + str = '' + + for elem in expr: + if isinstance(elem, pp.ParseResults): + if not root: + nextlevel = level + 1 + else: + # Do not indent the outer expression + nextlevel = level + + str = str + markupTraverse(elem, level = nextlevel, root = False) + elif elem in ('+', ','): + str = str + f'{prefix}{opMarkupAsciidoc(elem)} +\n' + else: + str = str + f'{prefix}{leafMarkupAsciidoc(elem)} +\n' + + return str + +def dependencyMarkup(dependency): + """Return asciidoctor markup for a human-readable equivalent of an API + dependency expression, suitable for use in extension appendix + metadata. + + - dependency - the expression""" + + parsed = dependencyExpr.parseString(dependency) + return markupTraverse(parsed) + +if __name__ == "__main__": + for str in [ 'VK_VERSION_1_0', 'cl_khr_extension_name', 'XR_VERSION_3_2', 'CL_VERSION_1_0' ]: + print(f'{str} -> {conventions.formatVersionOrExtension(str)}') + import sys + sys.exit(0) + + termdict = { + 'VK_VERSION_1_1' : True, + 'false' : False, + 'true' : True, + } + termSupported = lambda name: name in termdict and termdict[name] + + def test(dependency, expected): + val = False + try: + val = evaluateDependency(dependency, termSupported) + except ParseException as pe: + print(dependency, f'failed parse: {dependency}') + except Exception as e: + print(dependency, f'failed eval: {dependency}') + + if val == expected: + True + # print(f'{dependency} = {val} (as expected)') + else: + print(f'{dependency} ERROR: {val} != {expected}') + + # Verify expressions are evaluated left-to-right + + test('false,false+false', False) + test('false,false+true', False) + test('false,true+false', False) + test('false,true+true', True) + test('true,false+false', False) + test('true,false+true', True) + test('true,true+false', False) + test('true,true+true', True) + + test('false,(false+false)', False) + test('false,(false+true)', False) + test('false,(true+false)', False) + test('false,(true+true)', True) + test('true,(false+false)', True) + test('true,(false+true)', True) + test('true,(true+false)', True) + test('true,(true+true)', True) + + + test('false+false,false', False) + test('false+false,true', True) + test('false+true,false', False) + test('false+true,true', True) + test('true+false,false', False) + test('true+false,true', True) + test('true+true,false', True) + test('true+true,true', True) + + test('false+(false,false)', False) + test('false+(false,true)', False) + test('false+(true,false)', False) + test('false+(true,true)', False) + test('true+(false,false)', False) + test('true+(false,true)', True) + test('true+(true,false)', True) + test('true+(true,true)', True) + + # Check formatting + for dependency in [ + #'true', + #'true+true+false', + 'true+false', + 'true+(true+false),(false,true)', + 
#'true+((true+false),(false,true))', + 'VK_VERSION_1_0+VK_KHR_display', + #'VK_VERSION_1_1+(true,false)', + ]: + print(f'expr = {dependency}\n{dependencyMarkup(dependency)}') + print(f' spec language = {dependencyLanguageSpecMacros(dependency)}') + print(f' comment language = {dependencyLanguageComment(dependency)}') + print(f' C language = {dependencyLanguageC(dependency)}') + print(f' names = {dependencyNames(dependency)}') + print(f' value = {evaluateDependency(dependency, termSupported)}') diff --git a/specification/scripts/pdf_chapter_diff.py b/specification/scripts/pdf_chapter_diff.py index 2cd8ba68..8a50c96e 100755 --- a/specification/scripts/pdf_chapter_diff.py +++ b/specification/scripts/pdf_chapter_diff.py @@ -276,8 +276,7 @@ def get_section_range_pairs(orig_section, new_pdf): """Return MatchingSection for a section.""" other_section = new_pdf.find_corresponding_section(orig_section) if not other_section: - print("Skipping section {} - no match in the other doc!".format( - orig_section.title)) + print(f"Skipping section {orig_section.title} - no match in the other doc!") return None return MatchingSection( title=orig_section.title, @@ -289,8 +288,7 @@ def get_section_page_pairs(orig_section, new_pdf): """Return (orig_page_num, new_page_num) pairs for each page in section.""" other_section = new_pdf.find_corresponding_section(orig_section) if not other_section: - print("Skipping section {} - no match in the other doc!".format( - orig_section.title)) + print(f"Skipping section {orig_section.title} - no match in the other doc!") return [] return zip_longest(orig_section.page_numbers, other_section.page_numbers) @@ -454,7 +452,7 @@ def generate_diff_from_pairs(self, pairs): SPECDIR = Path(__file__).resolve().parent.parent assert SPECDIR.name == "specification" ORIG = SPECDIR / 'compare-base' / 'openxr.pdf' - NEW = SPECDIR / 'generated' / 'out' / '1.0' / 'openxr.pdf' + NEW = SPECDIR / 'generated' / 'out' / '1.1' / 'openxr.pdf' DIFFDIR = SPECDIR / 'diffs' DIFFDIR.mkdir(exist_ok=True) @@ -474,7 +472,7 @@ def is_separate_diff_section(bookmark): img = pdf_diff.render_changes(matching.changes, ('strike', 'underline'), 900) - fn = "Diff part {:02d} - {}.diff.png".format(i, matching.title) + fn = f"Diff part {i:02d} - {matching.title}.diff.png" full_path = DIFFDIR / fn print('Writing', full_path.relative_to(SPECDIR)) diff --git a/specification/scripts/pygenerator.py b/specification/scripts/pygenerator.py index 286ed6c5..72027d15 100644 --- a/specification/scripts/pygenerator.py +++ b/specification/scripts/pygenerator.py @@ -71,8 +71,8 @@ def endFile(self): # human-readable and stable-ordered write(self.beginDict('mapDict'), file=self.outFile) for baseType in sorted(self.mapDict.keys()): - write('{} : {},'.format(enquote(baseType), - pprint.pformat(self.mapDict[baseType])), file=self.outFile) + write(f'{enquote(baseType)} : {pprint.pformat(self.mapDict[baseType])},', + file=self.outFile) write(self.endDict(), file=self.outFile) # List of included feature names @@ -87,8 +87,8 @@ def endFile(self): for api in sorted(self.apimap): # Sort requirements by first feature in each one deps = sorted(self.apimap[api], key = lambda dep: dep[0]) - reqs = ', '.join('({}, {})'.format(enquote(dep[0]), enquote(dep[1])) for dep in deps) - write('{} : [{}],'.format(enquote(api), reqs), file=self.outFile) + reqs = ', '.join(f'({enquote(dep[0])}, {enquote(dep[1])})' for dep in deps) + write(f'{enquote(api)} : [{reqs}],', file=self.outFile) write(self.endDict(), file=self.outFile) super().endFile() diff --git 
a/specification/scripts/reflib.py b/specification/scripts/reflib.py index 5543fcbc..65a8edaa 100644 --- a/specification/scripts/reflib.py +++ b/specification/scripts/reflib.py @@ -47,14 +47,14 @@ def logHeader(severity): """Generate prefix for a diagnostic line using metadata and severity""" global logSourcefile, logProcname, logLine - msg = severity + ': ' + msg = f"{severity}: " if logProcname: - msg = msg + ' in ' + logProcname + msg = f"{msg} in {logProcname}" if logSourcefile: - msg = msg + ' for ' + logSourcefile + msg = f"{msg} for {logSourcefile}" if logLine: - msg = msg + ' line ' + str(logLine) - return msg + ' ' + msg = f"{msg} line {str(logLine)}" + return f"{msg} " def setLogFile(setDiag, setWarn, filename): """Set the file handle to log either or both warnings and diagnostics to. @@ -164,9 +164,9 @@ def printPageInfoField(desc, line, file): - line - field value or None - file - indexed by line""" if line is not None: - logDiag(desc + ':', line + 1, '\t-> ', file[line], end='') + logDiag(f"{desc}:", line + 1, '\t-> ', file[line], end='') else: - logDiag(desc + ':', line) + logDiag(f"{desc}:", line) def printPageInfo(pi, file): """Print out fields of a pageInfo struct @@ -185,7 +185,7 @@ def printPageInfo(pi, file): printPageInfoField('BODY ', pi.body, file) printPageInfoField('VALIDITY', pi.validity, file) printPageInfoField('END ', pi.end, file) - logDiag('REFS: "' + pi.refs + '"') + logDiag(f'REFS: "{pi.refs}"') def prevPara(file, line): """Go back one paragraph from the specified line and return the line number @@ -284,8 +284,8 @@ def fixupRefs(pageMap, specFile, file): # # line to the include line, so autogeneration can at least # # pull the include out, but mark it not to be extracted. # # Examples include the host sync table includes in - # # chapters/fundamentals.txt and the table of Vk*Flag types in - # # appendices/boilerplate.txt. + # # chapters/fundamentals.adoc and the table of Vk*Flag types in + # # appendices/boilerplate.adoc. # if pi.begin is None and pi.validity is None and pi.end is None: # pi.begin = pi.include # pi.extractPage = False @@ -362,7 +362,7 @@ def fixupRefs(pageMap, specFile, file): logDiag('Skipping check for embedding in:', embed.name) continue if embed.begin is None or embed.end is None: - logDiag('fixupRefs:', name + ':', + logDiag('fixupRefs:', f"{name}:", 'can\'t compare to unanchored ref:', embed.name, 'in', specFile, 'at line', pi.include ) printPageInfo(pi, file) @@ -374,7 +374,7 @@ def fixupRefs(pageMap, specFile, file): 'inside:', embedName, 'in', specFile, 'at line', pi.include ) pi.embed = embed.name - pi.Warning = 'Embedded in definition for ' + embed.name + pi.Warning = f"Embedded in definition for {embed.name}" break else: logDiag('fixupRefs: No embed match for:', name, @@ -382,6 +382,18 @@ def fixupRefs(pageMap, specFile, file): 'at line', pi.include) +def compatiblePageTypes(refpage_type, pagemap_type): + """Returns whether two refpage 'types' (categories) are compatible - + this is only true for 'consts' and 'enums' types.""" + + constsEnums = [ 'consts', 'enums' ] + + if refpage_type == pagemap_type: + return True + if refpage_type in constsEnums and pagemap_type in constsEnums: + return True + return False + # Patterns used to recognize interesting lines in an asciidoc source file. # These patterns are only compiled once. 
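As an aside, the new `compatiblePageTypes()` helper above relaxes the strict type-equality check used by `findRefs()` below. A few illustrative assertions, following directly from its implementation:

```python
# 'consts' and 'enums' refpage categories are treated as interchangeable;
# any other pairing must match exactly.
assert compatiblePageTypes('enums', 'enums')
assert compatiblePageTypes('consts', 'enums')
assert compatiblePageTypes('enums', 'consts')
assert compatiblePageTypes('protos', 'protos')
assert not compatiblePageTypes('structs', 'enums')
```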
endifPat = re.compile(r'^endif::(?P[\w_+,]+)\[\]') @@ -393,13 +405,13 @@ def fixupRefs(pageMap, specFile, file): errorPat = re.compile(r'^// *refError') # This regex transplanted from check_spec_links -# It looks for either OpenXR or Vulkan generated file conventions, and for -# the api/validity include (generated_type), protos/struct/etc path -# (category), and API name (entity_name). It could be put into the API -# conventions object. +# It looks for various generated file conventions, and for the api/validity +# include (generated_type), protos/struct/etc path (category), and API name +# (entity_name). +# It could be put into the API conventions object, instead of being +# generalized for all the different specs. INCLUDE = re.compile( - r'include::(?P((../){1,4}|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).txt[\[][\]]') - + r'include::(?P((../){1,4}|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+)\.(adoc|txt)[\[][\]]') def findRefs(file, filename): """Identify reference pages in a list of strings, returning a dictionary of @@ -543,8 +555,8 @@ def findRefs(file, filename): if gen_type == 'validity': logDiag('Matched validity pattern') if pi is not None: - if pi.type and refpage_type != pi.type: - logWarn('ERROR: pageMap[' + name + '] type:', + if pi.type and not compatiblePageTypes(refpage_type, pi.type): + logWarn(f"ERROR: pageMap[{name}] type:", pi.type, 'does not match type:', refpage_type) pi.type = refpage_type pi.validity = line @@ -560,8 +572,8 @@ def findRefs(file, filename): if pi is not None: if pi.include is not None: logDiag('found multiple includes for this block') - if pi.type and refpage_type != pi.type: - logWarn('ERROR: pageMap[' + name + '] type:', + if pi.type and not compatiblePageTypes(refpage_type, pi.type): + logWarn(f"ERROR: pageMap[{name}] type:", pi.type, 'does not match type:', refpage_type) pi.type = refpage_type pi.include = line diff --git a/specification/scripts/reflow.py b/specification/scripts/reflow.py index 79e24849..95298b3b 100755 --- a/specification/scripts/reflow.py +++ b/specification/scripts/reflow.py @@ -60,10 +60,10 @@ # Special case of markup ending a paragraph, used to track the current # command/structure. This allows for either OpenXR or Vulkan API path # conventions. Nominally it should use the file suffix defined by the API -# conventions (conventions.file_suffix), except that XR uses '.txt' for +# conventions (conventions.file_suffix), except that XR used to use '.txt' for # generated API include files, not '.adoc' like its other includes. includePat = re.compile( - r'include::(?P((../){1,4}|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).txt[\[][\]]') + r'include::(?P((../){1,4}|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).(txt|adoc)[\[][\]]') # Find the first pname: or code: pattern in a Valid Usage statement pnamePat = re.compile(r'pname:(?P\{?\w+\}?)') @@ -341,7 +341,7 @@ def reflowPara(self): # the current line no matter its length. (addWord, closeLine, startLine) = (True, True, False) - elif beginBullet.match(word + ' '): + elif beginBullet.match(f"{word} "): # If the word *is* a bullet point, add it to # the current line no matter its length. # This avoids an innocent inline '-' or '*' @@ -366,7 +366,7 @@ def reflowPara(self): # Add a word to the current line if addWord: if outLine: - outLine += ' ' + word + outLine += f" {word}" outLineLen = newLen else: # Fall through to startLine case if there is no @@ -378,7 +378,7 @@ def reflowPara(self): # will ever have contents. 
         if closeLine:
             if outLine:
-                outPara.append(outLine + '\n')
+                outPara.append(f"{outLine}\n")
                 outLine = None

         # Start a new line and add a word to it
@@ -392,7 +392,7 @@ def reflowPara(self):

         # Add this line to the output paragraph.
         if outLine:
-            outPara.append(outLine + '\n')
+            outPara.append(f"{outLine}\n")

         return outPara

@@ -411,7 +411,7 @@ def emitPara(self):
                     # Check for nested bullet points. These should not be
                     # assigned VUIDs, nor present at all, because they break
                     # the VU extractor.
-                    logWarn(self.filename + ': Invalid nested bullet point in VU block:', self.para[0])
+                    logWarn(f"{self.filename}: Invalid nested bullet point in VU block:", self.para[0])
                 elif self.vuPrefix not in self.para[0]:
                     # If:
                     # - a tag is not already present, and
@@ -721,8 +721,8 @@ def reflowFile(filename, args):
                     and not beginBullet.match(line)
                     and conditionalStart.match(lines[state.lineNumber-2])):

-                logWarn('Detected embedded Valid Usage conditional: {}:{}'.format(
-                    filename, state.lineNumber - 1))
+                logWarn('Detected embedded Valid Usage conditional:',
+                        f'{filename}:{state.lineNumber - 1}')

                 # Keep track of warning check count
                 args.warnCount = args.warnCount + 1
@@ -753,12 +753,12 @@ def reflowAllAdocFiles(folder_to_reflow, args):
             reflowFile(file_path, args)
         for subdir in subdirs:
             sub_folder = os.path.join(root, subdir)
-            print('Sub-folder = %s' % sub_folder)
+            print(f'Sub-folder = {sub_folder}')
             if subdir.lower() not in conventions.spec_no_reflow_dirs:
-                print('   Parsing = %s' % sub_folder)
+                print(f'   Parsing = {sub_folder}')
                 reflowAllAdocFiles(sub_folder, args)
             else:
-                print('  Skipping = %s' % sub_folder)
+                print(f'  Skipping = {sub_folder}')

 # Patterns used to recognize interesting lines in an asciidoc source file.
 # These patterns are only compiled once.
@@ -815,7 +815,7 @@ def reflowAllAdocFiles(folder_to_reflow, args):
                         help='Set the suffix added to updated file names (default: none)')
     parser.add_argument('files', metavar='filename', nargs='*',
                         help='a filename to reflow text in')
-    parser.add_argument('--version', action='version', version='%(prog)s 1.1')
+    parser.add_argument('--version', action='version', version='%(prog)s 1.1')

     args = parser.parse_args()

@@ -883,9 +883,9 @@ def reflowAllAdocFiles(folder_to_reflow, args):
     for vuid in sorted(args.vuidDict):
         found = args.vuidDict[vuid]
         if len(found) > 1:
-            logWarn('Duplicate VUID number {} found in files:'.format(vuid))
+            logWarn(f'Duplicate VUID number {vuid} found in files:')
             for (file, line) in found:
-                logWarn('  {}: {}'.format(file, line))
+                logWarn(f'  {file}: {line}')
             dupVUIDs = dupVUIDs + 1

     if dupVUIDs > 0:
@@ -908,12 +908,8 @@ def reflowAllAdocFiles(folder_to_reflow, args):
             print('# Key is branch name, value is [ start, end, nextfree ]', file=reflow_count_file)
             print('vuidCounts = {', file=reflow_count_file)
             for key in sorted(vuidCounts):
-                print("    '{}': [ {}, {}, {} ],".format(
-                    key,
-                    vuidCounts[key][0],
-                    vuidCounts[key][1],
-                    vuidCounts[key][2]),
-                    file=reflow_count_file)
+                print(f"    '{key}': [ {', '.join(str(count) for count in vuidCounts[key])} ],",
+                      file=reflow_count_file)
             print('}', file=reflow_count_file)
             reflow_count_file.close()
         except:
diff --git a/specification/scripts/reg.py b/specification/scripts/reg.py
index 90359b2a..bb3ec774 100755
--- a/specification/scripts/reg.py
+++ b/specification/scripts/reg.py
@@ -15,6 +15,7 @@
 from generator import GeneratorOptions, OutputGenerator, noneStr, write
 from apiconventions import APIConventions

+
 def apiNameMatch(str, supported):
     """Return whether a required api name matches a pattern specified for an
    XML <feature> 'api' attribute or <extension> 'supported' attribute.
@@ -88,6 +89,82 @@ def matchAPIProfile(api, profile, elem):
     return True

+
+def mergeAPIs(tree, fromApiNames, toApiName):
+    """Merge multiple APIs using the precedence order [toApiName] +
+    fromApiNames. Also deletes <remove> elements.
+
+        tree - Element at the root of the hierarchy to merge.
+        fromApiNames - list of strings of API names to merge from.
+        toApiName - string name of the API to merge into."""
+
+    stack = deque()
+    stack.append(tree)
+
+    while len(stack) > 0:
+        parent = stack.pop()
+
+        for child in parent.findall('*'):
+            if child.tag == 'remove':
+                # Remove <remove> elements
+                parent.remove(child)
+            else:
+                stack.append(child)
+
+            supportedList = child.get('supported')
+            if supportedList:
+                supportedList = supportedList.split(',')
+                for apiName in [toApiName] + fromApiNames:
+                    if apiName in supportedList:
+                        child.set('supported', toApiName)
+
+            if child.get('api'):
+                definitionName = None
+                definitionVariants = []
+
+                # Keep only one definition with the same name if there are multiple definitions
+                if child.tag in ['type']:
+                    if child.get('name') is not None:
+                        definitionName = child.get('name')
+                        definitionVariants = parent.findall(f"{child.tag}[@name='{definitionName}']")
+                    else:
+                        definitionName = child.find('name').text
+                        definitionVariants = parent.findall(f"{child.tag}/name[.='{definitionName}']/..")
+                elif child.tag in ['member', 'param']:
+                    definitionName = child.find('name').text
+                    definitionVariants = parent.findall(f"{child.tag}/name[.='{definitionName}']/..")
+                elif child.tag in ['enum', 'feature']:
+                    definitionName = child.get('name')
+                    definitionVariants = parent.findall(f"{child.tag}[@name='{definitionName}']")
+                elif child.tag in ['require']:
+                    definitionName = child.get('feature')
+                    definitionVariants = parent.findall(f"{child.tag}[@feature='{definitionName}']")
+                elif child.tag in ['command']:
+                    definitionName = child.find('proto/name').text
+                    definitionVariants = parent.findall(f"{child.tag}/proto/name[.='{definitionName}']/../..")
+
+                if definitionName:
+                    bestMatchApi = None
+                    requires = None
+                    for apiName in [toApiName] + fromApiNames:
+                        for variant in definitionVariants:
+                            # Keep any requires attributes from the target API
+                            if variant.get('requires') and variant.get('api') == apiName:
+                                requires = variant.get('requires')
+                            # Find the best matching definition
+                            if apiName in variant.get('api').split(',') and bestMatchApi is None:
+                                bestMatchApi = variant.get('api')
+
+                    if bestMatchApi:
+                        for variant in definitionVariants:
+                            if variant.get('api') != bestMatchApi:
+                                # Only keep best matching definition
+                                parent.remove(variant)
+                            else:
+                                # Add requires attribute from the target API if it is not overridden
+                                if requires is not None and variant.get('requires') is None:
+                                    variant.set('requires', requires)
+                                variant.set('api', toApiName)
+
+
 def stripNonmatchingAPIs(tree, apiName, actuallyDelete = True):
     """Remove tree Elements with 'api' attributes matching apiName.
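To make the merge semantics above concrete, here is a small hedged sketch (the registry contents and API names are invented, and it assumes `specification/scripts` is on `sys.path` so `mergeAPIs` can be imported): given two variants of the same `<type>`, the variant matching the highest-precedence API (`toApiName` first, then `fromApiNames` in order) is kept and relabeled with the target API name, and the other variants are removed.

```python
import xml.etree.ElementTree as etree
from reg import mergeAPIs  # assumes specification/scripts is importable

tree = etree.fromstring(
    '<registry><types>'
    '<type api="vulkan" name="VkFoo" requires="vk_platform"/>'
    '<type api="vulkansc" name="VkFoo"/>'
    '</types></registry>'
)

# Fold 'vulkansc'-specific definitions into 'vulkan'. Precedence is
# ['vulkan', 'vulkansc'], so the api="vulkan" variant wins and the
# api="vulkansc" duplicate is removed.
mergeAPIs(tree, ['vulkansc'], 'vulkan')

print(etree.tostring(tree, encoding='unicode'))
# -> <registry><types><type api="vulkan" name="VkFoo" requires="vk_platform" /></types></registry>
```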
@@ -300,6 +377,28 @@ def __init__(self, elem, condition):
         # Need to save the condition here when it is known
         self.condition = condition

+class SyncStageInfo(BaseInfo):
+    """Registry information about <syncstage>."""
+
+    def __init__(self, elem, condition):
+        BaseInfo.__init__(self, elem)
+        # Need to save the condition here when it is known
+        self.condition = condition
+
+class SyncAccessInfo(BaseInfo):
+    """Registry information about <syncaccess>."""
+
+    def __init__(self, elem, condition):
+        BaseInfo.__init__(self, elem)
+        # Need to save the condition here when it is known
+        self.condition = condition
+
+class SyncPipelineInfo(BaseInfo):
+    """Registry information about <syncpipeline>."""
+
+    def __init__(self, elem):
+        BaseInfo.__init__(self, elem)
+
 class Registry:
     """Object representing an API registry, loaded from an XML file."""

@@ -356,6 +455,15 @@ def __init__(self, gen=None, genOpts=None):
         self.formatsdict = {}
         "dictionary of FeatureInfo objects for `<format>` elements keyed by VkFormat name"

+        self.syncstagedict = {}
+        "dictionary of Sync*Info objects for `<syncstage>` elements keyed by VkPipelineStageFlagBits2 name"
+
+        self.syncaccessdict = {}
+        "dictionary of Sync*Info objects for `<syncaccess>` elements keyed by VkAccessFlagBits2 name"
+
+        self.syncpipelinedict = {}
+        "dictionary of Sync*Info objects for `<syncpipeline>` elements keyed by pipeline type name"
+
         self.emitFeatures = False
         """True to actually emit features for a version / extension,
         or False to just treat them as emitted"""
@@ -401,10 +509,10 @@ def addElementInfo(self, elem, info, infoName, dictionary):

         Intended for internal use only.

-        - elem - `<type>`/`<group>`/`<enum>`/`<command>`/`<feature>`/`<extension>`/`<spirvextension>`/`<spirvcapability>`/`<format>` Element
-        - info - corresponding {Type|Group|Enum|Cmd|Feature|Spirv|Format}Info object
-        - infoName - 'type' / 'group' / 'enum' / 'command' / 'feature' / 'extension' / 'spirvextension' / 'spirvcapability' / 'format'
-        - dictionary - self.{type|group|enum|cmd|api|ext|format|spirvext|spirvcap}dict
+        - elem - `<type>`/`<group>`/`<enum>`/`<command>`/`<feature>`/`<extension>`/`<spirvextension>`/`<spirvcapability>`/`<format>`/`<syncstage>`/`<syncaccess>`/`<syncpipeline>` Element
+        - info - corresponding {Type|Group|Enum|Cmd|Feature|Spirv|Format|SyncStage|SyncAccess|SyncPipeline}Info object
+        - infoName - 'type' / 'group' / 'enum' / 'command' / 'feature' / 'extension' / 'spirvextension' / 'spirvcapability' / 'format' / 'syncstage' / 'syncaccess' / 'syncpipeline'
+        - dictionary - self.{type|group|enum|cmd|api|ext|format|spirvext|spirvcap|sync}dict

         The dictionary key is the element 'name' attribute."""

@@ -448,8 +556,10 @@ def parseTree(self):
             raise RuntimeError("Tree not initialized!")
         self.reg = self.tree.getroot()

-        # Preprocess the tree by removing all elements with non-matching
-        # 'api' attributes by breadth-first tree traversal.
+        # Preprocess the tree in one of the following ways:
+        # - either merge a set of APIs to another API based on their 'api' attributes
+        # - or remove all elements with non-matching 'api' attributes
+        # The preprocessing happens through a breadth-first tree traversal.
         # This is a blunt hammer, but eliminates the need to track and test
         # the apis deeper in processing to select the correct elements and
         # avoid duplicates.
         # overlapping api attributes, or where one element has an api
         # attribute and the other does not.
- stripNonmatchingAPIs(self.reg, self.genOpts.apiname, actuallyDelete = True) + if self.genOpts.mergeApiNames: + mergeAPIs(self.reg, self.genOpts.mergeApiNames.split(','), self.genOpts.apiname) + else: + stripNonmatchingAPIs(self.reg, self.genOpts.apiname, actuallyDelete = True) # Create dictionary of registry types from toplevel tags # and add 'name' attribute to each tag (where missing) @@ -602,6 +715,9 @@ def parseTree(self): enumInfo = EnumInfo(enum) self.addElementInfo(enum, enumInfo, 'enum', self.enumdict) + sync_pipeline_stage_condition = dict() + sync_access_condition = dict() + self.extensions = self.reg.findall('extensions/extension') self.extdict = {} for feature in self.extensions: @@ -646,9 +762,28 @@ def parseTree(self): if enum.get('alias'): format_name = enum.get('alias') if format_name in format_condition: - format_condition[format_name] += "," + featureInfo.name + format_condition[format_name] += f",{featureInfo.name}" else: format_condition[format_name] = featureInfo.name + elif groupName == "VkPipelineStageFlagBits2": + stage_flag = enum.get('name') + if enum.get('alias'): + stage_flag = enum.get('alias') + featureName = elem.get('depends') if elem.get('depends') is not None else featureInfo.name + if stage_flag in sync_pipeline_stage_condition: + sync_pipeline_stage_condition[stage_flag] += "," + featureName + else: + sync_pipeline_stage_condition[stage_flag] = featureName + elif groupName == "VkAccessFlagBits2": + access_flag = enum.get('name') + if enum.get('alias'): + access_flag = enum.get('alias') + featureName = elem.get('depends') if elem.get('depends') is not None else featureInfo.name + if access_flag in sync_access_condition: + sync_access_condition[access_flag] += "," + featureName + else: + sync_access_condition[access_flag] = featureName + addEnumInfo = True elif enum.get('value') or enum.get('bitpos') or enum.get('alias'): # self.gen.logMsg('diag', 'Adding extension constant "enum"', @@ -658,24 +793,6 @@ def parseTree(self): enumInfo = EnumInfo(enum) self.addElementInfo(enum, enumInfo, 'enum', self.enumdict) - # Construct a "validextensionstructs" list for parent structures - # based on "structextends" tags in child structures - disabled_types = [] - for disabled_ext in self.reg.findall('extensions/extension[@supported="disabled"]'): - for type_elem in disabled_ext.findall("*/type"): - disabled_types.append(type_elem.get('name')) - for type_elem in self.reg.findall('types/type'): - if type_elem.get('name') not in disabled_types: - # The structure type this may be chained to. 
- struct_extends = type_elem.get('structextends') - if struct_extends is not None: - for parent in struct_extends.split(','): - # self.gen.logMsg('diag', type.get('name'), 'extends', parent) - self.validextensionstructs[parent].append(type_elem.get('name')) - # Sort the lists so they do not depend on the XML order - for parent in self.validextensionstructs: - self.validextensionstructs[parent].sort() - # Parse out all spirv tags in dictionaries # Use addElementInfo to catch duplicates for spirv in self.reg.findall('spirvextensions/spirvextension'): @@ -693,6 +810,26 @@ def parseTree(self): formatInfo = FormatInfo(format, condition) self.addElementInfo(format, formatInfo, 'format', self.formatsdict) + for stage in self.reg.findall('sync/syncstage'): + condition = None + stage_flag = stage.get('name') + if stage_flag in sync_pipeline_stage_condition: + condition = sync_pipeline_stage_condition[stage_flag] + syncInfo = SyncStageInfo(stage, condition) + self.addElementInfo(stage, syncInfo, 'syncstage', self.syncstagedict) + + for access in self.reg.findall('sync/syncaccess'): + condition = None + access_flag = access.get('name') + if access_flag in sync_access_condition: + condition = sync_access_condition[access_flag] + syncInfo = SyncAccessInfo(access, condition) + self.addElementInfo(access, syncInfo, 'syncaccess', self.syncaccessdict) + + for pipeline in self.reg.findall('sync/syncpipeline'): + syncInfo = SyncPipelineInfo(pipeline) + self.addElementInfo(pipeline, syncInfo, 'syncpipeline', self.syncpipelinedict) + def dumpReg(self, maxlen=120, filehandle=sys.stdout): """Dump all the dictionaries constructed from the Registry object. @@ -814,7 +951,7 @@ def markEnumRequired(self, enumname, required): # Look up the Info with matching groupName if groupName in self.groupdict: gi = self.groupdict[groupName] - gienum = gi.elem.find("enum[@name='" + enumname + "']") + gienum = gi.elem.find(f"enum[@name='{enumname}']") if gienum is not None: # Remove copy of this enum from the group gi.elem.remove(gienum) @@ -949,6 +1086,8 @@ def getAlias(self, elem, dict): if alias is None: name = elem.get('name') typeinfo = self.lookupElementInfo(name, dict) + if not typeinfo: + self.gen.logMsg('error', name, 'is not a known name') alias = typeinfo.elem.get('alias') return alias @@ -1000,9 +1139,12 @@ def fillFeatureDictionary(self, interface, featurename, api, profile): # Determine the required extension or version needed for a require block # Assumes that only one of these is specified - required_key = require.get('feature') - if required_key is None: - required_key = require.get('extension') + # 'extension', and therefore 'required_key', may be a boolean + # expression of extension names. + # 'required_key' is used only as a dictionary key at + # present, and passed through to the script generators, so + # they must be prepared to parse that boolean expression. 
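As the comment above notes, the `depends` attribute (and hence `required_key`) may be a boolean expression rather than a single feature or extension name. A hedged sketch of how a downstream consumer could handle such a key using the `parse_dependency.py` helpers added earlier in this patch (the import assumes `specification/scripts` is on `sys.path`; the expression is one of that file's own self-test samples):

```python
from parse_dependency import dependencyNames, evaluateDependency

required_key = 'VK_VERSION_1_0+VK_KHR_display'

# All names mentioned in the expression, regardless of operators:
assert dependencyNames(required_key) == {'VK_VERSION_1_0', 'VK_KHR_display'}

# Evaluate against a support predicate; '+' is AND, ',' is OR,
# evaluated left to right.
supported = {'VK_VERSION_1_0'}
assert evaluateDependency(required_key, lambda name: name in supported) is False
```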
+ required_key = require.get('depends') # Loop over types, enums, and commands in the tag for typeElem in require.findall('type'): @@ -1016,6 +1158,8 @@ def fillFeatureDictionary(self, interface, featurename, api, profile): # Resolve the type info to the actual type, so we get an accurate read for 'structextends' while alias: typeinfo = self.lookupElementInfo(alias, self.typedict) + if not typeinfo: + raise RuntimeError(f"Missing alias {alias}") alias = typeinfo.elem.get('alias') typecat = typeinfo.elem.get('category') @@ -1026,8 +1170,7 @@ def fillFeatureDictionary(self, interface, featurename, api, profile): self.gen.featureDictionary[featurename][typecat][required_key][typeextends] = [] self.gen.featureDictionary[featurename][typecat][required_key][typeextends].append(typename) else: - self.gen.logMsg('warn', 'fillFeatureDictionary: NOT filling for {}'.format(typename)) - + self.gen.logMsg('warn', f'fillFeatureDictionary: NOT filling for {typename}') for enumElem in require.findall('enum'): enumname = enumElem.get('name') @@ -1043,7 +1186,7 @@ def fillFeatureDictionary(self, interface, featurename, api, profile): self.gen.featureDictionary[featurename]['enumconstant'][required_key][enumextends] = [] self.gen.featureDictionary[featurename]['enumconstant'][required_key][enumextends].append(enumname) else: - self.gen.logMsg('warn', 'fillFeatureDictionary: NOT filling for {}'.format(typename)) + self.gen.logMsg('warn', f'fillFeatureDictionary: NOT filling for {typename}') for cmdElem in require.findall('command'): # Remove aliases in the same extension/feature; these are always added as a correction. Do not need the original to be visible. @@ -1053,7 +1196,7 @@ def fillFeatureDictionary(self, interface, featurename, api, profile): self.gen.featureDictionary[featurename]['command'][required_key] = [] self.gen.featureDictionary[featurename]['command'][required_key].append(cmdElem.get('name')) else: - self.gen.logMsg('warn', 'fillFeatureDictionary: NOT filling for {}'.format(typename)) + self.gen.logMsg('warn', f'fillFeatureDictionary: NOT filling for {typename}') def requireFeatures(self, interface, featurename, api, profile): """Process `` tags for a `` or ``. @@ -1103,15 +1246,23 @@ def removeAdditionalValidity(self, interface, api, profile): if v.get('struct'): self.typedict[v.get('struct')].removedValidity.append(copy.deepcopy(v)) - def generateFeature(self, fname, ftype, dictionary): + def generateFeature(self, fname, ftype, dictionary, explicit=False): """Generate a single type / enum group / enum / command, and all its dependencies as needed. - fname - name of feature (``/``/``) - ftype - type of feature, 'type' | 'enum' | 'command' - - dictionary - of *Info objects - self.{type|enum|cmd}dict""" + - dictionary - of *Info objects - self.{type|enum|cmd}dict + - explicit - True if this is explicitly required by the top-level + XML tag, False if it is a dependency of an explicit + requirement.""" self.gen.logMsg('diag', 'generateFeature: generating', ftype, fname) + + if not (explicit or self.genOpts.requireDepends): + self.gen.logMsg('diag', 'generateFeature: NOT generating', ftype, fname, 'because generator does not require dependencies') + return + f = self.lookupElementInfo(fname, dictionary) if f is None: # No such feature. This is an error, but reported earlier @@ -1294,7 +1445,7 @@ def generateRequiredInterface(self, interface): # Loop over all features inside all tags. 
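An aside on the new `explicit` flag: combined with the `requireDepends` generator option checked above, it lets a generator emit only the APIs named directly in `<require>` blocks, skipping transitively pulled-in dependencies. A hypothetical usage sketch (the feature names are invented; `registry` stands for a configured `Registry` instance with `genOpts.requireDepends = False`):

```python
# Explicitly required by a <require> tag: generated.
registry.generateFeature('vkCreateInstance', 'command', registry.cmddict,
                         explicit=True)

# Reached only as a dependency: logged as a diagnostic and skipped,
# because explicit=False and genOpts.requireDepends is False.
registry.generateFeature('VkInstanceCreateInfo', 'type', registry.typedict)
```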
for features in interface.findall('require'): for t in features.findall('type'): - self.generateFeature(t.get('name'), 'type', self.typedict) + self.generateFeature(t.get('name'), 'type', self.typedict, explicit=True) for e in features.findall('enum'): # If this is an enum extending an enumerated type, do not # generate it - this has already been done in reg.parseTree, @@ -1307,9 +1458,9 @@ def generateRequiredInterface(self, interface): forceEmit = False if not enumextends or forceEmit: - self.generateFeature(e.get('name'), 'enum', self.enumdict) + self.generateFeature(e.get('name'), 'enum', self.enumdict, explicit=True) for c in features.findall('command'): - self.generateFeature(c.get('name'), 'command', self.cmddict) + self.generateFeature(c.get('name'), 'command', self.cmddict, explicit=True) def generateSpirv(self, spirv, dictionary): if spirv is None: @@ -1339,7 +1490,7 @@ def stripUnsupportedAPIs(self, dictionary, attribute, supportedDictionary): stripped = False for api in attribstring.split(','): ##print('Checking API {} referenced by {}'.format(api, key)) - if supportedDictionary[api].required: + if api in supportedDictionary and supportedDictionary[api].required: apis.append(api) else: stripped = True @@ -1350,6 +1501,30 @@ def stripUnsupportedAPIs(self, dictionary, attribute, supportedDictionary): if stripped: eleminfo.elem.set(attribute, ','.join(apis)) + def stripUnsupportedAPIsFromList(self, dictionary, supportedDictionary): + """Strip unsupported APIs from attributes of APIs. + dictionary - dictionary of list of structure name strings + supportedDictionary - dictionary in which to look for supported + API elements in the attribute""" + + for key in dictionary: + attribstring = dictionary[key] + if attribstring is not None: + apis = [] + stripped = False + for api in attribstring: + ##print('Checking API {} referenced by {}'.format(api, key)) + if supportedDictionary[api].required: + apis.append(api) + else: + stripped = True + ##print('\t**STRIPPING API {} from {}'.format(api, key)) + + # Update the attribute after stripping stuff. + # Could sort apis before joining, but it is not a clear win + if stripped: + dictionary[key] = apis + def generateFormat(self, format, dictionary): if format is None: self.gen.logMsg('diag', 'No entry found for format element', @@ -1363,6 +1538,36 @@ def generateFormat(self, format, dictionary): genProc = self.gen.genFormat genProc(format, name, alias) + def generateSyncStage(self, sync): + genProc = self.gen.genSyncStage + genProc(sync) + + def generateSyncAccess(self, sync): + genProc = self.gen.genSyncAccess + genProc(sync) + + def generateSyncPipeline(self, sync): + genProc = self.gen.genSyncPipeline + genProc(sync) + + def tagValidExtensionStructs(self): + """Construct a "validextensionstructs" list for parent structures + based on "structextends" tags in child structures. 
+ Only do this for structures tagged as required.""" + + for typeinfo in self.typedict.values(): + type_elem = typeinfo.elem + if typeinfo.required and type_elem.get('category') == 'struct': + struct_extends = type_elem.get('structextends') + if struct_extends is not None: + for parent in struct_extends.split(','): + # self.gen.logMsg('diag', type_elem.get('name'), 'extends', parent) + self.validextensionstructs[parent].append(type_elem.get('name')) + + # Sort the lists so they do not depend on the XML order + for parent in self.validextensionstructs: + self.validextensionstructs[parent].sort() + def apiGen(self): """Generate interface for specified versions using the current generator and generator options""" @@ -1533,6 +1738,10 @@ def apiGen(self): self.stripUnsupportedAPIs(self.typedict, 'structextends', self.typedict) self.stripUnsupportedAPIs(self.cmddict, 'successcodes', self.enumdict) self.stripUnsupportedAPIs(self.cmddict, 'errorcodes', self.enumdict) + self.stripUnsupportedAPIsFromList(self.validextensionstructs, self.typedict) + + # Construct lists of valid extension structures + self.tagValidExtensionStructs() # @@May need to strip / # tags of these forms: @@ -1565,6 +1774,12 @@ def apiGen(self): self.generateSpirv(s, self.spirvcapdict) for s in formats: self.generateFormat(s, self.formatsdict) + for s in self.syncstagedict: + self.generateSyncStage(self.syncstagedict[s]) + for s in self.syncaccessdict: + self.generateSyncAccess(self.syncaccessdict[s]) + for s in self.syncpipelinedict: + self.generateSyncPipeline(self.syncpipelinedict[s]) self.gen.endFile() def apiReset(self): diff --git a/specification/scripts/rouge-extend-css.rb b/specification/scripts/rouge-extend-css.rb new file mode 100644 index 00000000..36f5f7ae --- /dev/null +++ b/specification/scripts/rouge-extend-css.rb @@ -0,0 +1,63 @@ +# Copyright 2021-2024 The Khronos Group Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# Khronos overrides for Rouge 'github' theme CSS for accessibility. +# See (note that this code is evolving, works as of asciidoctor 2.0.12): +# https://github.com/asciidoctor/asciidoctor/blob/master/lib/asciidoctor/syntax_highlighter/rouge.rb + +include ::Asciidoctor + +class ExtendedRougeSyntaxHighlighter < (Asciidoctor::SyntaxHighlighter.for 'rouge') + register_for 'rouge' + + # Insert rouge stylesheet from super + # Then replace many 'github' theme colors for accessibility compliance + # It would be better to use rouge's stylesheet factory, if it has one + def docinfo location, doc, opts + overrides = %() + + # super can return either