From ef72151b2b44d0588124aabf7f744e6e873c9927 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 13:50:02 -0400 Subject: [PATCH 01/28] fix: update artifact path pattern in workflow --- .github/workflows/essentialsplugins-4Series-builds.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index 1c40c29..eb07377 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -58,7 +58,7 @@ jobs: uses: ncipollo/release-action@v1 with: allowUpdates: true - artifacts: 'output\*.*(cpz|cplz)' + artifacts: 'output\**\*.*(cpz|cplz)' prerelease: ${{ inputs.channel != '' }} tag: ${{ inputs.tag }} commit: ${{ github.sha }} From 439506ded3e2cef0f971e19b3e96ad2208571e2e Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 14:16:37 -0400 Subject: [PATCH 02/28] feat: add nuspec file creation and cleanup step in workflow --- .../essentialsplugins-4Series-builds.yml | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index eb07377..2a6d804 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -63,6 +63,44 @@ jobs: tag: ${{ inputs.tag }} commit: ${{ github.sha }} bodyFile: ./CHANGELOG.md + - name: Remove Existing Nuspec Files + run: | + Get-ChildItem -Path .\ -Filter *.nuspec -Recurse | Remove-Item -Force + - name: Create Nuspec File + shell: powershell + run: | + $year = (Get-Date).Year + $repoFullName = "${{ github.repository }}" # Full repository name, including owner + $repoName = $repoFullName.Split('/')[-1] # Extracting the repository name part + $makeModelParts = $repoName -split '-' + $makeModel = $makeModelParts[1..($makeModelParts.Length - 1)] -join ' ' # Joining all parts after the first dash + $title = (Get-Culture).TextInfo.ToTitleCase($makeModel) + $makeModel = $title -replace ' ', '' # Removing spaces for the ID + $id = "PepperDash.Essentials.Plugin." 
+ $makeModel + $nuspecContent = @" + + + + $id + ${{ inputs.version }} + $title + PepperDash Technologies + pepperdash + false + MIT + https://github.com/${{ github.repository }} + Copyright $year + ${{ github.repository_name }} Epi + crestron 3series 4series + + + + + + + + "@ + echo $nuspecContent > ${{ github.workspace }}\project.nuspec - name: Setup Nuget run: | nuget sources add -name github -source https://nuget.pkg.github.com/pepperdash/index.json -username pepperdash -password ${{ secrets.GITHUB_TOKEN }} From be6ff4426441e5a723f6605e157f23bc79ca20e7 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 14:24:35 -0400 Subject: [PATCH 03/28] fix: adjust indentation for nuspec file creation step in workflow --- .../essentialsplugins-4Series-builds.yml | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index 2a6d804..16db056 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -66,27 +66,27 @@ jobs: - name: Remove Existing Nuspec Files run: | Get-ChildItem -Path .\ -Filter *.nuspec -Recurse | Remove-Item -Force - - name: Create Nuspec File - shell: powershell - run: | - $year = (Get-Date).Year - $repoFullName = "${{ github.repository }}" # Full repository name, including owner - $repoName = $repoFullName.Split('/')[-1] # Extracting the repository name part - $makeModelParts = $repoName -split '-' - $makeModel = $makeModelParts[1..($makeModelParts.Length - 1)] -join ' ' # Joining all parts after the first dash - $title = (Get-Culture).TextInfo.ToTitleCase($makeModel) - $makeModel = $title -replace ' ', '' # Removing spaces for the ID - $id = "PepperDash.Essentials.Plugin." + $makeModel - $nuspecContent = @" - - - - $id - ${{ inputs.version }} - $title - PepperDash Technologies - pepperdash - false + - name: Create Nuspec File + shell: powershell + run: | + $year = (Get-Date).Year + $repoFullName = "${{ github.repository }}" # Full repository name, including owner + $repoName = $repoFullName.Split('/')[-1] # Extracting the repository name part + $makeModelParts = $repoName -split '-' + $makeModel = $makeModelParts[1..($makeModelParts.Length - 1)] -join ' ' # Joining all parts after the first dash + $title = (Get-Culture).TextInfo.ToTitleCase($makeModel) + $makeModel = $title -replace ' ', '' # Removing spaces for the ID + $id = "PepperDash.Essentials.Plugin." 
+ $makeModel + $nuspecContent = @" + + + + $id + ${{ inputs.version }} + $title + PepperDash Technologies + pepperdash + false MIT https://github.com/${{ github.repository }} Copyright $year From 534ac08d60f14ded37ca531cfb8a5b44c7c0c46b Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 14:45:03 -0400 Subject: [PATCH 04/28] feat: add package name validation step in workflow --- .../essentialsplugins-4Series-builds.yml | 64 ++++++++----------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index 16db056..f0178ed 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -52,6 +52,32 @@ jobs: uses: actions/download-artifact@v4 with: name: change-log + - name: Check Package Name + shell: powershell + run: | + # Extract the repository name + $repoFullName = "${{ github.repository }}" # Full repository name, including owner + $repoName = $repoFullName.Split('/')[-1] # Extracting the repository name part + + # Expected package name format: PepperDash.Essentials.Make.Model + $expectedPackageName = "PepperDash.Essentials." + ($repoName -replace 'epi-', '').Replace('-', '.') + + # Display expected package name + Write-Output "Expected Package Name: $expectedPackageName" + + # Locate the generated NuGet package in the output directory + $packageFile = Get-ChildItem -Path .\output -Filter *.nupkg -Recurse + if ($null -eq $packageFile) { + throw "No NuGet package found in output directory." + } + + # Extract the package name from the file + $packageName = [System.IO.Path]::GetFileNameWithoutExtension($packageFile.FullName) + + # Compare the actual package name with the expected name + if ($packageName -ne $expectedPackageName) { + throw "Package name mismatch: Expected '$expectedPackageName' but found '$packageName'. Ensure the package name follows the repository naming convention." + } - name: Upload Release if: ${{ inputs.newVersion == 'true' }} id: create_release @@ -63,44 +89,6 @@ jobs: tag: ${{ inputs.tag }} commit: ${{ github.sha }} bodyFile: ./CHANGELOG.md - - name: Remove Existing Nuspec Files - run: | - Get-ChildItem -Path .\ -Filter *.nuspec -Recurse | Remove-Item -Force - - name: Create Nuspec File - shell: powershell - run: | - $year = (Get-Date).Year - $repoFullName = "${{ github.repository }}" # Full repository name, including owner - $repoName = $repoFullName.Split('/')[-1] # Extracting the repository name part - $makeModelParts = $repoName -split '-' - $makeModel = $makeModelParts[1..($makeModelParts.Length - 1)] -join ' ' # Joining all parts after the first dash - $title = (Get-Culture).TextInfo.ToTitleCase($makeModel) - $makeModel = $title -replace ' ', '' # Removing spaces for the ID - $id = "PepperDash.Essentials.Plugin." 
+ $makeModel - $nuspecContent = @" - - - - $id - ${{ inputs.version }} - $title - PepperDash Technologies - pepperdash - false - MIT - https://github.com/${{ github.repository }} - Copyright $year - ${{ github.repository_name }} Epi - crestron 3series 4series - - - - - - - - "@ - echo $nuspecContent > ${{ github.workspace }}\project.nuspec - name: Setup Nuget run: | nuget sources add -name github -source https://nuget.pkg.github.com/pepperdash/index.json -username pepperdash -password ${{ secrets.GITHUB_TOKEN }} From f6233364ac6e7771dc2b0ab43ee8a6bd4997bedc Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 14:46:27 -0400 Subject: [PATCH 05/28] feat: add bypass package check input to workflow --- .../workflows/essentialsplugins-4Series-builds.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index f0178ed..26ccb29 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -4,21 +4,25 @@ on: workflow_call: inputs: newVersion: - description: 'new version?' + description: 'New version?' required: true type: string version: - description: 'The version of the file to build and push' + description: 'The version of the file to build and push.' required: true type: string tag: - description: 'The tag of the image to build and push' + description: 'The tag of the image to build and push.' required: true type: string channel: - description: 'The channel of the image to build and push' + description: 'The channel of the image to build and push.' required: true - type: string + type: string + bypassPackageCheck: + description: 'Set to true to bypass the package name check.' + required: false + type: boolean env: BUILD_TYPE: ${{ inputs.channel == '' && 'Release' || 'Debug' }} From c77312508fdbd3344ea55a2ca4f170b22140eeb9 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 14:47:37 -0400 Subject: [PATCH 06/28] fix: add conditional bypass for package name check in workflow --- .github/workflows/essentialsplugins-4Series-builds.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index 26ccb29..5785385 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -57,6 +57,7 @@ jobs: with: name: change-log - name: Check Package Name + if: ${{ inputs.bypassPackageCheck != 'true' }} shell: powershell run: | # Extract the repository name From befbe993d1853936f4722ff2585a2a12b88d88a6 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 15:02:07 -0400 Subject: [PATCH 07/28] fix: improve error handling for NuGet package checks in workflow --- .github/workflows/essentialsplugins-4Series-builds.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index 5785385..1a3b79c 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -73,7 +73,9 @@ jobs: # Locate the generated NuGet package in the output directory $packageFile = Get-ChildItem -Path .\output -Filter *.nupkg -Recurse if ($null -eq $packageFile) { - throw "No NuGet package found in output directory." 
+ Write-Error "No NuGet package found in the output directory." + Write-Output "::error::No NuGet package found in the output directory. Please check if the build generated the package correctly." + exit 1 } # Extract the package name from the file @@ -81,7 +83,9 @@ jobs: # Compare the actual package name with the expected name if ($packageName -ne $expectedPackageName) { - throw "Package name mismatch: Expected '$expectedPackageName' but found '$packageName'. Ensure the package name follows the repository naming convention." + Write-Error "Package name mismatch: Expected '$expectedPackageName' but found '$packageName'." + Write-Output "::error::Package name mismatch: Expected '$expectedPackageName' but found '$packageName'. Ensure the package name follows the repository naming convention." + exit 1 } - name: Upload Release if: ${{ inputs.newVersion == 'true' }} From 422704fb4e243709e249a2d78b6d723285317921 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 17:21:46 -0400 Subject: [PATCH 08/28] feat: add commit message check workflow with bypass option --- essentialsplugins-checkCommitMessage.yml | 36 ++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 essentialsplugins-checkCommitMessage.yml diff --git a/essentialsplugins-checkCommitMessage.yml b/essentialsplugins-checkCommitMessage.yml new file mode 100644 index 0000000..cde607b --- /dev/null +++ b/essentialsplugins-checkCommitMessage.yml @@ -0,0 +1,36 @@ +name: Commit Message Check + +on: + workflow_call: + inputs: + bypassCommitCheck: + description: 'Set to true to bypass the commit message check.' + required: false + type: boolean + +jobs: + check_commit_message: + runs-on: ubuntu-latest + if: ${{ inputs.bypassCommitCheck != 'true' }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Check Commit Message + shell: bash + run: | + # Retrieve the commit messages + COMMIT_MSGS=$(git log -n 10 --pretty=format:"%s" HEAD^..HEAD) + + # Define a regular expression to match the conventional commit pattern + CONVENTIONAL_REGEX="^(feat|fix|chore|docs|style|refactor|perf|test|build|ci|revert|wip)(\(.+\))?:\s.+" + + # Loop through each commit message and check against the conventional pattern + for COMMIT_MSG in $COMMIT_MSGS; do + if [[ ! $COMMIT_MSG =~ $CONVENTIONAL_REGEX ]]; then + echo "::error::Commit message '$COMMIT_MSG' does not follow the Conventional Commits format. Please use a message like 'feat: add new feature' or 'fix(scope): correct issue'." + exit 1 + else + echo "Commit message '$COMMIT_MSG' is valid." 
+ fi + done \ No newline at end of file From 9276785f9e6ad97afdb58189f5fc8361a6b69678 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 17:24:35 -0400 Subject: [PATCH 09/28] feat: implement commit message check workflow --- .../workflows/essentialsplugins-checkCommitMessage.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename essentialsplugins-checkCommitMessage.yml => .github/workflows/essentialsplugins-checkCommitMessage.yml (100%) diff --git a/essentialsplugins-checkCommitMessage.yml b/.github/workflows/essentialsplugins-checkCommitMessage.yml similarity index 100% rename from essentialsplugins-checkCommitMessage.yml rename to .github/workflows/essentialsplugins-checkCommitMessage.yml From a66bdcfb07d4b37015ee91b18f8f6a8fc922a181 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 17:29:24 -0400 Subject: [PATCH 10/28] fix: update commit message check to retrieve only the latest commit message --- .../essentialsplugins-checkCommitMessage.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/essentialsplugins-checkCommitMessage.yml b/.github/workflows/essentialsplugins-checkCommitMessage.yml index cde607b..a052a34 100644 --- a/.github/workflows/essentialsplugins-checkCommitMessage.yml +++ b/.github/workflows/essentialsplugins-checkCommitMessage.yml @@ -19,14 +19,14 @@ jobs: - name: Check Commit Message shell: bash run: | - # Retrieve the commit messages - COMMIT_MSGS=$(git log -n 10 --pretty=format:"%s" HEAD^..HEAD) - + # Retrieve the commit message from the latest commit + COMMIT_MSGS=$(git log -1 --pretty=format:"%s") + # Define a regular expression to match the conventional commit pattern - CONVENTIONAL_REGEX="^(feat|fix|chore|docs|style|refactor|perf|test|build|ci|revert|wip)(\(.+\))?:\s.+" - + CONVENTIONAL_REGEX="^(feat|fix|chore|docs|style|refactor|perf|test|build|ci|revert|wip)(\\(.+\\))?:\\s.+" + # Loop through each commit message and check against the conventional pattern - for COMMIT_MSG in $COMMIT_MSGS; do + for COMMIT_MSG in "$COMMIT_MSGS"; do if [[ ! $COMMIT_MSG =~ $CONVENTIONAL_REGEX ]]; then echo "::error::Commit message '$COMMIT_MSG' does not follow the Conventional Commits format. Please use a message like 'feat: add new feature' or 'fix(scope): correct issue'." exit 1 From 02264d2eede978bc534639f4335e0bd9bc3a7a44 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 17:36:21 -0400 Subject: [PATCH 11/28] fix: update error handling for package name mismatch in workflow --- .github/workflows/essentialsplugins-4Series-builds.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index 1a3b79c..fb34287 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -83,8 +83,7 @@ jobs: # Compare the actual package name with the expected name if ($packageName -ne $expectedPackageName) { - Write-Error "Package name mismatch: Expected '$expectedPackageName' but found '$packageName'." - Write-Output "::error::Package name mismatch: Expected '$expectedPackageName' but found '$packageName'. Ensure the package name follows the repository naming convention." + echo "::error::Package name mismatch: Expected '$expectedPackageName' but found '$packageName'. Ensure the package name follows the repository naming convention." 
exit 1 } - name: Upload Release From b1d0153a79d114ecbac274585838e1df3b8e305b Mon Sep 17 00:00:00 2001 From: jtalborough Date: Mon, 14 Oct 2024 17:42:55 -0400 Subject: [PATCH 12/28] fix: update expected package name format and improve case-insensitive comparison in workflow --- .../workflows/essentialsplugins-4Series-builds.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/essentialsplugins-4Series-builds.yml b/.github/workflows/essentialsplugins-4Series-builds.yml index fb34287..178be1e 100644 --- a/.github/workflows/essentialsplugins-4Series-builds.yml +++ b/.github/workflows/essentialsplugins-4Series-builds.yml @@ -57,15 +57,14 @@ jobs: with: name: change-log - name: Check Package Name - if: ${{ inputs.bypassPackageCheck != 'true' }} shell: powershell run: | # Extract the repository name $repoFullName = "${{ github.repository }}" # Full repository name, including owner $repoName = $repoFullName.Split('/')[-1] # Extracting the repository name part - # Expected package name format: PepperDash.Essentials.Make.Model - $expectedPackageName = "PepperDash.Essentials." + ($repoName -replace 'epi-', '').Replace('-', '.') + # Expected package name format: PepperDash.Essentials.Plugins.Make.Model + $expectedPackageName = "PepperDash.Essentials.Plugins." + ($repoName -replace 'epi-', '').Replace('-', '.') # Display expected package name Write-Output "Expected Package Name: $expectedPackageName" @@ -78,11 +77,11 @@ jobs: exit 1 } - # Extract the package name from the file - $packageName = [System.IO.Path]::GetFileNameWithoutExtension($packageFile.FullName) + # Extract the package name from the file (ignore version) + $packageName = [System.IO.Path]::GetFileNameWithoutExtension($packageFile.FullName) -replace '\.\d+.*$' - # Compare the actual package name with the expected name - if ($packageName -ne $expectedPackageName) { + # Compare the actual package name with the expected name (case insensitive) + if ($packageName.ToLower() -ne $expectedPackageName.ToLower()) { echo "::error::Package name mismatch: Expected '$expectedPackageName' but found '$packageName'. Ensure the package name follows the repository naming convention." 
exit 1 } From 470182f52ac9138c748d5cb784d1c4a35735de67 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Tue, 15 Oct 2024 16:55:31 -0400 Subject: [PATCH 13/28] fix: enhance commit message validation to include merge commit format --- .../essentialsplugins-checkCommitMessage.yml | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/.github/workflows/essentialsplugins-checkCommitMessage.yml b/.github/workflows/essentialsplugins-checkCommitMessage.yml index a052a34..fd7c55f 100644 --- a/.github/workflows/essentialsplugins-checkCommitMessage.yml +++ b/.github/workflows/essentialsplugins-checkCommitMessage.yml @@ -16,19 +16,20 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 - - name: Check Commit Message + - name: Check Commit Messages shell: bash run: | - # Retrieve the commit message from the latest commit - COMMIT_MSGS=$(git log -1 --pretty=format:"%s") + # Retrieve all commit messages from the current merge (HEAD~1..HEAD for merge commit or all in a PR) + COMMIT_MSGS=$(git log --pretty=format:"%s" HEAD~1..HEAD) - # Define a regular expression to match the conventional commit pattern + # Define a regular expression to match the conventional commit pattern or a standard merge commit message CONVENTIONAL_REGEX="^(feat|fix|chore|docs|style|refactor|perf|test|build|ci|revert|wip)(\\(.+\\))?:\\s.+" + MERGE_COMMIT_REGEX="^Merge (branch|remote-tracking branch) '.*'( into .*)?$" - # Loop through each commit message and check against the conventional pattern - for COMMIT_MSG in "$COMMIT_MSGS"; do - if [[ ! $COMMIT_MSG =~ $CONVENTIONAL_REGEX ]]; then - echo "::error::Commit message '$COMMIT_MSG' does not follow the Conventional Commits format. Please use a message like 'feat: add new feature' or 'fix(scope): correct issue'." + # Loop through each commit message and check against the patterns + for COMMIT_MSG in $COMMIT_MSGS; do + if [[ ! $COMMIT_MSG =~ $CONVENTIONAL_REGEX && ! $COMMIT_MSG =~ $MERGE_COMMIT_REGEX ]]; then + echo "::error::Commit message '$COMMIT_MSG' does not follow the Conventional Commits format or merge commit format. Please use a message like 'feat: add new feature' or 'fix(scope): correct issue'." exit 1 else echo "Commit message '$COMMIT_MSG' is valid." From b2348c58482b15a74b3a0d7b3c083c0b155aea80 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Tue, 15 Oct 2024 16:58:01 -0400 Subject: [PATCH 14/28] fix: update commit message retrieval to only fetch the latest commit message for validation --- .../workflows/essentialsplugins-checkCommitMessage.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/essentialsplugins-checkCommitMessage.yml b/.github/workflows/essentialsplugins-checkCommitMessage.yml index fd7c55f..b31b665 100644 --- a/.github/workflows/essentialsplugins-checkCommitMessage.yml +++ b/.github/workflows/essentialsplugins-checkCommitMessage.yml @@ -19,15 +19,15 @@ jobs: - name: Check Commit Messages shell: bash run: | - # Retrieve all commit messages from the current merge (HEAD~1..HEAD for merge commit or all in a PR) - COMMIT_MSGS=$(git log --pretty=format:"%s" HEAD~1..HEAD) - + # Retrieve the last commit messages. For merge commits, it will retrieve the latest commit message. 
+ COMMIT_MSGS=$(git log --pretty=format:"%s" -n 1) + # Define a regular expression to match the conventional commit pattern or a standard merge commit message CONVENTIONAL_REGEX="^(feat|fix|chore|docs|style|refactor|perf|test|build|ci|revert|wip)(\\(.+\\))?:\\s.+" MERGE_COMMIT_REGEX="^Merge (branch|remote-tracking branch) '.*'( into .*)?$" - + # Loop through each commit message and check against the patterns - for COMMIT_MSG in $COMMIT_MSGS; do + for COMMIT_MSG in "$COMMIT_MSGS"; do if [[ ! $COMMIT_MSG =~ $CONVENTIONAL_REGEX && ! $COMMIT_MSG =~ $MERGE_COMMIT_REGEX ]]; then echo "::error::Commit message '$COMMIT_MSG' does not follow the Conventional Commits format or merge commit format. Please use a message like 'feat: add new feature' or 'fix(scope): correct issue'." exit 1 From a4d471c8b01b9f4258f4e1b221843d285c7c0ce9 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Tue, 15 Oct 2024 17:15:45 -0400 Subject: [PATCH 15/28] fix: enhance commit message retrieval and regex patterns for validation --- .../workflows/essentialsplugins-checkCommitMessage.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/essentialsplugins-checkCommitMessage.yml b/.github/workflows/essentialsplugins-checkCommitMessage.yml index b31b665..88d114a 100644 --- a/.github/workflows/essentialsplugins-checkCommitMessage.yml +++ b/.github/workflows/essentialsplugins-checkCommitMessage.yml @@ -15,17 +15,19 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch the full history to make HEAD~1 references valid - name: Check Commit Messages shell: bash run: | - # Retrieve the last commit messages. For merge commits, it will retrieve the latest commit message. + # Retrieve the latest commit message COMMIT_MSGS=$(git log --pretty=format:"%s" -n 1) - + # Define a regular expression to match the conventional commit pattern or a standard merge commit message CONVENTIONAL_REGEX="^(feat|fix|chore|docs|style|refactor|perf|test|build|ci|revert|wip)(\\(.+\\))?:\\s.+" - MERGE_COMMIT_REGEX="^Merge (branch|remote-tracking branch) '.*'( into .*)?$" - + MERGE_COMMIT_REGEX="^(Merge (branch|remote-tracking branch|commit) '.*'( into .*)?)$" + # Loop through each commit message and check against the patterns for COMMIT_MSG in "$COMMIT_MSGS"; do if [[ ! $COMMIT_MSG =~ $CONVENTIONAL_REGEX && ! 
$COMMIT_MSG =~ $MERGE_COMMIT_REGEX ]]; then From 13b39f3b8b37116fe53d6425725a2118fd85c492 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Tue, 15 Oct 2024 17:27:39 -0400 Subject: [PATCH 16/28] feat: add workflow for automated README updates --- .github/scripts/metadata.py | 441 ++++++++++++++++++++++++++++ .github/workflows/update-readme.yml | 64 ++++ 2 files changed, 505 insertions(+) create mode 100644 .github/scripts/metadata.py create mode 100644 .github/workflows/update-readme.yml diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py new file mode 100644 index 0000000..bbb9921 --- /dev/null +++ b/.github/scripts/metadata.py @@ -0,0 +1,441 @@ +import os +import re +import json + +def extract_implemented_interfaces(file_content): + interface_pattern = re.compile(r'class\s+\w+\s*:\s*([^{]+)') + match = interface_pattern.search(file_content) + if match: + items = match.group(1).split(',') + interfaces = [item.strip() for item in items if item.strip().startswith('I')] + base_classes = [item.strip() for item in items if not item.strip().startswith('I') and not item.strip().startswith('EssentialsPluginDeviceFactory')] + return interfaces, base_classes + return [], [] + +def extract_supported_types(file_content): + # Remove commented lines + uncommented_content = re.sub(r'//.*', '', file_content) + + # Updated regex to match TypeNames initialization + types_pattern = re.compile(r'TypeNames\s*=\s*new\s*List\(\)\s*{([^}]+)}') + matches = types_pattern.findall(uncommented_content) + types = [] + for match in matches: + current_types = [type_name.strip().strip('"') for type_name in match.split(',')] + types.extend(current_types) + + # Remove duplicates and filter out unnecessary entries + return list(set(filter(None, types))) + +def extract_minimum_essentials_framework_version(file_content): + # Update the regex to exclude comments or anything unnecessary. 
+ version_pattern = re.compile(r'^\s*MinimumEssentialsFrameworkVersion\s*=\s*"([^"]+)"\s*;', re.MULTILINE) + match = version_pattern.search(file_content) + if match: + return match.group(1) + return None + +def extract_public_methods(file_content): + methods_pattern = re.compile(r'public\s+\w+\s+\w+\s*\([^)]*\)\s*') + matches = methods_pattern.findall(file_content) + return [match.strip() for match in matches] + +def read_files_in_directory(directory): + all_interfaces = [] + all_base_classes = [] + all_supported_types = [] + all_minimum_versions = [] + all_public_methods = [] + all_joins = [] + + for root, _, files in os.walk(directory): + for file in files: + if file.endswith('.cs'): + file_path = os.path.join(root, file) + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + interfaces, base_classes = extract_implemented_interfaces(content) + supported_types = extract_supported_types(content) + minimum_version = extract_minimum_essentials_framework_version(content) + public_methods = extract_public_methods(content) + + all_interfaces.extend(interfaces) + all_base_classes.extend(base_classes) + all_supported_types.extend(supported_types) + if minimum_version: + all_minimum_versions.append(minimum_version) + all_public_methods.extend(public_methods) + + return { + "interfaces": all_interfaces, + "base_classes": all_base_classes, + "supported_types": all_supported_types, + "minimum_versions": all_minimum_versions, + "public_methods": all_public_methods + } + +def read_class_names_and_bases_from_files(directory): + class_defs = {} + class_pattern = re.compile( + r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes + r'(?:public\s+|private\s+|protected\s+)?' # Optional access modifier + r'(?:partial\s+)?' # Optional 'partial' keyword + r'class\s+([A-Za-z_]\w*)' # Class name + r'(?:\s*:\s*([^\{]+))?' # Optional base classes + r'\s*\{', # Opening brace + re.MULTILINE + ) + for root, _, files in os.walk(directory): + for file in files: + if file.endswith('.cs'): + file_path = os.path.join(root, file) + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + for match in class_pattern.finditer(content): + class_name = match.group(1) + bases = match.group(2) + if bases: + base_classes = [b.strip() for b in bases.split(',')] + else: + base_classes = [] + class_defs[class_name] = base_classes + return class_defs + +def find_joinmap_classes(class_defs): + joinmap_classes = [] + for class_name, base_classes in class_defs.items(): + if 'JoinMapBaseAdvanced' in base_classes: + joinmap_classes.append(class_name) + return joinmap_classes + +def find_file_in_directory(filename, root_directory): + for root, _, files in os.walk(root_directory): + for file in files: + if file == filename: + full_path = os.path.join(root, file) + return full_path + return None + +def parse_joinmap_info(class_name, root_directory): + filename = f"{class_name}.cs" + file_path = find_file_in_directory(filename, root_directory) + + if not file_path: + print(f"File not found: {filename}. Skipping...") + return [] + + with open(file_path, 'r', encoding='utf-8') as file: + file_content = file.read() + + join_pattern = re.compile( + r'\[JoinName\("(?P[^"]+)"\)\]\s*' # Match the [JoinName("...")] attribute + r'public\s+JoinDataComplete\s+(?P\w+)\s*=\s*' # Match the property declaration + r'new\s+JoinDataComplete\s*\(' # Match 'new JoinDataComplete(' + r'\s*new\s+JoinData\s*\{(?P[^\}]+)\}\s*,' # Match 'new JoinData { ... },' + r'\s*new\s+JoinMetadata\s*\{(?P[^\}]+)\}\s*' # Match 'new JoinMetadata { ... 
}' + r'\)', # Match closing parenthesis of new JoinDataComplete + re.DOTALL + ) + + joinmap_info = [] + for match in join_pattern.finditer(file_content): + join_name = match.group('join_name') + property_name = match.group('property_name') + join_data = match.group('join_data') + join_metadata = match.group('join_metadata') + + # Now parse join_data and join_metadata to extract join_number, description, join_type, etc. + + # Extract join_number from join_data + join_number_match = re.search(r'JoinNumber\s*=\s*(\d+)', join_data) + if join_number_match: + join_number = join_number_match.group(1) + else: + join_number = None + + # Extract description and join_type from join_metadata + description_match = re.search(r'Description\s*=\s*"([^"]+)"', join_metadata) + if description_match: + description = description_match.group(1) + else: + description = None + + join_type_match = re.search(r'JoinType\s*=\s*eJoinType\.(\w+)', join_metadata) + if join_type_match: + join_type = join_type_match.group(1) + else: + join_type = None + + joinmap_info.append({ + "name": join_name, + "join_number": join_number, + "type": join_type, + "description": description + }) + + return joinmap_info + +def generate_markdown_chart(joins, section_title): + if not joins: + return '' + markdown_chart = f'### {section_title}\n\n' + + # Group joins by type + joins_by_type = {'Digital': [], 'Analog': [], 'Serial': []} + for join in joins: + if join['type'] in joins_by_type: + joins_by_type[join['type']].append(join) + else: + joins_by_type['Digital'].append(join) # Default to Digital if type not recognized + + for join_type in ['Digital', 'Analog', 'Serial']: + if joins_by_type[join_type]: + markdown_chart += f"#### {join_type}s\n\n" + markdown_chart += "| Join | Type (RW) | Description |\n" + markdown_chart += "| --- | --- | --- |\n" + for join in joins_by_type[join_type]: + markdown_chart += f"| {join['join_number']} | R | {join['description']} |\n" + markdown_chart += '\n' + return markdown_chart + +def generate_config_example_markdown(sample_config): + markdown = "### Config Example\n\n" + markdown += "```json\n" + markdown += json.dumps(sample_config, indent=4) + markdown += "\n```\n" + return markdown + +def generate_markdown_list(items, section_title): + """ + Generates a markdown header and list of items. + + Parameters: + - items (list): The list of items to include. + - section_title (str): The header for the section. + + Returns: + - str: The markdown content with the section header. + """ + if not items: + return '' + markdown = f'### {section_title}\n\n' + for item in items: + markdown += f"- {item}\n" + markdown += '\n' + return markdown + +def parse_all_classes(directory): + class_defs = {} + class_pattern = re.compile( + r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes + r'(?:public\s+|private\s+|protected\s+)?' # Access modifier + r'(?:partial\s+)?' # Optional 'partial' keyword + r'class\s+([A-Za-z_]\w*)' # Class name + r'(?:\s*:\s*[^\{]+)?' # Optional inheritance + r'\s*\{', # Opening brace + re.MULTILINE + ) + property_pattern = re.compile( + r'^\s*' + r'(?:\[[^\]]*\]\s*)*' # Optional attributes + r'(?:public|private|protected)\s+' # Access modifier + r'(?:static\s+|virtual\s+|override\s+|abstract\s+|readonly\s+)?' 
# Optional modifiers + r'([A-Za-z0-9_<>,\s\[\]\?]+?)\s+' # Type + r'([A-Za-z_]\w*)\s*' # Property name + r'\{[^}]*?\}', # Property body + re.MULTILINE | re.DOTALL + ) + for root, _, files in os.walk(directory): + for file in files: + if file.endswith('.cs'): + file_path = os.path.join(root, file) + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + # Find all class definitions + for class_match in class_pattern.finditer(content): + class_name = class_match.group(1) + class_start = class_match.end() + # Find the matching closing brace for the class + class_body, end_index = extract_class_body(content, class_start) + # Parse properties within the class body + properties = [] + for prop_match in property_pattern.finditer(class_body): + prop_string = prop_match.group(0) + json_property_match = re.search(r'\[JsonProperty\("([^"]+)"\)\]', prop_string) + json_property_name = json_property_match.group(1) if json_property_match else None + prop_type = prop_match.group(1).strip() + prop_name = prop_match.group(2) + properties.append({ + "json_property_name": json_property_name if json_property_name else prop_name, + "property_name": prop_name, + "property_type": prop_type + }) + class_defs[class_name] = properties + return class_defs + +def extract_class_body(content, start_index): + """ + Extracts the body of a class from the content, starting at start_index. + Returns the class body and the index where it ends. + """ + brace_count = 1 + index = start_index + while brace_count > 0 and index < len(content): + if content[index] == '{': + brace_count += 1 + elif content[index] == '}': + brace_count -= 1 + index += 1 + return content[start_index:index - 1], index - 1 + +def generate_sample_value(property_type, class_defs, processed_classes=None): + if processed_classes is None: + processed_classes = set() + property_type = property_type.strip() + # Handle nullable types + property_type = property_type.rstrip('?') + # Handle primitive types + if property_type in ('int', 'long', 'float', 'double', 'decimal'): + return 0 + elif property_type == 'string': + return "SampleString" + elif property_type == 'bool': + return True + elif property_type == 'DateTime': + return "2021-01-01T00:00:00Z" + # Handle collections + elif property_type.startswith('List<') or property_type.startswith('IList<') or property_type.startswith('IEnumerable<') or property_type.startswith('ObservableCollection<'): + inner_type = property_type[property_type.find('<')+1:-1] + return [generate_sample_value(inner_type, class_defs, processed_classes)] + elif property_type.startswith('Dictionary<'): + types = property_type[property_type.find('<')+1:-1].split(',') + key_type = types[0].strip() + value_type = types[1].strip() + key_sample = generate_sample_value(key_type, class_defs, processed_classes) + value_sample = generate_sample_value(value_type, class_defs, processed_classes) + return { key_sample: value_sample } + # Handle custom classes + elif property_type in class_defs: + if property_type in processed_classes: + return {} + processed_classes.add(property_type) + properties = class_defs[property_type] + sample_obj = {} + for prop in properties: + prop_name = prop['json_property_name'] + prop_type = prop['property_type'] + sample_obj[prop_name] = generate_sample_value(prop_type, class_defs, processed_classes) + processed_classes.remove(property_type) + return sample_obj + else: + # Unknown type, default to a sample value + return "SampleValue" + +def generate_sample_config(config_class_name, class_defs, 
supported_types): + type_name = config_class_name[:-6] # Remove 'Config' + if type_name not in supported_types: + type_name = supported_types[0] if supported_types else type_name + config = { + "key": "GeneratedKey", + "uid": 1, + "name": "GeneratedName", + "type": type_name, + "group": "Group", + "properties": generate_sample_value(config_class_name, class_defs) + } + return config + +def read_readme_file(filepath): + if not os.path.exists(filepath): + print(f"README.md file not found at {filepath}. A new file will be created.") + return "" + with open(filepath, 'r', encoding='utf-8') as f: + return f.read() + +def update_readme_section(readme_content, section_title, new_section_content): + start_marker = f'' + end_marker = f'' + + pattern = re.compile( + rf'{re.escape(start_marker)}(.*?){re.escape(end_marker)}', + re.DOTALL | re.IGNORECASE + ) + + match = pattern.search(readme_content) + + if match: + section_content = match.group(1) + if '' in section_content: + print(f"Skipping section: {section_title} (found )") + return readme_content # Return the original content unchanged + else: + print(f"Updating existing section: {section_title}") + updated_section = f'{start_marker}\n{new_section_content.rstrip()}\n{end_marker}' + updated_readme = readme_content[:match.start()] + updated_section + readme_content[match.end():] + else: + print(f"Adding new section: {section_title}") + # Ensure there's a newline before adding the new section + if not readme_content.endswith('\n'): + readme_content += '\n' + updated_section = f'{start_marker}\n{new_section_content.rstrip()}\n{end_marker}\n' + updated_readme = readme_content + updated_section + return updated_readme + +def remove_duplicates_preserve_order(seq): + seen = set() + return [x for x in seq if not (x in seen or seen.add(x))] + +if __name__ == "__main__": + project_directory = os.path.abspath("./") + results = read_files_in_directory(project_directory) + + # Remove duplicates from interfaces and base classes while preserving order + unique_interfaces = remove_duplicates_preserve_order(results["interfaces"]) + unique_base_classes = remove_duplicates_preserve_order(results["base_classes"]) + + # Generate markdown sections with titles using the deduplicated lists + interfaces_markdown = generate_markdown_list(unique_interfaces, "Interfaces Implemented") + base_classes_markdown = generate_markdown_list(unique_base_classes, "Base Classes") + supported_types_markdown = generate_markdown_list(results["supported_types"], "Supported Types") + minimum_versions_markdown = generate_markdown_list(results["minimum_versions"], "Minimum Essentials Framework Versions") + public_methods_markdown = generate_markdown_list(results["public_methods"], "Public Methods") + + # Generate Join Maps markdown + class_defs = read_class_names_and_bases_from_files(project_directory) + joinmap_classes = find_joinmap_classes(class_defs) + joinmap_info = [] + for cls in joinmap_classes: + info = parse_joinmap_info(cls, project_directory) + joinmap_info.extend(info) + join_maps_markdown = generate_markdown_chart(joinmap_info, "Join Maps") + + # Generate Config Example markdown + all_class_defs = parse_all_classes(project_directory) + config_classes = [cls for cls in all_class_defs if cls.endswith('Config') or cls.endswith('ConfigObject')] + if not config_classes: + print("No config classes found.") + config_example_markdown = "" + else: + main_config_class = max(config_classes, key=lambda cls: len(all_class_defs[cls])) + sample_config = 
generate_sample_config(main_config_class, all_class_defs, results["supported_types"]) + config_example_markdown = generate_config_example_markdown(sample_config) + + # Read the existing README.md content + readme_path = os.path.join(project_directory, 'README.md') + readme_content = read_readme_file(readme_path) + + # Update or insert sections with section titles handled in the content + readme_content = update_readme_section(readme_content, "Interfaces Implemented", interfaces_markdown) + readme_content = update_readme_section(readme_content, "Base Classes", base_classes_markdown) + readme_content = update_readme_section(readme_content, "Supported Types", supported_types_markdown) + readme_content = update_readme_section(readme_content, "Minimum Essentials Framework Versions", minimum_versions_markdown) + readme_content = update_readme_section(readme_content, "Public Methods", public_methods_markdown) + readme_content = update_readme_section(readme_content, "Join Maps", join_maps_markdown) + if config_example_markdown: + readme_content = update_readme_section(readme_content, "Config Example", config_example_markdown) + + # Write the updated content back to README.md + with open(readme_path, 'w', encoding='utf-8') as f: + f.write(readme_content) + + print("README.md has been updated.") \ No newline at end of file diff --git a/.github/workflows/update-readme.yml b/.github/workflows/update-readme.yml new file mode 100644 index 0000000..be128d0 --- /dev/null +++ b/.github/workflows/update-readme.yml @@ -0,0 +1,64 @@ +name: Update README + +on: + workflow_call: + inputs: + target-branch: + description: 'The branch to commit the README.md updates to.' + required: true + type: string + +permissions: + contents: write + +jobs: + update-readme: + runs-on: ubuntu-latest + + steps: + - name: Checkout Caller Repository + uses: actions/checkout@v3 + with: + ref: ${{ inputs.target-branch }} + fetch-depth: 0 + + - name: Checkout Reusable Workflow Repository + uses: actions/checkout@v3 + with: + repository: PepperDash/readme-automation + path: readme-automation + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Install Python Dependencies + run: | + python -m pip install --upgrade pip + # pip install -r requirements.txt + + - name: Run README Update Script + run: | + python readme-automation/.github/scripts/metadata.py + + - name: Check for Changes + id: check_for_changes + run: | + if git diff --quiet; then + echo "no_changes=true" >> $GITHUB_OUTPUT + else + echo "no_changes=false" >> $GITHUB_OUTPUT + fi + + - name: Create or Switch to 'robot-docs' Branch + run: | + git checkout -B robot-docs + + - name: Commit and Push Changes to 'robot-docs' Branch + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add README.md + git commit -m "Automated README update" || echo "No changes to commit" + git push origin robot-docs --force \ No newline at end of file From 829a15a74441374d0b141940cf2897e35ae410f5 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 16 Oct 2024 09:10:47 -0400 Subject: [PATCH 17/28] fix: reorder README section updates for consistency and clarity --- .github/scripts/metadata.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index bbb9921..1c56d3d 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -425,14 +425,14 @@ def remove_duplicates_preserve_order(seq): 
readme_content = read_readme_file(readme_path) # Update or insert sections with section titles handled in the content - readme_content = update_readme_section(readme_content, "Interfaces Implemented", interfaces_markdown) - readme_content = update_readme_section(readme_content, "Base Classes", base_classes_markdown) - readme_content = update_readme_section(readme_content, "Supported Types", supported_types_markdown) readme_content = update_readme_section(readme_content, "Minimum Essentials Framework Versions", minimum_versions_markdown) - readme_content = update_readme_section(readme_content, "Public Methods", public_methods_markdown) - readme_content = update_readme_section(readme_content, "Join Maps", join_maps_markdown) if config_example_markdown: readme_content = update_readme_section(readme_content, "Config Example", config_example_markdown) + readme_content = update_readme_section(readme_content, "Supported Types", supported_types_markdown) + readme_content = update_readme_section(readme_content, "Join Maps", join_maps_markdown) + readme_content = update_readme_section(readme_content, "Interfaces Implemented", interfaces_markdown) + readme_content = update_readme_section(readme_content, "Base Classes", base_classes_markdown) + readme_content = update_readme_section(readme_content, "Public Methods", public_methods_markdown) # Write the updated content back to README.md with open(readme_path, 'w', encoding='utf-8') as f: From a70aad9a8bb913b43015b91499a3d62c6c97b1ec Mon Sep 17 00:00:00 2001 From: jtalborough Date: Tue, 29 Oct 2024 10:52:56 -0400 Subject: [PATCH 18/28] fix: improve join parsing logic and enhance markdown generation for clarity --- .github/scripts/metadata.py | 86 ++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 49 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 1c56d3d..278ffe7 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -127,75 +127,63 @@ def parse_joinmap_info(class_name, root_directory): with open(file_path, 'r', encoding='utf-8') as file: file_content = file.read() + # Remove C# comments to avoid false matches + file_content = re.sub(r'//.*?\n|/\*.*?\*/', '', file_content, flags=re.DOTALL) + + # Updated pattern to better match the join definitions join_pattern = re.compile( - r'\[JoinName\("(?P[^"]+)"\)\]\s*' # Match the [JoinName("...")] attribute - r'public\s+JoinDataComplete\s+(?P\w+)\s*=\s*' # Match the property declaration - r'new\s+JoinDataComplete\s*\(' # Match 'new JoinDataComplete(' - r'\s*new\s+JoinData\s*\{(?P[^\}]+)\}\s*,' # Match 'new JoinData { ... },' - r'\s*new\s+JoinMetadata\s*\{(?P[^\}]+)\}\s*' # Match 'new JoinMetadata { ... }' - r'\)', # Match closing parenthesis of new JoinDataComplete + r'\[JoinName\("(?P[^"]+)"\)\]\s*' + r'public\s+JoinDataComplete\s+\w+\s*=\s*new\s+JoinDataComplete\s*\(\s*' + r'new\s+JoinData\s*\{[^}]*JoinNumber\s*=\s*(?P\d+)[^}]*\}\s*,\s*' + r'new\s+JoinMetadata\s*\{[^}]*' + r'Description\s*=\s*"(?P[^"]+)"[^}]*' + r'JoinType\s*=\s*eJoinType\.(?P\w+)[^}]*\}\s*\)', re.DOTALL ) joinmap_info = [] + for match in join_pattern.finditer(file_content): - join_name = match.group('join_name') - property_name = match.group('property_name') - join_data = match.group('join_data') - join_metadata = match.group('join_metadata') - - # Now parse join_data and join_metadata to extract join_number, description, join_type, etc. 
- - # Extract join_number from join_data - join_number_match = re.search(r'JoinNumber\s*=\s*(\d+)', join_data) - if join_number_match: - join_number = join_number_match.group(1) - else: - join_number = None - - # Extract description and join_type from join_metadata - description_match = re.search(r'Description\s*=\s*"([^"]+)"', join_metadata) - if description_match: - description = description_match.group(1) - else: - description = None - - join_type_match = re.search(r'JoinType\s*=\s*eJoinType\.(\w+)', join_metadata) - if join_type_match: - join_type = join_type_match.group(1) - else: - join_type = None - - joinmap_info.append({ - "name": join_name, - "join_number": join_number, - "type": join_type, - "description": description - }) + join_info = { + "name": match.group('join_name'), + "join_number": match.group('join_number'), + "description": match.group('description'), + "type": match.group('join_type') + } + joinmap_info.append(join_info) return joinmap_info def generate_markdown_chart(joins, section_title): if not joins: return '' + markdown_chart = f'### {section_title}\n\n' # Group joins by type - joins_by_type = {'Digital': [], 'Analog': [], 'Serial': []} + joins_by_type = {} for join in joins: - if join['type'] in joins_by_type: - joins_by_type[join['type']].append(join) - else: - joins_by_type['Digital'].append(join) # Default to Digital if type not recognized + join_type = join['type'] + if join_type not in joins_by_type: + joins_by_type[join_type] = [] + joins_by_type[join_type].append(join) + + # Sort joins within each type by join number + for join_type in joins_by_type: + joins_by_type[join_type].sort(key=lambda x: int(x['join_number'])) + # Process each join type for join_type in ['Digital', 'Analog', 'Serial']: - if joins_by_type[join_type]: - markdown_chart += f"#### {join_type}s\n\n" - markdown_chart += "| Join | Type (RW) | Description |\n" - markdown_chart += "| --- | --- | --- |\n" + if join_type in joins_by_type and joins_by_type[join_type]: + markdown_chart += f"#### {join_type}\n\n" + markdown_chart += "| Join | Description |\n" + markdown_chart += "|------|-------------|\n" + for join in joins_by_type[join_type]: - markdown_chart += f"| {join['join_number']} | R | {join['description']} |\n" + markdown_chart += f"| {join['join_number']} | {join['description']} |\n" + markdown_chart += '\n' + return markdown_chart def generate_config_example_markdown(sample_config): From 08553078ea9bbb03a3e89078e24f4ca6d5eabe5b Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 30 Oct 2024 08:53:10 -0400 Subject: [PATCH 19/28] feat: enhance class and join parsing with improved error handling and detailed logging --- .github/scripts/metadata.py | 172 ++++++++++++++++++++++++------------ 1 file changed, 116 insertions(+), 56 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 278ffe7..7cb7bae 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -75,48 +75,88 @@ def read_files_in_directory(directory): } def read_class_names_and_bases_from_files(directory): + print("\nScanning for class definitions...") class_defs = {} - class_pattern = re.compile( - r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes - r'(?:public\s+|private\s+|protected\s+)?' # Optional access modifier - r'(?:partial\s+)?' # Optional 'partial' keyword - r'class\s+([A-Za-z_]\w*)' # Class name - r'(?:\s*:\s*([^\{]+))?' 
# Optional base classes - r'\s*\{', # Opening brace - re.MULTILINE - ) for root, _, files in os.walk(directory): for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - for match in class_pattern.finditer(content): - class_name = match.group(1) - bases = match.group(2) - if bases: - base_classes = [b.strip() for b in bases.split(',')] - else: - base_classes = [] - class_defs[class_name] = base_classes + print(f"\nReading CS file: {file_path}") + try: + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + if "JoinMapBase" in content or "BridgeJoinMap" in content: + print(f"Found potential JoinMap file: {file}") + # Rest of the existing processing... + class_pattern = re.compile( + r'^\s*(?:\[[^\]]+\]\s*)*' + r'(?:public\s+|private\s+|protected\s+)?' + r'(?:partial\s+)?' + r'class\s+([A-Za-z_]\w*)' + r'(?:\s*:\s*([^\n{]+))?' + r'\s*\{', + re.MULTILINE + ) + for match in class_pattern.finditer(content): + class_name = match.group(1) + bases = match.group(2) + if bases: + base_classes = [b.strip() for b in bases.split(',')] + else: + base_classes = [] + class_defs[class_name] = base_classes + print(f"Found class: {class_name} with bases: {base_classes}") + except Exception as e: + print(f"Error reading file {file_path}: {str(e)}") return class_defs def find_joinmap_classes(class_defs): + print("\nSearching for JoinMap classes...") joinmap_classes = [] for class_name, base_classes in class_defs.items(): - if 'JoinMapBaseAdvanced' in base_classes: - joinmap_classes.append(class_name) + print(f"Checking class: {class_name}") + print(f"Base classes: {base_classes}") + + # Convert the found class name to match file naming convention + file_class_name = class_name.replace("OneBeyond", "1Beyond") + + is_joinmap = (any('JoinMapBase' in base for base in base_classes) or + 'BridgeJoinMap' in class_name or + any('JoinMapBaseAdvanced' in base for base in base_classes)) + if is_joinmap: + print(f"Found JoinMap class: {class_name}") + joinmap_classes.append(file_class_name) return joinmap_classes def find_file_in_directory(filename, root_directory): + print(f"\nSearching for file: {filename}") + print(f"Starting search in directory: {root_directory}") + + # Handle variations of the filename + possible_names = [ + filename, # Original name + filename.replace("OneBeyond", "1Beyond"), # Replace OneBeyond with 1Beyond + filename.upper(), # All uppercase + filename.lower(), # All lowercase + ] + for root, _, files in os.walk(root_directory): + print(f"\nChecking directory: {root}") + print(f"Files in directory: {files}") + for file in files: - if file == filename: - full_path = os.path.join(root, file) - return full_path + # Check against all possible variations + for possible_name in possible_names: + if file.lower() == possible_name.lower(): + full_path = os.path.join(root, file) + print(f"Found file at: {full_path}") + return full_path + + print(f"File not found: {filename}") return None def parse_joinmap_info(class_name, root_directory): + print(f"\nParsing JoinMap info for class: {class_name}") filename = f"{class_name}.cs" file_path = find_file_in_directory(filename, root_directory) @@ -124,34 +164,53 @@ def parse_joinmap_info(class_name, root_directory): print(f"File not found: {filename}. 
Skipping...") return [] + print(f"Processing file: {file_path}") with open(file_path, 'r', encoding='utf-8') as file: file_content = file.read() + print(f"File size: {len(file_content)} characters") # Remove C# comments to avoid false matches file_content = re.sub(r'//.*?\n|/\*.*?\*/', '', file_content, flags=re.DOTALL) - # Updated pattern to better match the join definitions - join_pattern = re.compile( - r'\[JoinName\("(?P[^"]+)"\)\]\s*' - r'public\s+JoinDataComplete\s+\w+\s*=\s*new\s+JoinDataComplete\s*\(\s*' - r'new\s+JoinData\s*\{[^}]*JoinNumber\s*=\s*(?P\d+)[^}]*\}\s*,\s*' - r'new\s+JoinMetadata\s*\{[^}]*' - r'Description\s*=\s*"(?P[^"]+)"[^}]*' - r'JoinType\s*=\s*eJoinType\.(?P\w+)[^}]*\}\s*\)', - re.DOTALL - ) - + # Split into regions first + regions = re.split(r'#region\s+(Digital|Analog|Serial)', file_content)[1:] # Skip the first split which is before first region + joinmap_info = [] - for match in join_pattern.finditer(file_content): - join_info = { - "name": match.group('join_name'), - "join_number": match.group('join_number'), - "description": match.group('description'), - "type": match.group('join_type') - } - joinmap_info.append(join_info) - + # Process regions in pairs (region name and content) + for i in range(0, len(regions), 2): + if i + 1 >= len(regions): + break + + region_type = regions[i].strip() + region_content = regions[i + 1] + + # Find the end of the region + region_content = region_content.split('#endregion')[0] + + # Simpler pattern to match individual joins + join_matches = re.finditer( + r'\[JoinName\("(?P[^"]+)"\)\][\s\n]*' + r'public\s+JoinDataComplete\s+\w+\s*=\s*new\s+JoinDataComplete\s*\(' + r'[\s\n]*new\s+JoinData\s*\{[^}]*?JoinNumber\s*=\s*(?P\d+)[^}]*\}' + r'[\s\n]*,[\s\n]*new\s+JoinMetadata\s*\{[^}]*?Description\s*=\s*"(?P[^"]+)"[^}]*\}', + region_content, + re.DOTALL + ) + + for match in join_matches: + join_info = { + "name": match.group('name'), + "join_number": match.group('number'), + "description": match.group('description'), + "type": region_type + } + print(f"Found join: {join_info}") + joinmap_info.append(join_info) + + # Sort joins by type and number + joinmap_info.sort(key=lambda x: (x['type'], int(x['join_number']))) + print(f"Total joins found: {len(joinmap_info)}") return joinmap_info def generate_markdown_chart(joins, section_title): @@ -161,26 +220,27 @@ def generate_markdown_chart(joins, section_title): markdown_chart = f'### {section_title}\n\n' # Group joins by type - joins_by_type = {} + joins_by_type = {'Digital': [], 'Analog': [], 'Serial': []} for join in joins: join_type = join['type'] - if join_type not in joins_by_type: - joins_by_type[join_type] = [] joins_by_type[join_type].append(join) - # Sort joins within each type by join number - for join_type in joins_by_type: - joins_by_type[join_type].sort(key=lambda x: int(x['join_number'])) - - # Process each join type + # Process each join type in order for join_type in ['Digital', 'Analog', 'Serial']: - if join_type in joins_by_type and joins_by_type[join_type]: - markdown_chart += f"#### {join_type}\n\n" + if joins_by_type[join_type]: + markdown_chart += f"#### {join_type}s\n\n" markdown_chart += "| Join | Description |\n" markdown_chart += "|------|-------------|\n" - for join in joins_by_type[join_type]: - markdown_chart += f"| {join['join_number']} | {join['description']} |\n" + # Sort joins by join number + sorted_joins = sorted(joins_by_type[join_type], key=lambda x: int(x['join_number'])) + + for join in sorted_joins: + # Clean up the description + description = 
join['description'].strip().replace('|', '\\|') + # Remove any newlines or extra spaces in description + description = ' '.join(description.split()) + markdown_chart += f"| {join['join_number']} | {description} |\n" markdown_chart += '\n' From 722fea24110d3de6ff4c5cd6edbde24fc089ca83 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 30 Oct 2024 10:35:46 -0400 Subject: [PATCH 20/28] feat: enhance class and join parsing with improved regex patterns and streamlined logic --- .github/scripts/metadata.py | 200 ++++++++++++++---------------------- 1 file changed, 76 insertions(+), 124 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 7cb7bae..1c56d3d 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -75,88 +75,48 @@ def read_files_in_directory(directory): } def read_class_names_and_bases_from_files(directory): - print("\nScanning for class definitions...") class_defs = {} + class_pattern = re.compile( + r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes + r'(?:public\s+|private\s+|protected\s+)?' # Optional access modifier + r'(?:partial\s+)?' # Optional 'partial' keyword + r'class\s+([A-Za-z_]\w*)' # Class name + r'(?:\s*:\s*([^\{]+))?' # Optional base classes + r'\s*\{', # Opening brace + re.MULTILINE + ) for root, _, files in os.walk(directory): for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) - print(f"\nReading CS file: {file_path}") - try: - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - if "JoinMapBase" in content or "BridgeJoinMap" in content: - print(f"Found potential JoinMap file: {file}") - # Rest of the existing processing... - class_pattern = re.compile( - r'^\s*(?:\[[^\]]+\]\s*)*' - r'(?:public\s+|private\s+|protected\s+)?' - r'(?:partial\s+)?' - r'class\s+([A-Za-z_]\w*)' - r'(?:\s*:\s*([^\n{]+))?' 
- r'\s*\{', - re.MULTILINE - ) - for match in class_pattern.finditer(content): - class_name = match.group(1) - bases = match.group(2) - if bases: - base_classes = [b.strip() for b in bases.split(',')] - else: - base_classes = [] - class_defs[class_name] = base_classes - print(f"Found class: {class_name} with bases: {base_classes}") - except Exception as e: - print(f"Error reading file {file_path}: {str(e)}") + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + for match in class_pattern.finditer(content): + class_name = match.group(1) + bases = match.group(2) + if bases: + base_classes = [b.strip() for b in bases.split(',')] + else: + base_classes = [] + class_defs[class_name] = base_classes return class_defs def find_joinmap_classes(class_defs): - print("\nSearching for JoinMap classes...") joinmap_classes = [] for class_name, base_classes in class_defs.items(): - print(f"Checking class: {class_name}") - print(f"Base classes: {base_classes}") - - # Convert the found class name to match file naming convention - file_class_name = class_name.replace("OneBeyond", "1Beyond") - - is_joinmap = (any('JoinMapBase' in base for base in base_classes) or - 'BridgeJoinMap' in class_name or - any('JoinMapBaseAdvanced' in base for base in base_classes)) - if is_joinmap: - print(f"Found JoinMap class: {class_name}") - joinmap_classes.append(file_class_name) + if 'JoinMapBaseAdvanced' in base_classes: + joinmap_classes.append(class_name) return joinmap_classes def find_file_in_directory(filename, root_directory): - print(f"\nSearching for file: {filename}") - print(f"Starting search in directory: {root_directory}") - - # Handle variations of the filename - possible_names = [ - filename, # Original name - filename.replace("OneBeyond", "1Beyond"), # Replace OneBeyond with 1Beyond - filename.upper(), # All uppercase - filename.lower(), # All lowercase - ] - for root, _, files in os.walk(root_directory): - print(f"\nChecking directory: {root}") - print(f"Files in directory: {files}") - for file in files: - # Check against all possible variations - for possible_name in possible_names: - if file.lower() == possible_name.lower(): - full_path = os.path.join(root, file) - print(f"Found file at: {full_path}") - return full_path - - print(f"File not found: {filename}") + if file == filename: + full_path = os.path.join(root, file) + return full_path return None def parse_joinmap_info(class_name, root_directory): - print(f"\nParsing JoinMap info for class: {class_name}") filename = f"{class_name}.cs" file_path = find_file_in_directory(filename, root_directory) @@ -164,86 +124,78 @@ def parse_joinmap_info(class_name, root_directory): print(f"File not found: {filename}. Skipping...") return [] - print(f"Processing file: {file_path}") with open(file_path, 'r', encoding='utf-8') as file: file_content = file.read() - print(f"File size: {len(file_content)} characters") - # Remove C# comments to avoid false matches - file_content = re.sub(r'//.*?\n|/\*.*?\*/', '', file_content, flags=re.DOTALL) + join_pattern = re.compile( + r'\[JoinName\("(?P[^"]+)"\)\]\s*' # Match the [JoinName("...")] attribute + r'public\s+JoinDataComplete\s+(?P\w+)\s*=\s*' # Match the property declaration + r'new\s+JoinDataComplete\s*\(' # Match 'new JoinDataComplete(' + r'\s*new\s+JoinData\s*\{(?P[^\}]+)\}\s*,' # Match 'new JoinData { ... },' + r'\s*new\s+JoinMetadata\s*\{(?P[^\}]+)\}\s*' # Match 'new JoinMetadata { ... 
}' + r'\)', # Match closing parenthesis of new JoinDataComplete + re.DOTALL + ) - # Split into regions first - regions = re.split(r'#region\s+(Digital|Analog|Serial)', file_content)[1:] # Skip the first split which is before first region - joinmap_info = [] - - # Process regions in pairs (region name and content) - for i in range(0, len(regions), 2): - if i + 1 >= len(regions): - break - - region_type = regions[i].strip() - region_content = regions[i + 1] - - # Find the end of the region - region_content = region_content.split('#endregion')[0] - - # Simpler pattern to match individual joins - join_matches = re.finditer( - r'\[JoinName\("(?P[^"]+)"\)\][\s\n]*' - r'public\s+JoinDataComplete\s+\w+\s*=\s*new\s+JoinDataComplete\s*\(' - r'[\s\n]*new\s+JoinData\s*\{[^}]*?JoinNumber\s*=\s*(?P\d+)[^}]*\}' - r'[\s\n]*,[\s\n]*new\s+JoinMetadata\s*\{[^}]*?Description\s*=\s*"(?P[^"]+)"[^}]*\}', - region_content, - re.DOTALL - ) - - for match in join_matches: - join_info = { - "name": match.group('name'), - "join_number": match.group('number'), - "description": match.group('description'), - "type": region_type - } - print(f"Found join: {join_info}") - joinmap_info.append(join_info) - - # Sort joins by type and number - joinmap_info.sort(key=lambda x: (x['type'], int(x['join_number']))) - print(f"Total joins found: {len(joinmap_info)}") + for match in join_pattern.finditer(file_content): + join_name = match.group('join_name') + property_name = match.group('property_name') + join_data = match.group('join_data') + join_metadata = match.group('join_metadata') + + # Now parse join_data and join_metadata to extract join_number, description, join_type, etc. + + # Extract join_number from join_data + join_number_match = re.search(r'JoinNumber\s*=\s*(\d+)', join_data) + if join_number_match: + join_number = join_number_match.group(1) + else: + join_number = None + + # Extract description and join_type from join_metadata + description_match = re.search(r'Description\s*=\s*"([^"]+)"', join_metadata) + if description_match: + description = description_match.group(1) + else: + description = None + + join_type_match = re.search(r'JoinType\s*=\s*eJoinType\.(\w+)', join_metadata) + if join_type_match: + join_type = join_type_match.group(1) + else: + join_type = None + + joinmap_info.append({ + "name": join_name, + "join_number": join_number, + "type": join_type, + "description": description + }) + return joinmap_info def generate_markdown_chart(joins, section_title): if not joins: return '' - markdown_chart = f'### {section_title}\n\n' # Group joins by type joins_by_type = {'Digital': [], 'Analog': [], 'Serial': []} for join in joins: - join_type = join['type'] - joins_by_type[join_type].append(join) + if join['type'] in joins_by_type: + joins_by_type[join['type']].append(join) + else: + joins_by_type['Digital'].append(join) # Default to Digital if type not recognized - # Process each join type in order for join_type in ['Digital', 'Analog', 'Serial']: if joins_by_type[join_type]: markdown_chart += f"#### {join_type}s\n\n" - markdown_chart += "| Join | Description |\n" - markdown_chart += "|------|-------------|\n" - - # Sort joins by join number - sorted_joins = sorted(joins_by_type[join_type], key=lambda x: int(x['join_number'])) - - for join in sorted_joins: - # Clean up the description - description = join['description'].strip().replace('|', '\\|') - # Remove any newlines or extra spaces in description - description = ' '.join(description.split()) - markdown_chart += f"| {join['join_number']} | {description} |\n" - 
+ markdown_chart += "| Join | Type (RW) | Description |\n" + markdown_chart += "| --- | --- | --- |\n" + for join in joins_by_type[join_type]: + markdown_chart += f"| {join['join_number']} | R | {join['description']} |\n" markdown_chart += '\n' - return markdown_chart def generate_config_example_markdown(sample_config): From 65d5bf2ec6924dd2b9a4e1217c0680ab8f99d0b4 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 30 Oct 2024 10:47:49 -0400 Subject: [PATCH 21/28] feat: enhance class name and base extraction with improved comment handling and generic type support --- .github/scripts/metadata.py | 60 +++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 1c56d3d..fab25ca 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -77,28 +77,58 @@ def read_files_in_directory(directory): def read_class_names_and_bases_from_files(directory): class_defs = {} class_pattern = re.compile( - r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes + r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes r'(?:public\s+|private\s+|protected\s+)?' # Optional access modifier - r'(?:partial\s+)?' # Optional 'partial' keyword - r'class\s+([A-Za-z_]\w*)' # Class name - r'(?:\s*:\s*([^\{]+))?' # Optional base classes - r'\s*\{', # Opening brace + r'(?:partial\s+)?' # Optional 'partial' keyword + r'class\s+([A-Za-z_]\w*)' # Class name + r'\s*:\s*([^{]+)?' # Capture all base classes after colon + r'\s*{', # Opening brace re.MULTILINE ) + for root, _, files in os.walk(directory): for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - for match in class_pattern.finditer(content): - class_name = match.group(1) - bases = match.group(2) - if bases: - base_classes = [b.strip() for b in bases.split(',')] - else: - base_classes = [] - class_defs[class_name] = base_classes + try: + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + # Remove single-line comments + content = re.sub(r'//.*$', '', content, flags=re.MULTILINE) + # Remove multi-line comments + content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL) + + for match in class_pattern.finditer(content): + class_name = match.group(1) + bases = match.group(2) + if bases: + # Split on comma, handle potential generic types properly + base_classes = [] + brace_count = 0 + current = [] + + for char in bases: + if char == '<': + brace_count += 1 + elif char == '>': + brace_count -= 1 + elif char == ',' and brace_count == 0: + base_classes.append(''.join(current).strip()) + current = [] + continue + current.append(char) + + if current: + base_classes.append(''.join(current).strip()) + + # Clean up base class names + base_classes = [b.split('.')[-1] for b in base_classes] + else: + base_classes = [] + class_defs[class_name] = base_classes + except (UnicodeDecodeError, IOError) as e: + print(f"Error reading {file_path}: {e}") + continue return class_defs def find_joinmap_classes(class_defs): From 233d18b993b77129825b7a563de37f1a9e966da3 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 30 Oct 2024 13:07:38 -0400 Subject: [PATCH 22/28] feat: optimize join parsing logic and improve performance with caching mechanism --- .github/scripts/metadata.py | 271 +++++++++++++++++++++--------------- 1 file changed, 162 insertions(+), 109 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 
fab25ca..0052da0 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -1,18 +1,32 @@ import os import re import json +import logging + +# Set up logging configuration +logging.basicConfig(level=logging.DEBUG) def extract_implemented_interfaces(file_content): + logging.debug("Extracting implemented interfaces and base classes.") interface_pattern = re.compile(r'class\s+\w+\s*:\s*([^{]+)') match = interface_pattern.search(file_content) if match: + logging.debug("Inheritance pattern matched in class definition.") items = match.group(1).split(',') interfaces = [item.strip() for item in items if item.strip().startswith('I')] - base_classes = [item.strip() for item in items if not item.strip().startswith('I') and not item.strip().startswith('EssentialsPluginDeviceFactory')] + base_classes = [ + item.strip() + for item in items + if not item.strip().startswith('I') and not item.strip().startswith('EssentialsPluginDeviceFactory') + ] + logging.debug(f"Interfaces extracted: {interfaces}") + logging.debug(f"Base classes extracted: {base_classes}") return interfaces, base_classes + logging.debug("No implemented interfaces or base classes found.") return [], [] def extract_supported_types(file_content): + logging.debug("Extracting supported types.") # Remove commented lines uncommented_content = re.sub(r'//.*', '', file_content) @@ -23,35 +37,46 @@ def extract_supported_types(file_content): for match in matches: current_types = [type_name.strip().strip('"') for type_name in match.split(',')] types.extend(current_types) + logging.debug(f"Current types extracted: {current_types}") # Remove duplicates and filter out unnecessary entries - return list(set(filter(None, types))) + unique_types = list(set(filter(None, types))) + logging.debug(f"Unique supported types: {unique_types}") + return unique_types def extract_minimum_essentials_framework_version(file_content): - # Update the regex to exclude comments or anything unnecessary. 
+ logging.debug("Extracting minimum Essentials Framework version.") version_pattern = re.compile(r'^\s*MinimumEssentialsFrameworkVersion\s*=\s*"([^"]+)"\s*;', re.MULTILINE) match = version_pattern.search(file_content) if match: - return match.group(1) + version = match.group(1) + logging.debug(f"Minimum Essentials Framework Version found: {version}") + return version + logging.debug("No Minimum Essentials Framework Version found.") return None def extract_public_methods(file_content): + logging.debug("Extracting public methods.") methods_pattern = re.compile(r'public\s+\w+\s+\w+\s*\([^)]*\)\s*') matches = methods_pattern.findall(file_content) - return [match.strip() for match in matches] + methods = [match.strip() for match in matches] + logging.debug(f"Public methods extracted: {methods}") + return methods def read_files_in_directory(directory): + logging.debug(f"Reading files in directory: {directory}") all_interfaces = [] all_base_classes = [] all_supported_types = [] all_minimum_versions = [] all_public_methods = [] - all_joins = [] for root, _, files in os.walk(directory): + logging.debug(f"Entering directory: {root}") for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) + logging.debug(f"Processing file: {file_path}") with open(file_path, 'r', encoding='utf-8') as f: content = f.read() interfaces, base_classes = extract_implemented_interfaces(content) @@ -66,6 +91,7 @@ def read_files_in_directory(directory): all_minimum_versions.append(minimum_version) all_public_methods.extend(public_methods) + logging.debug("Finished reading all files.") return { "interfaces": all_interfaces, "base_classes": all_base_classes, @@ -75,95 +101,78 @@ def read_files_in_directory(directory): } def read_class_names_and_bases_from_files(directory): + logging.debug(f"Reading class names and bases from files in directory: {directory}") class_defs = {} class_pattern = re.compile( - r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes + r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes r'(?:public\s+|private\s+|protected\s+)?' # Optional access modifier - r'(?:partial\s+)?' # Optional 'partial' keyword - r'class\s+([A-Za-z_]\w*)' # Class name - r'\s*:\s*([^{]+)?' # Capture all base classes after colon - r'\s*{', # Opening brace + r'(?:partial\s+)?' # Optional 'partial' keyword + r'class\s+([A-Za-z_]\w*)' # Class name + r'(?:\s*:\s*([^\{]+))?' 
# Optional base classes + r'\s*\{', # Opening brace re.MULTILINE ) - for root, _, files in os.walk(directory): + logging.debug(f"Entering directory: {root}") for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) - try: - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - # Remove single-line comments - content = re.sub(r'//.*$', '', content, flags=re.MULTILINE) - # Remove multi-line comments - content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL) - - for match in class_pattern.finditer(content): - class_name = match.group(1) - bases = match.group(2) - if bases: - # Split on comma, handle potential generic types properly - base_classes = [] - brace_count = 0 - current = [] - - for char in bases: - if char == '<': - brace_count += 1 - elif char == '>': - brace_count -= 1 - elif char == ',' and brace_count == 0: - base_classes.append(''.join(current).strip()) - current = [] - continue - current.append(char) - - if current: - base_classes.append(''.join(current).strip()) - - # Clean up base class names - base_classes = [b.split('.')[-1] for b in base_classes] - else: - base_classes = [] - class_defs[class_name] = base_classes - except (UnicodeDecodeError, IOError) as e: - print(f"Error reading {file_path}: {e}") - continue + logging.debug(f"Processing file: {file_path}") + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + for match in class_pattern.finditer(content): + class_name = match.group(1) + bases = match.group(2) + if bases: + base_classes = [b.strip() for b in bases.split(',')] + else: + base_classes = [] + logging.debug(f"Class '{class_name}' with bases: {base_classes}") + class_defs[class_name] = base_classes + logging.debug("Finished reading class definitions.") return class_defs def find_joinmap_classes(class_defs): + logging.debug("Finding classes that inherit from 'JoinMapBaseAdvanced'.") joinmap_classes = [] for class_name, base_classes in class_defs.items(): if 'JoinMapBaseAdvanced' in base_classes: + logging.debug(f"Class '{class_name}' is a JoinMap class.") joinmap_classes.append(class_name) return joinmap_classes def find_file_in_directory(filename, root_directory): + logging.debug(f"Searching for file '{filename}' in directory: {root_directory}") for root, _, files in os.walk(root_directory): - for file in files: - if file == filename: - full_path = os.path.join(root, file) - return full_path + if filename in files: + full_path = os.path.join(root, filename) + logging.debug(f"File found: {full_path}") + return full_path + logging.debug(f"File '{filename}' not found in directory '{root_directory}'.") return None def parse_joinmap_info(class_name, root_directory): + logging.debug(f"Parsing join map info for class '{class_name}'.") filename = f"{class_name}.cs" file_path = find_file_in_directory(filename, root_directory) if not file_path: - print(f"File not found: {filename}. Skipping...") + logging.warning(f"File not found: {filename}. 
Skipping...") return [] with open(file_path, 'r', encoding='utf-8') as file: file_content = file.read() + # Remove comments to prevent interference with regex + file_content = re.sub(r'//.*', '', file_content) + file_content = re.sub(r'/\*.*?\*/', '', file_content, flags=re.DOTALL) + + # Updated regex to handle multiline definitions and optional parameters join_pattern = re.compile( - r'\[JoinName\("(?P[^"]+)"\)\]\s*' # Match the [JoinName("...")] attribute - r'public\s+JoinDataComplete\s+(?P\w+)\s*=\s*' # Match the property declaration - r'new\s+JoinDataComplete\s*\(' # Match 'new JoinDataComplete(' - r'\s*new\s+JoinData\s*\{(?P[^\}]+)\}\s*,' # Match 'new JoinData { ... },' - r'\s*new\s+JoinMetadata\s*\{(?P[^\}]+)\}\s*' # Match 'new JoinMetadata { ... }' - r'\)', # Match closing parenthesis of new JoinDataComplete + r'\[JoinName\("(?P[^"]+)"\)\]\s*' # [JoinName("...")] + r'public\s+JoinDataComplete\s+(?P\w+)\s*=\s*' # public JoinDataComplete PropertyName = + r'new\s+JoinDataComplete\s*\(\s*' # new JoinDataComplete( + r'(?P.*?)\)\s*;', # Capture everything inside the parentheses re.DOTALL ) @@ -171,42 +180,68 @@ def parse_joinmap_info(class_name, root_directory): for match in join_pattern.finditer(file_content): join_name = match.group('join_name') property_name = match.group('property_name') - join_data = match.group('join_data') - join_metadata = match.group('join_metadata') - - # Now parse join_data and join_metadata to extract join_number, description, join_type, etc. - - # Extract join_number from join_data - join_number_match = re.search(r'JoinNumber\s*=\s*(\d+)', join_data) - if join_number_match: - join_number = join_number_match.group(1) + join_params = match.group('join_params') + + logging.debug(f"Processing join '{join_name}' in property '{property_name}'.") + + # Extract JoinData and JoinMetadata from join_params + join_data_match = re.search(r'new\s+JoinData\s*(?:\(\s*\))?\s*\{(.*?)\}', join_params, re.DOTALL) + join_metadata_match = re.search(r'new\s+JoinMetadata\s*(?:\(\s*\))?\s*\{(.*?)\}', join_params, re.DOTALL) + + # Initialize variables + join_number = None + description = None + join_type = None + + if join_data_match: + join_data_content = join_data_match.group(1) + join_number_match = re.search(r'JoinNumber\s*=\s*(\d+)', join_data_content) + if join_number_match: + join_number = join_number_match.group(1) + logging.debug(f"Join number found: {join_number}") + else: + logging.debug(f"No join number found in join data for '{join_name}'.") else: - join_number = None - - # Extract description and join_type from join_metadata - description_match = re.search(r'Description\s*=\s*"([^"]+)"', join_metadata) - if description_match: - description = description_match.group(1) + logging.debug(f"No JoinData found for '{join_name}'.") + + if join_metadata_match: + join_metadata_content = join_metadata_match.group(1) + description_match = re.search(r'Description\s*=\s*"([^"]+)"', join_metadata_content) + if description_match: + description = description_match.group(1) + logging.debug(f"Description found: '{description}'") + else: + logging.debug(f"No description found in join metadata for '{join_name}'.") + + join_type_match = re.search(r'JoinType\s*=\s*eJoinType\.(\w+)', join_metadata_content) + if join_type_match: + join_type = join_type_match.group(1) + logging.debug(f"Join type found: {join_type}") + else: + logging.debug(f"No join type found in join metadata for '{join_name}'.") else: - description = None - - join_type_match = re.search(r'JoinType\s*=\s*eJoinType\.(\w+)', 
join_metadata) - if join_type_match: - join_type = join_type_match.group(1) + logging.debug(f"No JoinMetadata found for '{join_name}'.") + + if join_name and join_number and join_type: + logging.debug(f"Adding join '{join_name}' to join map info.") + joinmap_info.append({ + "name": join_name, + "join_number": join_number, + "type": join_type, + "description": description + }) else: - join_type = None - - joinmap_info.append({ - "name": join_name, - "join_number": join_number, - "type": join_type, - "description": description - }) + logging.warning(f"Incomplete join information for '{join_name}'. Skipping.") return joinmap_info + + + def generate_markdown_chart(joins, section_title): + logging.debug(f"Generating markdown chart for section '{section_title}'.") if not joins: + logging.debug("No joins to include in the chart.") return '' markdown_chart = f'### {section_title}\n\n' @@ -226,9 +261,11 @@ def generate_markdown_chart(joins, section_title): for join in joins_by_type[join_type]: markdown_chart += f"| {join['join_number']} | R | {join['description']} |\n" markdown_chart += '\n' + logging.debug(f"Markdown chart generated for '{section_title}'.") return markdown_chart def generate_config_example_markdown(sample_config): + logging.debug("Generating config example markdown.") markdown = "### Config Example\n\n" markdown += "```json\n" markdown += json.dumps(sample_config, indent=4) @@ -236,17 +273,9 @@ def generate_config_example_markdown(sample_config): return markdown def generate_markdown_list(items, section_title): - """ - Generates a markdown header and list of items. - - Parameters: - - items (list): The list of items to include. - - section_title (str): The header for the section. - - Returns: - - str: The markdown content with the section header. - """ + logging.debug(f"Generating markdown list for section '{section_title}'.") if not items: + logging.debug(f"No items to include in section '{section_title}'.") return '' markdown = f'### {section_title}\n\n' for item in items: @@ -255,6 +284,7 @@ def generate_markdown_list(items, section_title): return markdown def parse_all_classes(directory): + logging.debug(f"Parsing all classes in directory: {directory}") class_defs = {} class_pattern = re.compile( r'^\s*(?:\[[^\]]+\]\s*)*' # Optional attributes @@ -276,14 +306,17 @@ def parse_all_classes(directory): re.MULTILINE | re.DOTALL ) for root, _, files in os.walk(directory): + logging.debug(f"Entering directory: {root}") for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) + logging.debug(f"Processing file: {file_path}") with open(file_path, 'r', encoding='utf-8') as f: content = f.read() # Find all class definitions for class_match in class_pattern.finditer(content): class_name = class_match.group(1) + logging.debug(f"Class found: {class_name}") class_start = class_match.end() # Find the matching closing brace for the class class_body, end_index = extract_class_body(content, class_start) @@ -300,10 +333,13 @@ def parse_all_classes(directory): "property_name": prop_name, "property_type": prop_type }) + logging.debug(f"Property found in class '{class_name}': {prop_name} ({prop_type})") class_defs[class_name] = properties + logging.debug("Finished parsing all classes.") return class_defs def extract_class_body(content, start_index): + logging.debug(f"Extracting class body starting at index {start_index}.") """ Extracts the body of a class from the content, starting at start_index. Returns the class body and the index where it ends. 
@@ -316,7 +352,9 @@ def extract_class_body(content, start_index): elif content[index] == '}': brace_count -= 1 index += 1 - return content[start_index:index - 1], index - 1 + class_body = content[start_index:index - 1] + logging.debug(f"Class body extracted. Length: {len(class_body)} characters.") + return class_body, index - 1 def generate_sample_value(property_type, class_defs, processed_classes=None): if processed_classes is None: @@ -324,6 +362,7 @@ def generate_sample_value(property_type, class_defs, processed_classes=None): property_type = property_type.strip() # Handle nullable types property_type = property_type.rstrip('?') + logging.debug(f"Generating sample value for type '{property_type}'.") # Handle primitive types if property_type in ('int', 'long', 'float', 'double', 'decimal'): return 0 @@ -347,7 +386,9 @@ def generate_sample_value(property_type, class_defs, processed_classes=None): # Handle custom classes elif property_type in class_defs: if property_type in processed_classes: + logging.debug(f"Already processed class '{property_type}', avoiding recursion.") return {} + logging.debug(f"Processing custom class '{property_type}'.") processed_classes.add(property_type) properties = class_defs[property_type] sample_obj = {} @@ -359,9 +400,11 @@ def generate_sample_value(property_type, class_defs, processed_classes=None): return sample_obj else: # Unknown type, default to a sample value + logging.debug(f"Unknown type '{property_type}', using default sample value.") return "SampleValue" def generate_sample_config(config_class_name, class_defs, supported_types): + logging.debug(f"Generating sample config for class '{config_class_name}'.") type_name = config_class_name[:-6] # Remove 'Config' if type_name not in supported_types: type_name = supported_types[0] if supported_types else type_name @@ -373,16 +416,21 @@ def generate_sample_config(config_class_name, class_defs, supported_types): "group": "Group", "properties": generate_sample_value(config_class_name, class_defs) } + logging.debug(f"Sample config generated: {config}") return config def read_readme_file(filepath): + logging.debug(f"Reading README file at: {filepath}") if not os.path.exists(filepath): - print(f"README.md file not found at {filepath}. A new file will be created.") + logging.warning(f"README.md file not found at {filepath}. 
A new file will be created.") return "" with open(filepath, 'r', encoding='utf-8') as f: - return f.read() + content = f.read() + logging.debug("README.md file content successfully read.") + return content def update_readme_section(readme_content, section_title, new_section_content): + logging.debug(f"Updating README section '{section_title}'.") start_marker = f'' end_marker = f'' @@ -396,14 +444,14 @@ def update_readme_section(readme_content, section_title, new_section_content): if match: section_content = match.group(1) if '' in section_content: - print(f"Skipping section: {section_title} (found )") + logging.info(f"Skipping section: {section_title} (found )") return readme_content # Return the original content unchanged else: - print(f"Updating existing section: {section_title}") + logging.debug(f"Updating existing section: {section_title}") updated_section = f'{start_marker}\n{new_section_content.rstrip()}\n{end_marker}' updated_readme = readme_content[:match.start()] + updated_section + readme_content[match.end():] else: - print(f"Adding new section: {section_title}") + logging.debug(f"Adding new section: {section_title}") # Ensure there's a newline before adding the new section if not readme_content.endswith('\n'): readme_content += '\n' @@ -412,11 +460,15 @@ def update_readme_section(readme_content, section_title, new_section_content): return updated_readme def remove_duplicates_preserve_order(seq): + logging.debug("Removing duplicates while preserving order.") seen = set() - return [x for x in seq if not (x in seen or seen.add(x))] + unique_list = [x for x in seq if not (x in seen or seen.add(x))] + logging.debug(f"Unique items: {unique_list}") + return unique_list if __name__ == "__main__": project_directory = os.path.abspath("./") + logging.info(f"Starting processing in project directory: {project_directory}") results = read_files_in_directory(project_directory) # Remove duplicates from interfaces and base classes while preserving order @@ -443,7 +495,7 @@ def remove_duplicates_preserve_order(seq): all_class_defs = parse_all_classes(project_directory) config_classes = [cls for cls in all_class_defs if cls.endswith('Config') or cls.endswith('ConfigObject')] if not config_classes: - print("No config classes found.") + logging.warning("No config classes found.") config_example_markdown = "" else: main_config_class = max(config_classes, key=lambda cls: len(all_class_defs[cls])) @@ -467,5 +519,6 @@ def remove_duplicates_preserve_order(seq): # Write the updated content back to README.md with open(readme_path, 'w', encoding='utf-8') as f: f.write(readme_content) + logging.info("README.md has been updated.") - print("README.md has been updated.") \ No newline at end of file + logging.info("Processing completed.") \ No newline at end of file From b5135af57bb8d490ca902c4deb1179ea85c539fd Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 4 Dec 2024 09:12:55 -0500 Subject: [PATCH 23/28] feat(metadata): add feedback extraction to documentation generator - Add extract_public_feedbacks function to parse BoolFeedback, IntFeedback, and StringFeedback - Update read_files_in_directory to collect feedback information - Add feedback sections to README.md generation --- .github/scripts/metadata.py | 42 ++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 0052da0..6c503ed 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -63,6 +63,30 @@ def 
extract_public_methods(file_content): logging.debug(f"Public methods extracted: {methods}") return methods +def extract_public_feedbacks(file_content): + logging.debug("Extracting public feedbacks.") + # Remove commented lines + uncommented_content = re.sub(r'//.*', '', file_content) + + # Define patterns for different feedback types + bool_feedback_pattern = re.compile(r'public\s+BoolFeedback\s+(\w+)') + int_feedback_pattern = re.compile(r'public\s+IntFeedback\s+(\w+)') + string_feedback_pattern = re.compile(r'public\s+StringFeedback\s+(\w+)') + + # Find all matches + bool_feedbacks = bool_feedback_pattern.findall(uncommented_content) + int_feedbacks = int_feedback_pattern.findall(uncommented_content) + string_feedbacks = string_feedback_pattern.findall(uncommented_content) + + feedbacks = { + 'bool_feedbacks': bool_feedbacks, + 'int_feedbacks': int_feedbacks, + 'string_feedbacks': string_feedbacks + } + + logging.debug(f"Extracted feedbacks: {feedbacks}") + return feedbacks + def read_files_in_directory(directory): logging.debug(f"Reading files in directory: {directory}") all_interfaces = [] @@ -70,6 +94,11 @@ def read_files_in_directory(directory): all_supported_types = [] all_minimum_versions = [] all_public_methods = [] + all_feedbacks = { + 'bool_feedbacks': [], + 'int_feedbacks': [], + 'string_feedbacks': [] + } for root, _, files in os.walk(directory): logging.debug(f"Entering directory: {root}") @@ -83,6 +112,7 @@ def read_files_in_directory(directory): supported_types = extract_supported_types(content) minimum_version = extract_minimum_essentials_framework_version(content) public_methods = extract_public_methods(content) + feedbacks = extract_public_feedbacks(content) all_interfaces.extend(interfaces) all_base_classes.extend(base_classes) @@ -90,6 +120,9 @@ def read_files_in_directory(directory): if minimum_version: all_minimum_versions.append(minimum_version) all_public_methods.extend(public_methods) + all_feedbacks['bool_feedbacks'].extend(feedbacks['bool_feedbacks']) + all_feedbacks['int_feedbacks'].extend(feedbacks['int_feedbacks']) + all_feedbacks['string_feedbacks'].extend(feedbacks['string_feedbacks']) logging.debug("Finished reading all files.") return { @@ -97,7 +130,8 @@ def read_files_in_directory(directory): "base_classes": all_base_classes, "supported_types": all_supported_types, "minimum_versions": all_minimum_versions, - "public_methods": all_public_methods + "public_methods": all_public_methods, + "feedbacks": all_feedbacks } def read_class_names_and_bases_from_files(directory): @@ -481,6 +515,9 @@ def remove_duplicates_preserve_order(seq): supported_types_markdown = generate_markdown_list(results["supported_types"], "Supported Types") minimum_versions_markdown = generate_markdown_list(results["minimum_versions"], "Minimum Essentials Framework Versions") public_methods_markdown = generate_markdown_list(results["public_methods"], "Public Methods") + bool_feedbacks_markdown = generate_markdown_list(results["feedbacks"]["bool_feedbacks"], "Bool Feedbacks") + int_feedbacks_markdown = generate_markdown_list(results["feedbacks"]["int_feedbacks"], "Int Feedbacks") + string_feedbacks_markdown = generate_markdown_list(results["feedbacks"]["string_feedbacks"], "String Feedbacks") # Generate Join Maps markdown class_defs = read_class_names_and_bases_from_files(project_directory) @@ -515,6 +552,9 @@ def remove_duplicates_preserve_order(seq): readme_content = update_readme_section(readme_content, "Interfaces Implemented", interfaces_markdown) readme_content = 
update_readme_section(readme_content, "Base Classes", base_classes_markdown) readme_content = update_readme_section(readme_content, "Public Methods", public_methods_markdown) + readme_content = update_readme_section(readme_content, "Bool Feedbacks", bool_feedbacks_markdown) + readme_content = update_readme_section(readme_content, "Int Feedbacks", int_feedbacks_markdown) + readme_content = update_readme_section(readme_content, "String Feedbacks", string_feedbacks_markdown) # Write the updated content back to README.md with open(readme_path, 'w', encoding='utf-8') as f: From af8cc08a8561c30c8b92d8c44a02bf459e06ec23 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 4 Dec 2024 11:26:05 -0500 Subject: [PATCH 24/28] fix(metadata): update feedback extraction regex patterns - Update regex patterns to match property-style feedback declarations - Add support for both field and property-style feedback syntax - Add filtering to clean whitespace from extracted feedback names --- .github/scripts/metadata.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 6c503ed..756d447 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -68,16 +68,21 @@ def extract_public_feedbacks(file_content): # Remove commented lines uncommented_content = re.sub(r'//.*', '', file_content) - # Define patterns for different feedback types - bool_feedback_pattern = re.compile(r'public\s+BoolFeedback\s+(\w+)') - int_feedback_pattern = re.compile(r'public\s+IntFeedback\s+(\w+)') - string_feedback_pattern = re.compile(r'public\s+StringFeedback\s+(\w+)') + # Define patterns for different feedback types with property syntax + bool_feedback_pattern = re.compile(r'public\s+BoolFeedback\s+(\w+)(?:\s*{[^}]*})?') + int_feedback_pattern = re.compile(r'public\s+IntFeedback\s+(\w+)(?:\s*{[^}]*})?') + string_feedback_pattern = re.compile(r'public\s+StringFeedback\s+(\w+)(?:\s*{[^}]*})?') # Find all matches bool_feedbacks = bool_feedback_pattern.findall(uncommented_content) int_feedbacks = int_feedback_pattern.findall(uncommented_content) string_feedbacks = string_feedback_pattern.findall(uncommented_content) + # Filter out any empty matches and strip whitespace + bool_feedbacks = [f.strip() for f in bool_feedbacks if f.strip()] + int_feedbacks = [f.strip() for f in int_feedbacks if f.strip()] + string_feedbacks = [f.strip() for f in string_feedbacks if f.strip()] + feedbacks = { 'bool_feedbacks': bool_feedbacks, 'int_feedbacks': int_feedbacks, From 681f68ea51063ccb951e060d2d6046900166aefb Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 4 Dec 2024 11:32:12 -0500 Subject: [PATCH 25/28] feat(metadata): update script to capture all feedback types - Add support for property-style feedback declarations - Enhance regex patterns to match all feedback formats - Fix feedback detection in C# files with get/private set - Include initialization with equals sign pattern --- .github/scripts/metadata.py | 87 +++++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 27 deletions(-) diff --git a/.github/scripts/metadata.py b/.github/scripts/metadata.py index 756d447..31528d7 100644 --- a/.github/scripts/metadata.py +++ b/.github/scripts/metadata.py @@ -64,32 +64,50 @@ def extract_public_methods(file_content): return methods def extract_public_feedbacks(file_content): - logging.debug("Extracting public feedbacks.") + logging.debug("Starting feedback extraction...") + logging.debug(f"Input content length: 
{len(file_content)}") + # Remove commented lines uncommented_content = re.sub(r'//.*', '', file_content) + logging.debug(f"Content length after removing comments: {len(uncommented_content)}") # Define patterns for different feedback types with property syntax - bool_feedback_pattern = re.compile(r'public\s+BoolFeedback\s+(\w+)(?:\s*{[^}]*})?') - int_feedback_pattern = re.compile(r'public\s+IntFeedback\s+(\w+)(?:\s*{[^}]*})?') - string_feedback_pattern = re.compile(r'public\s+StringFeedback\s+(\w+)(?:\s*{[^}]*})?') + bool_feedback_pattern = r'public\s+BoolFeedback\s+(\w+)(?:\s*{[^}]*}|\s*;|\s*=)' + int_feedback_pattern = r'public\s+IntFeedback\s+(\w+)(?:\s*{[^}]*}|\s*;|\s*=)' + string_feedback_pattern = r'public\s+StringFeedback\s+(\w+)(?:\s*{[^}]*}|\s*;|\s*=)' + + logging.debug(f"Using patterns:") + logging.debug(f"Bool pattern: {bool_feedback_pattern}") + logging.debug(f"Int pattern: {int_feedback_pattern}") + logging.debug(f"String pattern: {string_feedback_pattern}") # Find all matches - bool_feedbacks = bool_feedback_pattern.findall(uncommented_content) - int_feedbacks = int_feedback_pattern.findall(uncommented_content) - string_feedbacks = string_feedback_pattern.findall(uncommented_content) + bool_feedbacks = re.findall(bool_feedback_pattern, uncommented_content) + int_feedbacks = re.findall(int_feedback_pattern, uncommented_content) + string_feedbacks = re.findall(string_feedback_pattern, uncommented_content) + + logging.debug(f"Raw matches found:") + logging.debug(f"Bool feedbacks: {bool_feedbacks}") + logging.debug(f"Int feedbacks: {int_feedbacks}") + logging.debug(f"String feedbacks: {string_feedbacks}") # Filter out any empty matches and strip whitespace bool_feedbacks = [f.strip() for f in bool_feedbacks if f.strip()] int_feedbacks = [f.strip() for f in int_feedbacks if f.strip()] string_feedbacks = [f.strip() for f in string_feedbacks if f.strip()] + logging.debug(f"After filtering:") + logging.debug(f"Bool feedbacks: {bool_feedbacks}") + logging.debug(f"Int feedbacks: {int_feedbacks}") + logging.debug(f"String feedbacks: {string_feedbacks}") + feedbacks = { 'bool_feedbacks': bool_feedbacks, 'int_feedbacks': int_feedbacks, 'string_feedbacks': string_feedbacks } - logging.debug(f"Extracted feedbacks: {feedbacks}") + logging.debug(f"Final extracted feedbacks: {feedbacks}") return feedbacks def read_files_in_directory(directory): @@ -110,26 +128,36 @@ def read_files_in_directory(directory): for file in files: if file.endswith('.cs'): file_path = os.path.join(root, file) - logging.debug(f"Processing file: {file_path}") - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - interfaces, base_classes = extract_implemented_interfaces(content) - supported_types = extract_supported_types(content) - minimum_version = extract_minimum_essentials_framework_version(content) - public_methods = extract_public_methods(content) - feedbacks = extract_public_feedbacks(content) - - all_interfaces.extend(interfaces) - all_base_classes.extend(base_classes) - all_supported_types.extend(supported_types) - if minimum_version: - all_minimum_versions.append(minimum_version) - all_public_methods.extend(public_methods) - all_feedbacks['bool_feedbacks'].extend(feedbacks['bool_feedbacks']) - all_feedbacks['int_feedbacks'].extend(feedbacks['int_feedbacks']) - all_feedbacks['string_feedbacks'].extend(feedbacks['string_feedbacks']) + logging.debug(f"Processing C# file: {file_path}") + try: + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + logging.debug(f"Successfully 
read file: {file_path}") + interfaces, base_classes = extract_implemented_interfaces(content) + supported_types = extract_supported_types(content) + minimum_version = extract_minimum_essentials_framework_version(content) + public_methods = extract_public_methods(content) + feedbacks = extract_public_feedbacks(content) + + all_interfaces.extend(interfaces) + all_base_classes.extend(base_classes) + all_supported_types.extend(supported_types) + if minimum_version: + all_minimum_versions.append(minimum_version) + all_public_methods.extend(public_methods) + all_feedbacks['bool_feedbacks'].extend(feedbacks['bool_feedbacks']) + all_feedbacks['int_feedbacks'].extend(feedbacks['int_feedbacks']) + all_feedbacks['string_feedbacks'].extend(feedbacks['string_feedbacks']) + + logging.debug(f"Extracted from {file_path}:") + logging.debug(f"- Interfaces: {interfaces}") + logging.debug(f"- Base classes: {base_classes}") + logging.debug(f"- Feedbacks: {feedbacks}") + except Exception as e: + logging.error(f"Error processing file {file_path}: {str(e)}") logging.debug("Finished reading all files.") + logging.debug(f"Total feedbacks collected: {all_feedbacks}") return { "interfaces": all_interfaces, "base_classes": all_base_classes, @@ -506,7 +534,12 @@ def remove_duplicates_preserve_order(seq): return unique_list if __name__ == "__main__": - project_directory = os.path.abspath("./") + import sys + if len(sys.argv) > 1: + project_directory = os.path.abspath(sys.argv[1]) + else: + project_directory = os.path.abspath("./") + logging.info(f"Starting processing in project directory: {project_directory}") results = read_files_in_directory(project_directory) From 6aac0b74a9eee80012d9669a0109c4cb5eb3bb59 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 4 Dec 2024 11:51:01 -0500 Subject: [PATCH 26/28] fix(workflow): update script path and target directory - Add target directory argument to metadata.py script execution - Add debug output (pwd and ls) to help troubleshoot GitHub workflow - Fix script execution path to work with readme-automation checkout --- .github/workflows/update-readme.yml | 128 ++++++++++++++-------------- 1 file changed, 65 insertions(+), 63 deletions(-) diff --git a/.github/workflows/update-readme.yml b/.github/workflows/update-readme.yml index be128d0..0bcd34e 100644 --- a/.github/workflows/update-readme.yml +++ b/.github/workflows/update-readme.yml @@ -1,64 +1,66 @@ -name: Update README - -on: - workflow_call: - inputs: - target-branch: - description: 'The branch to commit the README.md updates to.' 
- required: true - type: string - -permissions: - contents: write - -jobs: - update-readme: - runs-on: ubuntu-latest - - steps: - - name: Checkout Caller Repository - uses: actions/checkout@v3 - with: - ref: ${{ inputs.target-branch }} - fetch-depth: 0 - - - name: Checkout Reusable Workflow Repository - uses: actions/checkout@v3 - with: - repository: PepperDash/readme-automation - path: readme-automation - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.x' - - - name: Install Python Dependencies - run: | - python -m pip install --upgrade pip - # pip install -r requirements.txt - - - name: Run README Update Script - run: | - python readme-automation/.github/scripts/metadata.py - - - name: Check for Changes - id: check_for_changes - run: | - if git diff --quiet; then - echo "no_changes=true" >> $GITHUB_OUTPUT - else - echo "no_changes=false" >> $GITHUB_OUTPUT - fi - - - name: Create or Switch to 'robot-docs' Branch - run: | - git checkout -B robot-docs - - - name: Commit and Push Changes to 'robot-docs' Branch - run: | - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" - git add README.md - git commit -m "Automated README update" || echo "No changes to commit" +name: Update README + +on: + workflow_call: + inputs: + target-branch: + description: 'The branch to commit the README.md updates to.' + required: true + type: string + +permissions: + contents: write + +jobs: + update-readme: + runs-on: ubuntu-latest + + steps: + - name: Checkout Caller Repository + uses: actions/checkout@v3 + with: + ref: ${{ inputs.target-branch }} + fetch-depth: 0 + + - name: Checkout Reusable Workflow Repository + uses: actions/checkout@v3 + with: + repository: PepperDash/readme-automation + path: readme-automation + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Install Python Dependencies + run: | + python -m pip install --upgrade pip + # pip install -r requirements.txt + + - name: Run README Update Script + run: | + pwd + ls -la + python readme-automation/.github/scripts/metadata.py . 
+ + - name: Check for Changes + id: check_for_changes + run: | + if git diff --quiet; then + echo "no_changes=true" >> $GITHUB_OUTPUT + else + echo "no_changes=false" >> $GITHUB_OUTPUT + fi + + - name: Create or Switch to 'robot-docs' Branch + run: | + git checkout -B robot-docs + + - name: Commit and Push Changes to 'robot-docs' Branch + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add README.md + git commit -m "Automated README update" || echo "No changes to commit" git push origin robot-docs --force \ No newline at end of file From 79a21f024444e13a14d74e26b2ee0a3298526776 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 4 Dec 2024 12:02:35 -0500 Subject: [PATCH 27/28] fix(workflow): simplify script execution path - Remove redundant workflow repository checkout - Use script directly from workflow repository via github.workspace - Fix target directory path to point to caller's repository root --- .github/workflows/update-readme.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/update-readme.yml b/.github/workflows/update-readme.yml index 0bcd34e..2739824 100644 --- a/.github/workflows/update-readme.yml +++ b/.github/workflows/update-readme.yml @@ -22,12 +22,6 @@ jobs: ref: ${{ inputs.target-branch }} fetch-depth: 0 - - name: Checkout Reusable Workflow Repository - uses: actions/checkout@v3 - with: - repository: PepperDash/readme-automation - path: readme-automation - - name: Set up Python uses: actions/setup-python@v4 with: @@ -42,7 +36,7 @@ jobs: run: | pwd ls -la - python readme-automation/.github/scripts/metadata.py . + python ${{ github.workspace }}/.github/scripts/metadata.py ${{ github.workspace }} - name: Check for Changes id: check_for_changes From af0ee46726b537dcdb9405591636bf467d977ae5 Mon Sep 17 00:00:00 2001 From: jtalborough Date: Wed, 4 Dec 2024 12:05:43 -0500 Subject: [PATCH 28/28] fix(workflow): properly handle reusable workflow paths - Add separate checkouts for caller and workflow repositories - Set working directory to caller's repository - Fix script path to reference workflow repository - Remove debug commands --- .github/workflows/update-readme.yml | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/.github/workflows/update-readme.yml b/.github/workflows/update-readme.yml index 2739824..c82ab19 100644 --- a/.github/workflows/update-readme.yml +++ b/.github/workflows/update-readme.yml @@ -21,6 +21,14 @@ jobs: with: ref: ${{ inputs.target-branch }} fetch-depth: 0 + path: repo + + - name: Checkout Workflow Repository + uses: actions/checkout@v3 + with: + repository: PepperDash/workflow-templates + ref: add-feedbacks + path: workflow-templates - name: Set up Python uses: actions/setup-python@v4 @@ -33,12 +41,12 @@ jobs: # pip install -r requirements.txt - name: Run README Update Script + working-directory: repo run: | - pwd - ls -la - python ${{ github.workspace }}/.github/scripts/metadata.py ${{ github.workspace }} + python ../workflow-templates/.github/scripts/metadata.py . - name: Check for Changes + working-directory: repo id: check_for_changes run: | if git diff --quiet; then @@ -48,10 +56,12 @@ jobs: fi - name: Create or Switch to 'robot-docs' Branch + working-directory: repo run: | git checkout -B robot-docs - name: Commit and Push Changes to 'robot-docs' Branch + working-directory: repo run: | git config --local user.email "action@github.com" git config --local user.name "GitHub Action"
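---

Note (illustration only, not part of the patch series): the feedback-extraction change introduced in PATCH 25 relies on regex patterns that accept field-style, property-style, and initialized declarations of BoolFeedback/IntFeedback/StringFeedback. The standalone Python sketch below shows those same patterns applied to a small, hypothetical C# snippet; the class and member names in SAMPLE_CS are invented for demonstration and do not come from any PepperDash plugin.

# Minimal sketch of the feedback-extraction approach from metadata.py (PATCH 25).
# The C# sample input is hypothetical; only the regex behavior is being shown.
import re

SAMPLE_CS = """
public class SampleDevice
{
    public BoolFeedback PowerIsOnFeedback { get; private set; }
    public IntFeedback VolumeLevelFeedback;
    // public StringFeedback CommentedOutFeedback;
    public StringFeedback CurrentInputFeedback = new StringFeedback(() => _input);
}
"""

def extract_public_feedbacks(file_content):
    # Strip single-line comments so commented-out declarations are ignored.
    uncommented = re.sub(r'//.*', '', file_content)

    # Match property ({ get; ... }), field (;), and initialized (=) declarations,
    # mirroring the patterns added in the patch series.
    patterns = {
        'bool_feedbacks': r'public\s+BoolFeedback\s+(\w+)(?:\s*{[^}]*}|\s*;|\s*=)',
        'int_feedbacks': r'public\s+IntFeedback\s+(\w+)(?:\s*{[^}]*}|\s*;|\s*=)',
        'string_feedbacks': r'public\s+StringFeedback\s+(\w+)(?:\s*{[^}]*}|\s*;|\s*=)',
    }
    return {key: re.findall(pattern, uncommented) for key, pattern in patterns.items()}

if __name__ == "__main__":
    print(extract_public_feedbacks(SAMPLE_CS))
    # Expected output:
    # {'bool_feedbacks': ['PowerIsOnFeedback'],
    #  'int_feedbacks': ['VolumeLevelFeedback'],
    #  'string_feedbacks': ['CurrentInputFeedback']}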