Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Automated gtest formatting checks #3370

Open
wants to merge 27 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 15 commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -615,6 +615,19 @@ pipeline {
buildHipClangJobAndReboot(setup_cmd: "", build_cmd: "", execute_cmd: execute_cmd, needs_gpu:false, needs_reboot:false)
}
}
stage('Check GTest Format') {
    agent { label rocmnode("nogpu") }
    when {
        // The checker validates files under test/gtest, so it must run when
        // either the gtest sources or the checker itself change — not only
        // on test/utils edits (reviewer-reported gap).
        anyOf {
            changeset "**/test/utils/**"
            changeset "**/test/gtest/**"
        }
    }
    steps {
        script {
            checkout scm
            sh 'cd ./test/utils && python3 gtest_formating_checks.py'
        }
    }
}

stage('HipNoGPU Debug Build Test') {
when {
beforeAgent true
Expand Down
1 change: 1 addition & 0 deletions test/gtest/adam.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
*******************************************************************************/
#include "adam.hpp"

// testing ci changes
namespace adam {

struct GPU_Adam_FP32 : AdamTest<float, float>
Expand Down
89 changes: 89 additions & 0 deletions test/utils/gtest_formating_checks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
import os
import re
import logging

# INFO level so per-file "skipping" messages are visible in CI logs.
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

# Root of the gtest sources, relative to this script's working directory
# (the CI step cd's into test/utils before running the script).
FOLDER_PATH = "../../test/gtest"

# Ignore list: Add test names or file paths you want to exclude
IGNORE_LIST = {
    "CPU_MIOpenDriverRegressionBigTensorTest_FP32",
    "../../test/gtest/reduce_custom_fp32.cpp",
}

# Valid enums and Regex for validation
VALID_HW_TYPES = {"CPU", "GPU"}
VALID_DATATYPES = {"FP8", "FP16", "FP32", "FP64", "BFP16", "BFP8", "I64", "I32", "I16", "I8", "NONE"}
# Suite names must look like <HW>_<Name>[_<MoreName>]_<DATATYPE>.  Both
# alternations are built from the sets above (sorted for a deterministic
# pattern) so the regex can never drift out of sync with the declared enums.
TESTSUITE_REGEX = re.compile(
    r"^(" + "|".join(sorted(VALID_HW_TYPES)) + r")_[A-Za-z0-9]+(?:_[A-Za-z0-9]+)*_("
    + "|".join(sorted(VALID_DATATYPES)) + r")$"
)
Comment on lines +21 to +25
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Doesn't it duplicate #3218 ?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This one extends on it + CI integration that runs upon adding new gtest

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we need separate python scripts?
Seems like we might be able to unify these.

TEST_P_REGEX = re.compile(r"TEST_P\(([^,]+),\s*([^)]+)\)")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add the word boundary check to these regexes--it should only be checking valid macro invocations.

# Word-boundary (\b) variants: only genuine macro invocations match (e.g.
# MY_TEST_P(...) is ignored).  The third pattern recognizes suites that are
# deliberately left uninstantiated via the gtest allow-macro.
TEST_P_REGEX = re.compile(r"\bTEST_P\(([^,]+),\s*([^)]+)\)")
INSTANTIATE_TEST_REGEX = re.compile(r"\bINSTANTIATE_TEST_SUITE_P\(\s*([^\n,]+),\s*([^\n,]+),")
ALLOW_UNINSTANTIATED_REGEX = re.compile(r"\bGTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST\(\s*([^\)]+)\)")

# \b keeps other macros that merely end in this name from matching
# (same word-boundary fix the reviewer requested for TEST_P_REGEX).
INSTANTIATE_TEST_REGEX = re.compile(r"\bINSTANTIATE_TEST_SUITE_P\(\s*([^\n,]+),\s*([^\n,]+),")
# Instantiation names must start with a recognized category, optionally
# followed by an alphanumeric qualifier (e.g. "SmokeGfx90a").  The trailing
# group uses * (can be empty), so the previous "( ... )?" was redundant.
TEST_TYPE_REGEX = re.compile(r"^(Smoke|Full|Perf|Unit)([A-Za-z0-9]*)$")


def analyze_tests(folder_path):
    """Scan every .cpp file under *folder_path* for gtest naming violations.

    Checks that:
      * each TEST_P suite name matches TESTSUITE_REGEX,
      * each TEST_P suite has a matching INSTANTIATE_TEST_SUITE_P, unless it
        is explicitly allowed via GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST
        (previously reported as a false failure — reviewer-flagged),
      * each INSTANTIATE_TEST_SUITE_P references an existing suite and uses a
        valid TEST_TYPE prefix.

    Entries in IGNORE_LIST (suite names or file paths) are skipped.
    Returns a list of human-readable error strings; empty when all pass.
    """
    # Compiled locally so this function stands alone: recognizes suites that
    # are deliberately left uninstantiated and must not be reported missing.
    allow_uninstantiated_regex = re.compile(
        r"\bGTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST\(\s*([^\)]+)\)"
    )
    errors = []

    for root, _, files in os.walk(folder_path):
        for file in files:
            if not file.endswith(".cpp"):
                continue
            file_path = os.path.join(root, file)

            if file_path in IGNORE_LIST:
                logging.info(f"Skipping ignored file: {file_path}")
                continue

            # Explicit encoding: CI may run under a POSIX locale; a stray
            # non-ASCII byte in a comment must not crash the whole check.
            with open(file_path, "r", encoding="utf-8", errors="replace") as f:
                content = f.read()

            # Extract TEST_P and INSTANTIATE_TEST_SUITE_P
            test_p_matches = TEST_P_REGEX.findall(content)
            instantiate_matches = INSTANTIATE_TEST_REGEX.findall(content)
            allowed_uninstantiated = {
                name.strip() for name in allow_uninstantiated_regex.findall(content)
            }

            # Strip captures so incidental whitespace never breaks matching.
            test_p_suites = {suite.strip(): info.strip() for suite, info in test_p_matches}
            instantiated_suites = {suite.strip(): test_type for test_type, suite in instantiate_matches}

            # Validate TEST_P suites
            for suite, info in test_p_suites.items():
                if suite in IGNORE_LIST:
                    logging.info(f"Skipping ignored test suite: {suite}")
                    continue

                if not TESTSUITE_REGEX.match(suite):
                    errors.append(f"{file_path}: Invalid TESTSUITE_NAME '{suite}' in TEST_P.")

                if suite not in instantiated_suites and suite not in allowed_uninstantiated:
                    errors.append(f"{file_path}: Test '{suite}.{info}' does not have a matching INSTANTIATE_TEST_SUITE_P.")

            # Validate instantiated suites
            for suite, test_type in instantiated_suites.items():
                # Multi-line macro bodies can carry line-continuation chars.
                normalized_test_type = test_type.replace("\\", "").strip()

                if suite in IGNORE_LIST:
                    logging.info(f"Skipping ignored instantiated suite: {suite}")
                    continue

                if suite not in test_p_suites:
                    errors.append(f"{file_path}: INSTANTIATE_TEST_SUITE_P references non-existent TESTSUITE_NAME '{suite}'.")
                if not TEST_TYPE_REGEX.match(normalized_test_type):
                    errors.append(f"{file_path}: Invalid TEST_TYPE '{test_type}' in INSTANTIATE_TEST_SUITE_P.")

    return errors


def main():
    """Run the gtest-format analysis; raise ValueError if any check fails."""
    issues = analyze_tests(FOLDER_PATH)

    # Success path first: nothing to report.
    if not issues:
        logging.info("All tests meet the criteria.")
        return

    logging.error("The following issues were found:")
    for issue in issues:
        logging.error(f" {issue}")
    raise ValueError("Validation failed. See the errors above.")


if __name__ == "__main__":
    main()