diff --git a/README.md b/README.md index 897f78d..e3dfef2 100644 --- a/README.md +++ b/README.md @@ -119,6 +119,12 @@ trafficgen_port: # trafficgen REST port trafficgen_timeout: # trafficgen command timeout (in minutes) trafficgen_rx_bps_limit: # trafficgen baseline comparison (bps) log_performance: # boolean, use false to omit sanity performance test details in logs/result files (only pass or fail) +log_performance_elastic: # boolean, use true to upload the sanity performance bps to elastic node +# Below configures the SR_IOV_Sanity_Performance Elastic parameters, if log_performance_elastic is set +elastic_host: # IP address or hostname of elastic +elastic_port: # Port of elastic +elastic_username: # Elastic username +elastic_password: # Elastic password ``` A current version of Python is recommended to run the tests. As of writing the minimum version to avoid warnings would be 3.7. However, the tests have been successfully run up to version 3.11, the latest active release as of writing. The same is true of pip, which should be a current version (23.0 as of writing, but this should be upgraded in the following steps). @@ -183,7 +189,7 @@ The common code has its own test cases. The majority of the common code test cas A small portion of common code test cases are done using mock. These mock unit test cases are under the `sriov/common` folder, along with the common code itself. The purpose of the mock unit tests is to cover scenarios that are difficult to cover via the e2e tests. These tests must be run from the root of the repo, unless one sets the `PYTHONPATH` environment variable to include the root, in which case the mock tests may be run from another directory. -## Debug Failed Test Case +## Debug Failed Test Cases When a test case is failing, one may want to immediately stop the test run and keep the failed setup for manual debugging. This can not be achieved with the pytest `-x` option, as `-x` still allow the cleanup to happen. 
Instead, this can be done by using the `--skipclean` option. @@ -194,8 +200,11 @@ The test execution will stop immediately without cleaning up, and one may access After the debug is complete, one has to manually clean up the setup. -## Uncommon options +## Uncommon Options The following test options are uncommon and meant to use under rare situations: + `--debug-execute`: debug command execution over the ssh session +## Storing Test Results (Experimental) + +A natural extension of this testing framework involves storing results of tests for historical purposes, as well as to query the data afterwards. To this end we have implemented a (currently experimental/in development) integration allowing the results of `SR_IOV_Sanity_Performance` to be pushed to an Elasticsearch instance. To see config fields required for using Elastic, see the `Usage` section above. As noted, this is an initial implementation and further development is needed to flesh out features. \ No newline at end of file diff --git a/sriov/requirements.txt b/sriov/requirements.txt index f766577..c8eb95e 100644 --- a/sriov/requirements.txt +++ b/sriov/requirements.txt @@ -4,3 +4,4 @@ PyYAML==6.0 pytest-html docutils gitpython +elasticsearch diff --git a/sriov/tests/SR_IOV_MTU/test_SR_IOV_MTU.py b/sriov/tests/SR_IOV_MTU/test_SR_IOV_MTU.py index 6992881..22d5410 100644 --- a/sriov/tests/SR_IOV_MTU/test_SR_IOV_MTU.py +++ b/sriov/tests/SR_IOV_MTU/test_SR_IOV_MTU.py @@ -9,6 +9,7 @@ get_vf_mac, switch_detected, ) +import time def test_SR_IOV_MTU(dut, trafficgen, settings, testdata): @@ -58,6 +59,7 @@ def test_SR_IOV_MTU(dut, trafficgen, settings, testdata): else: mtu = min(dut_mtu, trafficgen_mtu) + time.sleep(5) assert set_mtu(trafficgen, trafficgen_pf, dut, pf, 0, mtu, testdata) steps = [f"ip link set {pf}v0 up", f"ip add add {dut_ip}/24 dev {pf}v0"] diff --git a/sriov/tests/SR_IOV_Sanity_Performance/test_SR_IOV_Sanity_Performance.py b/sriov/tests/SR_IOV_Sanity_Performance/test_SR_IOV_Sanity_Performance.py 
index 9c2bfd5..bd4546d 100644 --- a/sriov/tests/SR_IOV_Sanity_Performance/test_SR_IOV_Sanity_Performance.py +++ b/sriov/tests/SR_IOV_Sanity_Performance/test_SR_IOV_Sanity_Performance.py @@ -1,3 +1,5 @@ +from datetime import datetime +from sriov.tests.conftest import elastic from sriov.common.utils import ( execute_and_assert, execute_until_timeout, @@ -13,7 +15,7 @@ # Use pytest --iteration to adjust the execution_number parameter for desired amount # of repeated tests -def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata): +def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata): # noqa: C901 """Test and ensure that VFs provision with MTU functions as intended Args: @@ -199,6 +201,16 @@ def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata): results = json.loads(outs[0][0]) if settings.config["log_performance"]: print(json.dumps(results)) + if settings.config["log_performance_elastic"]: + log_elastic(results) # Compare trafficgen results to config assert results["0"]["rx_l1_bps"] >= settings.config["trafficgen_rx_bps_limit"] + + +def log_elastic(results): + elastic.elastic_index = "test-perf-index" + elastic.elastic_doc["rx_l1_bps"] = results["0"]["rx_l1_bps"] + elastic.elastic_doc["timestamp"] = datetime.now() + + print(elastic.elastic_index) diff --git a/sriov/tests/config_template.yaml b/sriov/tests/config_template.yaml index 580cd9b..b2467e9 100644 --- a/sriov/tests/config_template.yaml +++ b/sriov/tests/config_template.yaml @@ -17,4 +17,10 @@ trafficgen_img: "trafficgen:latest" trafficgen_port: 8080 trafficgen_timeout: 12 trafficgen_rx_bps_limit: 9990000000 -log_performance: false \ No newline at end of file +log_performance: false +log_performance_elastic: true +elastic_host: 192.168.1.1 +elastic_port: 9200 +#elastic_ca_cert_path: "./http_ca.crt" +elastic_username: "elastic" +elastic_password: "PASSWORD" \ No newline at end of file diff --git a/sriov/tests/conftest.py b/sriov/tests/conftest.py index 
8a00cad..a9657b4 100644 --- a/sriov/tests/conftest.py +++ b/sriov/tests/conftest.py @@ -1,3 +1,4 @@ +from elasticsearch import Elasticsearch import git import os import pytest @@ -21,6 +22,12 @@ ) +class elastic: + # track elastic results (currently used in SR_IOV_Sanity_Performance) + elastic_index = None + elastic_doc = {} + + def get_settings_obj() -> Config: script_dir = os.path.dirname(os.path.realpath(__file__)) config_file = script_dir + "/config.yaml" @@ -188,6 +195,9 @@ def _cleanup( reset_command(dut, testdata) + if settings.config["log_performance_elastic"]: + elastic_push(settings, testdata) + def pytest_configure(config: Config) -> None: ShellHandler.debug_cmd_execute = config.getoption("--debug-execute") @@ -228,6 +238,7 @@ def pytest_configure(config: Config) -> None: cmd = "cat /sys/bus/pci/drivers/iavf/module/version" dut.log_str(cmd) code, out, err = dut.execute(cmd) + dut.log_str(str(code)) if code == 0: iavf_driver = out[0].strip() config._metadata["IAVF Driver"] = iavf_driver @@ -253,7 +264,7 @@ def parse_file_for_field(file_path, field) -> str: @pytest.fixture(autouse=True) -def _report_extras(extra, request, settings, monkeypatch) -> None: +def _report_extras(extra, request, settings, testdata, monkeypatch) -> None: monkeypatch.chdir(request.fspath.dirname) try: @@ -298,6 +309,13 @@ def _report_extras(extra, request, settings, monkeypatch) -> None: case_name = "No tag or commit hash: No Link to" link = "#" + if settings.config["log_performance_elastic"]: + elastic.elastic_doc["tag"] = None + if git_tag: + elastic.elastic_doc["tag"] = str(git_tag) + elif sha: + elastic.elastic_doc["tag"] = str(sha.hexsha) + extra.append( extras.html( '

Link to the test specification: