Initial working elastic logging for sanity performance tests: (#165)
* Initial working elastic logging for sanity performance tests:

NOTE: CA Certs failed outside of elastic host, will investigate further soon

* Add elasticsearch to requirements

* Performance test noqa

* Formatting Perf

* Updates for elastic

* Update elastic perf variables working with current version

* Update with tag field for elastic

* formatting

* Formatting

* README update

* Update README with reference to the Elastic changes
dkosteck authored Dec 8, 2023
1 parent f49cd3e commit 011a0af
Showing 6 changed files with 73 additions and 5 deletions.
13 changes: 11 additions & 2 deletions README.md
@@ -119,6 +119,12 @@ trafficgen_port: # trafficgen REST port
trafficgen_timeout: # trafficgen command timeout (in minutes)
trafficgen_rx_bps_limit: # trafficgen baseline comparison (bps)
log_performance: # boolean, use false to omit sanity performance test details in logs/result files (only pass or fail)
log_performance_elastic: # boolean, use true to upload the sanity performance bps results to the Elasticsearch node
# Below configures the SR_IOV_Sanity_Performance Elastic parameters, if log_performance_elastic is set
elastic_host: # IP address or hostname of the Elasticsearch node
elastic_port: # Port of the Elasticsearch node
elastic_username: # Elastic username
elastic_password: # Elastic password
```

A current version of Python is recommended to run the tests. As of writing, the minimum version to avoid warnings is 3.7. However, the tests have been successfully run up to version 3.11, the latest active release as of writing. The same is true of pip, which should be a current version (23.0 as of writing, but this should be upgraded in the following steps).
@@ -183,7 +189,7 @@ The common code has its own test cases. The majority of the common code test cas

A small portion of common code test cases are done using mock. These mock unit test cases are under the `sriov/common` folder, along with the common code itself. The purpose of the mock unit tests is to cover scenarios that are difficult to cover via the e2e tests. These tests must be run from the root of the repo, unless one sets the `PYTHONPATH` environment variable to include the root, in which case the mock tests may be run from another directory.

-## Debug Failed Test Case
+## Debug Failed Test Cases

When a test case is failing, one may want to immediately stop the test run and keep the failed setup for manual debugging. This cannot be achieved with the pytest `-x` option, as `-x` still allows the cleanup to happen. Instead, this can be done by using the `--skipclean` option.

@@ -194,8 +200,11 @@ The test execution will stop immediately without cleaning up, and one may access

After debugging is complete, one has to manually clean up the setup.

-## Uncommon options
+## Uncommon Options

The following test options are uncommon and meant to be used only in rare situations:
+ `--debug-execute`: debug command execution over the SSH session

## Storing Test Results (Experimental)

A natural extension of this testing framework involves storing the results of tests for historical purposes, as well as to query the data afterwards. To this end, we have implemented a (currently experimental/in-development) integration allowing the results of `SR_IOV_Sanity_Performance` to be pushed to an Elasticsearch instance. For the config fields required to use Elastic, see the `Usage` section above. As noted, this is an initial implementation and further development is needed to flesh out features.
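
For anyone experimenting with the integration, the following is a minimal sketch (not part of this commit) of reading the pushed results back out of Elasticsearch. It assumes the `test-perf-index` index name and the `rx_l1_bps`/`timestamp`/`tag` fields introduced by this commit; the endpoint and credentials are the placeholder values from `config_template.yaml`, and `verify_certs=False` mirrors the CA-cert workaround noted in the commit message.

```python
from elasticsearch import Elasticsearch

# Placeholder endpoint and credentials; substitute the values from config.yaml.
es = Elasticsearch(
    "https://192.168.1.1:9200",
    basic_auth=("elastic", "PASSWORD"),
    verify_certs=False,  # temporary workaround, per the commit message note
)

# Fetch the ten most recent performance documents, newest first.
resp = es.search(
    index="test-perf-index",
    query={"match_all": {}},
    sort=[{"timestamp": {"order": "desc"}}],
    size=10,
)
for hit in resp["hits"]["hits"]:
    doc = hit["_source"]
    print(doc["timestamp"], doc.get("tag"), doc["rx_l1_bps"])
```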
1 change: 1 addition & 0 deletions sriov/requirements.txt
@@ -4,3 +4,4 @@ PyYAML==6.0
pytest-html
docutils
gitpython
elasticsearch
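
A caveat on the unpinned `elasticsearch` entry above: the `basic_auth=` keyword used in `conftest.py` below was introduced with the 8.x client, so an older installed client would break the integration. A small sanity check (not part of this commit) one could run:

```python
# Hedged sanity check: confirm the installed elasticsearch client is 8.x,
# since the unpinned requirement could resolve to an older major version.
from importlib.metadata import version

major = int(version("elasticsearch").split(".")[0])
assert major >= 8, "conftest.py's basic_auth= keyword assumes the 8.x client"
```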
2 changes: 2 additions & 0 deletions sriov/tests/SR_IOV_MTU/test_SR_IOV_MTU.py
@@ -9,6 +9,7 @@
    get_vf_mac,
    switch_detected,
)
import time


def test_SR_IOV_MTU(dut, trafficgen, settings, testdata):
@@ -58,6 +59,7 @@ def test_SR_IOV_MTU(dut, trafficgen, settings, testdata):
    else:
        mtu = min(dut_mtu, trafficgen_mtu)

    time.sleep(5)
    assert set_mtu(trafficgen, trafficgen_pf, dut, pf, 0, mtu, testdata)

    steps = [f"ip link set {pf}v0 up", f"ip add add {dut_ip}/24 dev {pf}v0"]
14 changes: 13 additions & 1 deletion sriov/tests/SR_IOV_Sanity_Performance/test_SR_IOV_Sanity_Performance.py
@@ -1,3 +1,5 @@
from datetime import datetime
from sriov.tests.conftest import elastic
from sriov.common.utils import (
execute_and_assert,
execute_until_timeout,
@@ -13,7 +15,7 @@

# Use pytest --iteration to adjust the execution_number parameter for desired amount
# of repeated tests
-def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata):
+def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata): # noqa: C901
"""Test and ensure that VFs provision with MTU functions as intended
Args:
@@ -199,6 +201,16 @@ def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata):
    results = json.loads(outs[0][0])
    if settings.config["log_performance"]:
        print(json.dumps(results))
    if settings.config["log_performance_elastic"]:
        log_elastic(results)

    # Compare trafficgen results to config
    assert results["0"]["rx_l1_bps"] >= settings.config["trafficgen_rx_bps_limit"]


def log_elastic(results):
    elastic.elastic_index = "test-perf-index"
    elastic.elastic_doc["rx_l1_bps"] = results["0"]["rx_l1_bps"]
    elastic.elastic_doc["timestamp"] = datetime.now()

    print(elastic.elastic_index)
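
Taken together, `log_elastic` above and `elastic_push` in `conftest.py` below assemble and index a document roughly of the following shape; the values here are illustrative only, not real measurements.

```python
# Illustrative document shape; all values are made up.
example_doc = {
    "rx_l1_bps": 9990000000.0,  # from the trafficgen results dict
    "timestamp": "2023-12-08T12:00:00.000000",  # datetime.now(), ISO-serialized by the client
    "tag": "v1.2.3",  # git tag if one exists, otherwise the commit SHA (see conftest.py)
}
```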
8 changes: 7 additions & 1 deletion sriov/tests/config_template.yaml
@@ -17,4 +17,10 @@ trafficgen_img: "trafficgen:latest"
trafficgen_port: 8080
trafficgen_timeout: 12
trafficgen_rx_bps_limit: 9990000000
-log_performance: false
+log_performance: false
log_performance_elastic: true
elastic_host: 192.168.1.1
elastic_port: 9200
#elastic_ca_cert_path: "./http_ca.crt"
elastic_username: "elastic"
elastic_password: "PASSWORD"
40 changes: 39 additions & 1 deletion sriov/tests/conftest.py
@@ -1,3 +1,4 @@
from elasticsearch import Elasticsearch
import git
import os
import pytest
@@ -21,6 +22,12 @@
)


class elastic:
    # track elastic results (currently used in SR_IOV_Sanity_Performance)
    elastic_index = None
    elastic_doc = {}


def get_settings_obj() -> Config:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    config_file = script_dir + "/config.yaml"
@@ -188,6 +195,9 @@ def _cleanup(

    reset_command(dut, testdata)

    if settings.config["log_performance_elastic"]:
        elastic_push(settings, testdata)


def pytest_configure(config: Config) -> None:
    ShellHandler.debug_cmd_execute = config.getoption("--debug-execute")
@@ -228,6 +238,7 @@ def pytest_configure(config: Config) -> None:
cmd = "cat /sys/bus/pci/drivers/iavf/module/version"
dut.log_str(cmd)
code, out, err = dut.execute(cmd)
dut.log_str(str(code))
if code == 0:
iavf_driver = out[0].strip()
config._metadata["IAVF Driver"] = iavf_driver
@@ -253,7 +264,7 @@ def parse_file_for_field(file_path, field) -> str:


@pytest.fixture(autouse=True)
-def _report_extras(extra, request, settings, monkeypatch) -> None:
+def _report_extras(extra, request, settings, testdata, monkeypatch) -> None:
    monkeypatch.chdir(request.fspath.dirname)

    try:
@@ -298,6 +309,13 @@ def _report_extras(extra, request, settings, monkeypatch) -> None:
case_name = "No tag or commit hash: No Link to"
link = "#"

if settings.config["log_performance_elastic"]:
elastic.elastic_doc["tag"] = None
if git_tag:
elastic.elastic_doc["tag"] = str(git_tag)
elif sha:
elastic.elastic_doc["tag"] = str(sha.hexsha)

extra.append(
extras.html(
'<p>Link to the test specification: <a href="'
@@ -339,6 +357,26 @@ def pytest_generate_tests(metafunc) -> None:
    metafunc.parametrize("execution_number", range(end))


def elastic_push(settings, testdata):
    if settings.config["log_performance_elastic"]:
        es = Elasticsearch(
            (
                f'https://{settings.config["elastic_host"]}:'
                + f'{settings.config["elastic_port"]}'
            ),
            verify_certs=False,
            # ca_certs=settings.config["elastic_ca_cert_path"],
            basic_auth=(
                settings.config["elastic_username"],
                settings.config["elastic_password"],
            ),
        )
        es.info()

        resp = es.index(index=elastic.elastic_index, document=elastic.elastic_doc)
        print(resp["result"])


@pytest.fixture(scope="session")
def skipclean(request):
    return request.config.option.skipclean

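A follow-up suggested by the CA-cert note in the commit message: once certificate validation works outside the Elastic host, `verify_certs=False` could be replaced by the commented-out `ca_certs` option. A minimal sketch, assuming the `elastic_ca_cert_path` key (currently commented out in `config_template.yaml`) points at the CA certificate exported from the Elasticsearch node:

```python
from elasticsearch import Elasticsearch


def make_verified_client(config: dict) -> Elasticsearch:
    # Sketch only: verify TLS against the cluster CA instead of disabling checks.
    return Elasticsearch(
        f'https://{config["elastic_host"]}:{config["elastic_port"]}',
        ca_certs=config["elastic_ca_cert_path"],  # e.g. "./http_ca.crt"
        basic_auth=(config["elastic_username"], config["elastic_password"]),
    )
```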