Initial working elastic logging for sanity performance tests: #165

Merged: 11 commits, Dec 8, 2023
8 changes: 7 additions & 1 deletion README.md
@@ -117,6 +117,12 @@ trafficgen_port: # trafficgen REST port
trafficgen_timeout: # trafficgen command timeout (in minutes)
trafficgen_rx_bps_limit: # trafficgen baseline comparison (bps)
log_performance: # boolean, use false to omit sanity performance test details in logs/result files (only pass or fail)
log_performance_elastic: # boolean, use true to upload the sanity performance bps to elastic node
# Below configures the SR_IOV_Sanity_Performance Elastic parameters, if log_performance_elastic is set
elastic_host: # IP address or hostname of elastic
elastic_port: # Port of elastic
elastic_username: # Elastic username
elastic_password: # Elastic password
```

A current version of Python is recommended to run the tests. As of writing, the minimum version that avoids warnings is 3.7. However, the tests have been run successfully up to version 3.11, the latest active release as of writing. The same is true of pip, which should be a current version (23.0 as of writing, though it will be upgraded in the following steps).
@@ -181,7 +187,7 @@ The common code has its own test cases.

A small portion of common code test cases are done using mock. These mock unit test cases are under the `sriov/common` folder, along with the common code itself. The purpose of the mock unit tests is to cover scenarios that are difficult to cover via the e2e tests. These tests must be run from the root of the repo, unless one sets the `PYTHONPATH` environment variable to include the root, in which case the mock tests may be run from another directory.
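
For illustration, here is a minimal sketch of driving the mock tests from another directory by setting `PYTHONPATH` to the repo root; the checkout path is an assumption, and pytest collection of `sriov/common` follows the layout described above.

```python
# Hypothetical sketch: run the mock unit tests from outside the repo root by
# putting the root on PYTHONPATH. The checkout path below is an assumption.
import os
import subprocess

repo_root = "/path/to/repo"  # assumed location of the cloned repository
env = dict(os.environ, PYTHONPATH=repo_root)
subprocess.run(
    ["python", "-m", "pytest", os.path.join(repo_root, "sriov", "common")],
    env=env,
    check=True,
)
```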

## Debug Failed Test Case
## Debug Failed Test Cases

When a test case is failing, one may want to stop the test run immediately and keep the failed setup for manual debugging. This cannot be achieved with the pytest `-x` option, as `-x` still allows the cleanup to happen. Instead, use the `--skipclean` option.

1 change: 1 addition & 0 deletions sriov/requirements.txt
@@ -4,3 +4,4 @@ PyYAML==6.0
pytest-html
docutils
gitpython
elasticsearch
2 changes: 2 additions & 0 deletions sriov/tests/SR_IOV_MTU/test_SR_IOV_MTU.py
@@ -9,6 +9,7 @@
    get_vf_mac,
    switch_detected,
)
import time


def test_SR_IOV_MTU(dut, trafficgen, settings, testdata):
@@ -58,6 +59,7 @@ def test_SR_IOV_MTU(dut, trafficgen, settings, testdata):
    else:
        mtu = min(dut_mtu, trafficgen_mtu)

    time.sleep(5)
    assert set_mtu(trafficgen, trafficgen_pf, dut, pf, 0, mtu, testdata)

    steps = [f"ip link set {pf}v0 up", f"ip add add {dut_ip}/24 dev {pf}v0"]
@@ -1,3 +1,5 @@
from datetime import datetime
from sriov.tests.conftest import elastic
from sriov.common.utils import (
    execute_and_assert,
    execute_until_timeout,
@@ -13,7 +15,7 @@

# Use pytest --iteration to adjust the execution_number parameter for desired amount
# of repeated tests
def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata):
def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata): # noqa: C901
"""Test and ensure that VFs provision with MTU functions as intended

Args:
@@ -191,6 +193,16 @@ def test_SRIOV_Sanity_Performance(dut, trafficgen, settings, testdata):
    results = json.loads(outs[0][0])
    if settings.config["log_performance"]:
        print(json.dumps(results))
    if settings.config["log_performance_elastic"]:
        log_elastic(results)

    # Compare trafficgen results to config
    assert results["0"]["rx_l1_bps"] >= settings.config["trafficgen_rx_bps_limit"]


def log_elastic(results):
    elastic.elastic_index = "test-perf-index"
    elastic.elastic_doc["rx_l1_bps"] = results["0"]["rx_l1_bps"]
    elastic.elastic_doc["timestamp"] = datetime.now()

    print(elastic.elastic_index)
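
To make the data flow concrete, the sketch below shows the approximate shape of the shared `elastic` state after `log_elastic` runs and `_report_extras` has recorded the git tag; the field values are illustrative, not real results.

```python
# Illustrative only: approximate contents of the shared elastic state that
# elastic_push() in conftest.py later uploads during cleanup.
from datetime import datetime

elastic_index = "test-perf-index"    # set in log_elastic()
elastic_doc = {
    "rx_l1_bps": 9_995_000_000,      # parsed from the trafficgen JSON results
    "timestamp": datetime.now(),     # set in log_elastic()
    "tag": "example-tag-or-sha",     # git tag or commit SHA from _report_extras()
}
```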
8 changes: 7 additions & 1 deletion sriov/tests/config_template.yaml
@@ -17,4 +17,10 @@ trafficgen_img: "trafficgen:latest"
trafficgen_port: 8080
trafficgen_timeout: 12
trafficgen_rx_bps_limit: 9990000000
log_performance: false
log_performance_elastic: true
elastic_host: 192.168.1.1
elastic_port: 9200
#elastic_ca_cert_path: "./http_ca.crt"
elastic_username: "elastic"
elastic_password: "PASSWORD"
40 changes: 39 additions & 1 deletion sriov/tests/conftest.py
@@ -1,3 +1,4 @@
from elasticsearch import Elasticsearch
import git
import os
import pytest
@@ -20,6 +21,12 @@
)


class elastic:
    # track elastic results (currently used in SR_IOV_Sanity_Performance)
    elastic_index = None
    elastic_doc = {}


def get_settings_obj() -> Config:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    config_file = script_dir + "/config.yaml"
@@ -175,6 +182,9 @@ def _cleanup(

    reset_command(dut, testdata)

    if settings.config["log_performance_elastic"]:
        elastic_push(settings, testdata)


def pytest_configure(config: Config) -> None:
    ShellHandler.debug_cmd_execute = config.getoption("--debug-execute")
@@ -215,6 +225,7 @@ def pytest_configure(config: Config) -> None:
cmd = "cat /sys/bus/pci/drivers/iavf/module/version"
dut.log_str(cmd)
code, out, err = dut.execute(cmd)
dut.log_str(str(code))
if code == 0:
iavf_driver = out[0].strip()
config._metadata["IAVF Driver"] = iavf_driver
@@ -240,7 +251,7 @@ def parse_file_for_field(file_path, field) -> str:


@pytest.fixture(autouse=True)
def _report_extras(extra, request, settings, monkeypatch) -> None:
def _report_extras(extra, request, settings, testdata, monkeypatch) -> None:
    monkeypatch.chdir(request.fspath.dirname)

    try:
@@ -285,6 +296,13 @@ def _report_extras(extra, request, settings, monkeypatch) -> None:
        case_name = "No tag or commit hash: No Link to"
        link = "#"

if settings.config["log_performance_elastic"]:
elastic.elastic_doc["tag"] = None
if git_tag:
elastic.elastic_doc["tag"] = str(git_tag)
elif sha:
elastic.elastic_doc["tag"] = str(sha.hexsha)

    extra.append(
        extras.html(
            '<p>Link to the test specification: <a href="'
@@ -326,6 +344,26 @@ def pytest_generate_tests(metafunc) -> None:
        metafunc.parametrize("execution_number", range(end))


def elastic_push(settings, testdata):
    if settings.config["log_performance_elastic"]:
        es = Elasticsearch(
            (
                f'https://{settings.config["elastic_host"]}:'
                + f'{settings.config["elastic_port"]}'
            ),
            verify_certs=False,
            # ca_certs=settings.config["elastic_ca_cert_path"],
            basic_auth=(
                settings.config["elastic_username"],
                settings.config["elastic_password"],
            ),
        )
        es.info()

        resp = es.index(index=elastic.elastic_index, document=elastic.elastic_doc)
        print(resp["result"])


@pytest.fixture(scope="session")
def skipclean(request):
    return request.config.option.skipclean
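
As a quick sanity check after a run, the uploaded documents can be queried back with the same client settings; this is a minimal sketch assuming the Elasticsearch 8.x Python client and the example values from `config_template.yaml`.

```python
# Hedged sketch: read back the performance documents pushed by elastic_push().
# Host, port, and credentials mirror the example config; adjust to your setup.
from elasticsearch import Elasticsearch

es = Elasticsearch(
    "https://192.168.1.1:9200",
    basic_auth=("elastic", "PASSWORD"),
    verify_certs=False,  # or pass ca_certs="./http_ca.crt" to verify TLS
)
resp = es.search(index="test-perf-index", query={"match_all": {}}, size=5)
for hit in resp["hits"]["hits"]:
    print(hit["_source"].get("tag"), hit["_source"].get("rx_l1_bps"))
```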