From 88e9cc04e15214975ecdfd6b60df462346a6e04b Mon Sep 17 00:00:00 2001
From: Samuel Dobron
Date: Fri, 31 May 2024 15:47:00 +0200
Subject: [PATCH] Recipes: Full conntrack table insertion rate recipe

Recipe measures the insertion/deletion rate of a full[1] conntrack table.

[1] The conntrack table is implemented as a hashtable, so "full" means
2/3 of the buckets being occupied.
---
 .../ENRT/CTFulltableInsertionRateRecipe.py    |   8 +
 .../ConfigMixins/LongLivedConnectionsMixin.py | 230 ++++++++++++++++++
 lnst/Recipes/ENRT/__init__.py                 |   2 +-
 3 files changed, 239 insertions(+), 1 deletion(-)
 create mode 100644 lnst/Recipes/ENRT/CTFulltableInsertionRateRecipe.py
 create mode 100644 lnst/Recipes/ENRT/ConfigMixins/LongLivedConnectionsMixin.py

diff --git a/lnst/Recipes/ENRT/CTFulltableInsertionRateRecipe.py b/lnst/Recipes/ENRT/CTFulltableInsertionRateRecipe.py
new file mode 100644
index 000000000..dd138f0f1
--- /dev/null
+++ b/lnst/Recipes/ENRT/CTFulltableInsertionRateRecipe.py
@@ -0,0 +1,8 @@
+from .SimpleNetworkRecipe import SimpleNetworkRecipe
+
+from .CTInsertionRateNftablesRecipe import CTInsertionRateNftablesRecipe
+from .ConfigMixins.LongLivedConnectionsMixin import LongLivedConnectionsMixin
+
+class CTFulltableInsertionRateRecipe(LongLivedConnectionsMixin, CTInsertionRateNftablesRecipe):
+    pass
+
diff --git a/lnst/Recipes/ENRT/ConfigMixins/LongLivedConnectionsMixin.py b/lnst/Recipes/ENRT/ConfigMixins/LongLivedConnectionsMixin.py
new file mode 100644
index 000000000..0f5ce1faa
--- /dev/null
+++ b/lnst/Recipes/ENRT/ConfigMixins/LongLivedConnectionsMixin.py
@@ -0,0 +1,230 @@
+import time
+import logging
+import signal
+import copy
+from math import ceil
+from lnst.Tests.LongLivedConnections import LongLivedServer, LongLivedClient
+from lnst.Common.IpAddress import Ip6Address, Ip4Address
+from lnst.Common.IpAddress import interface_addresses
+from lnst.Common.Parameters import IntParam
+from lnst.Common.IpAddress import Ip4Address, Ip6Address, BaseIpAddress
+from lnst.Recipes.ENRT.helpers import ip_endpoint_pairs
+from lnst.Common.IpAddress import ip_version_string
+
+
+class LongLivedConnectionsMixin:
+    """
+    This mixin adds support for long-lived connections.
+
+    Based on the `long_lived_conns` parameter, it creates the requested
+    number of long-lived connections between the hosts. The receiver acts
+    as the server, while the generator acts as the client.
+
+    Only the IPs used for the long-lived connections are handled by this
+    mixin. Therefore, if your test requires perf IPs, those should be
+    configured by the parent's test_wide_configuration() method.
+    The IPs used for the long-lived client and server are added based on
+    the long_lived_conns_per_ip parameter, which defines the size of the
+    addressable space for connections per IP. Since L4 can address at most
+    65535 ports, the parameter is limited by that.
+
+    Connections are not distributed equally among the clients.
+    Every client except the last one gets long_lived_conns_per_ip
+    connections, while the last one gets the remaining connections.
+
+    Don't forget to set an appropriate open file descriptor (nofile) ulimit
+    if needed. See LongLivedServer/LongLivedClient for more details.
+    """
+
+    long_lived_conns = IntParam(mandatory=True)
+    long_lived_conns_port = IntParam(default=20000)
+    long_lived_conns_per_ip = IntParam(default=20000)
+
+    def _generate_ip_endpoints(self, config):
+        return [
+            ip_endpoint_pairs(
+                config,
+                (self.matched.host1.eth0, self.matched.host2.eth0),
+                combination_func=zip,
+            )
+        ]
+
+    def _filter_ip_endpoints(self, config, slicer):
+        ips = []
+
+        for parallel_endpoints in self._generate_ip_endpoints(config):
+            ipv4 = [
+                endpoint_pair
+                for endpoint_pair in parallel_endpoints
+                if isinstance(endpoint_pair.first.address, Ip4Address)
+            ]
+            ipv6 = [
+                endpoint_pair
+                for endpoint_pair in parallel_endpoints
+                if isinstance(endpoint_pair.first.address, Ip6Address)
+            ]
+
+            ips.append(ipv4[slicer])
+            ips.append(ipv6[slicer])
+
+        return ips
+
+    def generate_perf_endpoints(self, config):
+        return self._filter_ip_endpoints(
+            config, slice(0, 1)  # only the first IP is used for perf
+        )
+
+    def generate_long_lived_conns_endpoints(self, config):
+        return self._filter_ip_endpoints(
+            config,
+            slice(1, None),  # all IPs except the first one, which is used for perf
+        )
+
+    @property
+    def servers_count(self):
+        return ceil(self.params.long_lived_conns / self.params.long_lived_conns_per_ip)
+
+    def calculate_client_connections(self, client_id):
+        client_id += 1  # convert the 0-based index to a 1-based one
+        if client_id < self.servers_count:
+            return self.params.long_lived_conns_per_ip
+
+        # the remaining connections are handled by the last client
+        return (
+            self.params.long_lived_conns
+            - (self.servers_count - 1) * self.params.long_lived_conns_per_ip
+        )
+
+    def _prepare_server(self, receiver, conns_count):
+        server = LongLivedServer(
+            server_ip=receiver.address,
+            server_port=self.params.long_lived_conns_port,
+            connections_count=conns_count,
+        )
+
+        job = receiver.device.netns.prepare_job(server)
+
+        return job
+
+    def _prepare_client(self, generator, receiver, conns_count):
+        client = LongLivedClient(
+            server_ip=receiver.address,
+            server_port=self.params.long_lived_conns_port,
+            client_ip=generator.address,
+            connections_count=conns_count,
+        )
+
+        job = generator.device.netns.prepare_job(client)
+
+        return job
+
+    def wait_agent_for_establish_conns(self, agent, timeout=60):
+        # Yeah, a bit hacky, but this is the simplest way of waiting.
+        # The problem is that LNST doesn't support waiting
+        # for a specific condition on a REMOTE agent.
+
+        # TODO: refactor to use a condition wait, once implemented
+        while timeout:
+            job = agent.run("ss | grep ESTAB | wc -l")
+            conns = int(job.stdout)
+            logging.debug(
+                f"Connections established: {conns}/{self.params.long_lived_conns}"
+            )
+            if conns > self.params.long_lived_conns:
+                logging.info("All long-lived connections established")
+                break
+            time.sleep(1)
+            timeout -= 1
+
+    def test_wide_configuration(self):
+        config = super().test_wide_configuration()
+        # L4 can address up to 65535 ports (the size of the addressable space
+        # is defined by long_lived_conns_per_ip), therefore opening the
+        # connections may require multiple IPs.
+        host1, host2 = self.matched.host1, self.matched.host2
+
+        ipv4_addr = interface_addresses(self.params.net_ipv4)
+        ipv6_addr = interface_addresses(self.params.net_ipv6)
+
+        for _ in range(2):
+            # 2 addresses are already assigned by super().test_wide_configuration();
+            # those are used for perf tests.
+            # The generator needs to be moved to the 3rd address.
+            next(ipv4_addr)
+            next(ipv6_addr)
+
+        host1.eth0.down()
+        host2.eth0.down()
+        for host in [host1, host2]:
+            for _ in range(self.servers_count):
+                config.configure_and_track_ip(host.eth0, next(ipv4_addr))
+                config.configure_and_track_ip(host.eth0, next(ipv6_addr))
+        host1.eth0.up()
+        host2.eth0.up()
+
+        self.wait_tentative_ips(config.configured_devices)
+
+        return config
+
+    def generate_sub_configurations(self, config):
+        for parent_config in super().generate_sub_configurations(config):
+            parent_config.long_lived_connections = []
+
+            for parallel_endpoint_pairs in self.generate_long_lived_conns_endpoints(parent_config):
+                for ip_version in self.params.ip_versions:
+                    filtered_parallel_endpoints = [
+                        endpoint_pair
+                        for endpoint_pair in parallel_endpoint_pairs
+                        if ip_version_string(endpoint_pair.first.address) == ip_version
+                    ]
+                    for i, endpoint_pair in enumerate(filtered_parallel_endpoints):
+                        generator = endpoint_pair.first
+                        receiver = endpoint_pair.second
+
+                        connections_count = self.calculate_client_connections(i)
+
+                        server_job = self._prepare_server(receiver, connections_count)
+                        client_job = self._prepare_client(
+                            generator, receiver, connections_count
+                        )
+
+                        parent_config.long_lived_connections.append((client_job, server_job))
+
+            yield parent_config
+
+    def apply_sub_configuration(self, config):
+        super().apply_sub_configuration(config)
+
+        for client_job, server_job in config.long_lived_connections:
+            server_job.start(bg=True)
+            time.sleep(2)  # just to be sure the server is up
+            client_job.start(bg=True)
+
+        self.wait_agent_for_establish_conns(self.matched.host1)
+        logging.info("Long-lived connections established")
+
+    def generate_sub_configuration_description(self, config):
+        desc = super().generate_sub_configuration_description(config)
+
+        for client_job, server_job in config.long_lived_connections:
+            desc.append(f"Long-lived connection between {client_job.what} and {server_job.what}")
+
+        return desc
+
+    def remove_sub_configuration(self, config):
+        for client_job, server_job in config.long_lived_connections:
+            client_job.kill(signal.SIGINT)
+            server_job.kill(signal.SIGINT)
+            # ^^ both the server and the client run in a while(1) loop that is
+            # interruptible by SIGINT, which shuts them down gracefully
+
+            try:
+                client_job.wait(timeout=client_job.what.runtime_estimate())
+                server_job.wait(timeout=server_job.what.runtime_estimate())
+            finally:
+                client_job.kill()
+                server_job.kill()
+
+        del config.long_lived_connections
+
+        return super().remove_sub_configuration(config)
+
diff --git a/lnst/Recipes/ENRT/__init__.py b/lnst/Recipes/ENRT/__init__.py
index 2bb20f25c..7668754a7 100644
--- a/lnst/Recipes/ENRT/__init__.py
+++ b/lnst/Recipes/ENRT/__init__.py
@@ -113,6 +113,6 @@
 from lnst.Recipes.ENRT.SoftwareRDMARecipe import SoftwareRDMARecipe
 from lnst.Recipes.ENRT.XDPDropRecipe import XDPDropRecipe
 from lnst.Recipes.ENRT.CTInsertionRateNftablesRecipe import CTInsertionRateNftablesRecipe
-
+from lnst.Recipes.ENRT.CTFulltableInsertionRateRecipe import CTFulltableInsertionRateRecipe
 from lnst.Recipes.ENRT.BaseEnrtRecipe import BaseEnrtRecipe
 from lnst.Recipes.ENRT.BaseTunnelRecipe import BaseTunnelRecipe
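Note (not part of the patch): the way connections are split across the client/server
IP pairs follows from servers_count and calculate_client_connections(). A minimal
standalone sketch of that arithmetic; split_connections is a hypothetical helper
used here only for illustration, it does not exist in LNST:

    from math import ceil

    def split_connections(total_conns, conns_per_ip):
        # Mirrors servers_count / calculate_client_connections() from the mixin:
        # every client except the last one opens conns_per_ip connections,
        # the last one opens whatever remains.
        pairs = ceil(total_conns / conns_per_ip)
        counts = [conns_per_ip] * (pairs - 1)
        counts.append(total_conns - (pairs - 1) * conns_per_ip)
        return counts

    # e.g. 50000 connections with the default 20000 connections per IP
    # -> three IP pairs handling [20000, 20000, 10000] connections
    print(split_connections(50000, 20000))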
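For context, running the new recipe should look like running any other ENRT recipe.
This is a hedged sketch only: the parameter values are illustrative, any additional
mandatory parameters inherited from CTInsertionRateNftablesRecipe are omitted, and a
configured LNST agent pool is assumed:

    from lnst.Controller import Controller
    from lnst.Recipes.ENRT import CTFulltableInsertionRateRecipe

    # Enough connections to occupy roughly 2/3 of the conntrack buckets on the
    # tested machine; the exact value depends on its nf_conntrack hashsize.
    recipe = CTFulltableInsertionRateRecipe(
        long_lived_conns=500000,
        long_lived_conns_per_ip=20000,  # default, shown for clarity
    )

    ctl = Controller()
    ctl.run(recipe)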