Skip to content

Commit

Permalink
selftests/bpf: Add mptcp_address bpf_iter subtest
Browse files Browse the repository at this point in the history
This patch adds a test program for the newly added mptcp_address bpf_iter
in SEC "cgroup/getsockopt". This test iterates over all address entries on
the local address list of the userspace PM and checks whether each one is
an IPv4-mapped address.

Export mptcp_address helpers bpf_iter_mptcp_address_new/_next/_destroy into
bpf_experimental.h. Use bpf_mptcp_sock_acquire() to acquire the msk, then
lock the msk pm lock and use bpf_for_each(mptcp_address) to walk the local
address list of this msk. Invoke bpf_ipv6_addr_v4mapped() in the loop to
check whether the address is an IPv4-mapped one. Then add the address ID of
each entry to the local variable local_ids.

Out of the loop, unlock the msk pm lock and use bpf_mptcp_sock_release() to
release the msk. Finally, assign local_ids to global variable ids so that
the application can obtain this value.

Add a subtest named test_iters_address to load and verify the newly added
mptcp_address type bpf_iter example in test_mptcp.

Since the mptcp_address bpf_iter iterates over all address entries on the
local address list of the userspace PM, a set of userspace PM helpers is
added.

Set pm_type and start "pm_nl_ctl events" command to save pm events into
the log file /tmp/bpf_userspace_pm_events in userspace_pm_init(). Kill
"pm_nl_ctl" command and remove the log file in userspace_pm_cleanup().
Parse the log file in userspace_pm_add_subflow() to get the token, sport
and dport values, then use "pm_nl_ctl csf" command to create a subflow.

Use the helper userspace_pm_add_subflow() to add 3 new subflow endpoints.
Send a byte of message to start the mptcp connection, and receive the
message.

getsockopt() is invoked to trigger the "cgroup/getsockopt" test program.
Check if skel->bss->ids equals 60 to verify whether the program loops
three times as expected.

Signed-off-by: Geliang Tang <[email protected]>
  • Loading branch information
Geliang Tang authored and intel-lab-lkp committed Oct 22, 2024
1 parent 782c987 commit 0dc4039
Show file tree
Hide file tree
Showing 4 changed files with 266 additions and 0 deletions.
8 changes: 8 additions & 0 deletions tools/testing/selftests/bpf/bpf_experimental.h
Original file line number Diff line number Diff line change
Expand Up @@ -583,6 +583,14 @@ bpf_iter_mptcp_subflow_next(struct bpf_iter_mptcp_subflow *it) __weak __ksym;
extern void
bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_subflow *it) __weak __ksym;

/* Open-coded iterator kfuncs backing bpf_for_each(mptcp_address, ...):
 * walk the local address entries (struct mptcp_pm_addr_entry) of an
 * msk's path manager. _new initialises the iterator over @msk, _next
 * yields the following entry (NULL at the end), _destroy releases it.
 */
struct bpf_iter_mptcp_address;
extern int bpf_iter_mptcp_address_new(struct bpf_iter_mptcp_address *it,
				      struct mptcp_sock *msk) __weak __ksym;
extern struct mptcp_pm_addr_entry *
bpf_iter_mptcp_address_next(struct bpf_iter_mptcp_address *it) __weak __ksym;
extern void
bpf_iter_mptcp_address_destroy(struct bpf_iter_mptcp_address *it) __weak __ksym;

extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
Expand Down
214 changes: 214 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/mptcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#define ADDR6_4 "dead:beef:4::1"
#define PORT_1 10001
#define PM_CTL "./mptcp_pm_nl_ctl"
#define PM_EVENTS "/tmp/bpf_userspace_pm_events"
#define WITH_DATA true
#define WITHOUT_DATA false

Expand Down Expand Up @@ -57,6 +58,14 @@
#endif
#define MPTCP_SCHED_NAME_MAX 16

/* Local mirror of the kernel's enum mptcp_pm_type (path manager selected
 * via the net.mptcp.pm_type sysctl) — presumably copied from the uapi
 * header; keep in sync with the kernel definition.
 */
enum mptcp_pm_type {
	MPTCP_PM_TYPE_KERNEL = 0,	/* in-kernel PM (default) */
	MPTCP_PM_TYPE_USERSPACE,	/* userspace PM, driven by pm_nl_ctl */

	__MPTCP_PM_TYPE_NR,
	__MPTCP_PM_TYPE_MAX = __MPTCP_PM_TYPE_NR - 1,
};

static const unsigned int total_bytes = 10 * 1024 * 1024;
static int duration;

Expand Down Expand Up @@ -270,6 +279,19 @@ static void send_byte(int fd)
ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte");
}

/* Receive exactly one byte from @fd; the peer is expected to have sent
 * it with send_byte(). Returns 0 on success, -1 on error or EOF.
 */
static int recv_byte(int fd)
{
	char byte[1];
	ssize_t got;

	got = recv(fd, byte, sizeof(byte), 0);
	if (CHECK(got <= 0, "recv_byte", "recv")) {
		log_err("failed/partial recv");
		return -1;
	}

	return 0;
}

static int verify_mptcpify(int server_fd, int client_fd)
{
struct __mptcp_info info;
Expand Down Expand Up @@ -585,6 +607,196 @@ static void test_iters_subflow(void)
close(cgroup_fd);
}

/* Prepare the test netns for a userspace path-manager run:
 * - assign the test addresses (address_init()),
 * - switch net.mptcp.pm_type to @pm_type,
 * - raise the PM limits to 4 addresses / 4 subflows,
 * - launch "pm_nl_ctl events" in the background, appending every PM
 *   event to PM_EVENTS so later helpers can parse tokens and ports.
 * Returns 0 on success, -1 on any failure (SYS() jumps to "fail").
 */
static int userspace_pm_init(enum mptcp_pm_type pm_type)
{
	if (address_init())
		goto fail;

	SYS(fail, "ip netns exec %s sysctl -qw net.mptcp.pm_type=%u",
	    NS_TEST, pm_type);
	SYS(fail, "ip netns exec %s %s limits 4 4",
	    NS_TEST, PM_CTL);
	SYS(fail, "ip netns exec %s %s events >> %s 2>&1 &",
	    NS_TEST, PM_CTL, PM_EVENTS);

	return 0;
fail:
	return -1;
}

/* Undo userspace_pm_init(): kill the background "pm_nl_ctl events"
 * listener and delete its log file. Best effort: failures are ignored.
 */
static void userspace_pm_cleanup(void)
{
	SYS_NOFAIL("ip netns exec %s killall %s > /dev/null 2>&1",
		   NS_TEST, PM_CTL);
	SYS_NOFAIL("ip netns exec %s rm -rf %s", NS_TEST, PM_EVENTS);
}

/* Find the first occurrence of @type (e.g. "type:2") in the PM events
 * log and copy the log from that point onwards into @line.
 * Returns 0 on success, -1 on open/read/match failure.
 *
 * NOTE(review): @line has no size parameter; callers pass 1024-byte
 * buffers while up to BUFSIZ-1 bytes may be copied — consider passing
 * the destination size and using snprintf() instead of strcpy().
 */
static int userspace_pm_get_events_line(char *type, char *line)
{
	char buf[BUFSIZ], *str;
	ssize_t len;
	int fd;

	fd = open(PM_EVENTS, O_RDONLY);
	if (fd < 0) {
		log_err("failed to open pm events\n");
		return -1;
	}

	/* read() returns ssize_t: with the previous size_t variable a
	 * -1 error wrapped to a huge unsigned value and "len <= 0"
	 * could never catch it. Also keep one byte free for the NUL
	 * terminator below.
	 */
	len = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (len <= 0) {
		log_err("failed to read pm events\n");
		return -1;
	}
	/* read() data is not NUL-terminated; strstr()/strcpy() on an
	 * unterminated buffer is undefined behaviour.
	 */
	buf[len] = '\0';

	str = strstr(buf, type);
	if (!str) {
		log_err("failed to get type %s pm event\n", type);
		return -1;
	}

	strcpy(line, str);
	return 0;
}

/* Poll the PM events log until the "established" (type:2) event for the
 * connection on @fd appears, then extract and return its "token" field.
 * Sends one byte per iteration to generate traffic, and sync()s so the
 * background logger's output reaches the file. Waits at most 2 seconds.
 * Returns the token on success, 0 on timeout.
 *
 * NOTE(review): return type is int but the token is __u32 — a token
 * above INT_MAX comes back negative before the caller stores it into a
 * __u32 again; confirm this round-trip is intended.
 */
static int userspace_pm_get_token(int fd)
{
	char line[1024], *str;
	__u32 token;
	int i;

	/* Wait max 2 sec for the connection to be established */
	for (i = 0; i < 10; i++) {
		usleep(200000); /* 0.2s */
		send_byte(fd);

		sync();
		if (userspace_pm_get_events_line("type:2", line))
			continue;
		str = strstr(line, "token");
		if (!str)
			continue;
		if (sscanf(str, "token:%u,", &token) != 1)
			continue;
		return token;
	}

	return 0;
}

/* Ask the userspace PM ("pm_nl_ctl csf") to create one extra subflow
 * from local @addr (address id @id) for the connection identified by
 * @token. The remote port is recovered from the established (type:2)
 * event in the PM events log. Returns 0 on success, -1 on error.
 */
static int userspace_pm_add_subflow(__u32 token, char *addr, __u8 id)
{
	bool ipv6 = strstr(addr, ":");
	char line[1024], *str;
	__u32 sport, dport;

	if (userspace_pm_get_events_line("type:2", line))
		return -1;

	/* parse both ports to validate the event line, even though only
	 * the destination port is used below
	 */
	str = strstr(line, "sport");
	if (!str || sscanf(str, "sport:%u,dport:%u,", &sport, &dport) != 2) {
		/* str can be NULL on the first branch: passing NULL to a
		 * "%s" conversion is undefined behaviour, so substitute
		 * a placeholder.
		 */
		log_err("add_subflow error, str=%s\n", str ? str : "(null)");
		return -1;
	}

	/* pick the server address in the matching family: plain IPv4,
	 * native IPv6, or IPv4-mapped when @addr looks like "::ffff:a.b.c.d"
	 */
	str = ipv6 ? (strstr(addr, ".") ? "::ffff:"ADDR_1 : ADDR6_1) : ADDR_1;
	SYS_NOFAIL("ip netns exec %s %s csf lip %s lid %u rip %s rport %u token %u",
		   NS_TEST, PM_CTL, addr, id, str, dport, token);

	return 0;
}

/* Drive the mptcp_address bpf_iter test: open an MPTCP connection over
 * the IPv4-mapped ADDR_1, have the userspace PM add three extra subflow
 * endpoints (ids 10/20/30), then issue getsockopt(TCP_IS_MPTCP) on the
 * client so the attached "cgroup/getsockopt" program walks the PM local
 * address list and sums the ids into skel->bss->ids (checked by the
 * caller, test_iters_address()).
 */
static void run_iters_address(void)
{
	int server_fd, client_fd, accept_fd;
	int is_mptcp, err;
	socklen_t len;
	__u32 token;

	server_fd = start_mptcp_server(AF_INET6, "::ffff:"ADDR_1, PORT_1, 0);
	if (!ASSERT_OK_FD(server_fd, "start_mptcp_server"))
		return;

	client_fd = connect_to_fd(server_fd, 0);
	if (!ASSERT_OK_FD(client_fd, "connect_to_fd"))
		goto close_server;

	accept_fd = accept(server_fd, NULL, NULL);
	if (!ASSERT_OK_FD(accept_fd, "accept"))
		goto close_client;

	/* the PM "established" event carries the connection token */
	token = userspace_pm_get_token(client_fd);
	if (!token)
		/* NOTE(review): silent bail-out — no ASSERT/log on timeout */
		goto close_client;
	/* drain the byte(s) sent while polling for the token */
	recv_byte(accept_fd);
	usleep(200000); /* 0.2s */

	/* ids 10 + 20 + 30: the BPF program should sum them to 60 */
	err = userspace_pm_add_subflow(token, "::ffff:"ADDR_2, 10);
	err = err ?: userspace_pm_add_subflow(token, "::ffff:"ADDR_3, 20);
	err = err ?: userspace_pm_add_subflow(token, "::ffff:"ADDR_4, 30);
	if (!ASSERT_OK(err, "userspace_pm_add_subflow"))
		goto close_accept;

	send_byte(accept_fd);
	recv_byte(client_fd);

	len = sizeof(is_mptcp);
	/* mainly to trigger the BPF program */
	err = getsockopt(client_fd, SOL_TCP, TCP_IS_MPTCP, &is_mptcp, &len);
	if (ASSERT_OK(err, "getsockopt(client_fd, TCP_IS_MPTCP)"))
		ASSERT_EQ(is_mptcp, 1, "is_mptcp");

close_accept:
	close(accept_fd);
close_client:
	close(client_fd);
close_server:
	close(server_fd);
}

/* Subtest entry point: attach the iters_address "cgroup/getsockopt"
 * program to a fresh cgroup, set up a netns with the userspace PM,
 * run the connection scenario, and verify the program summed the three
 * address ids (10 + 20 + 30 = 60) into the global "ids" variable.
 * Resources are released in reverse order via the goto ladder.
 */
static void test_iters_address(void)
{
	struct mptcp_bpf_iters *skel;
	struct nstoken *nstoken;
	int cgroup_fd;
	int err;

	cgroup_fd = test__join_cgroup("/iters_address");
	if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: iters_address"))
		return;

	skel = mptcp_bpf_iters__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_load: iters_address"))
		goto close_cgroup;

	skel->links.iters_address = bpf_program__attach_cgroup(skel->progs.iters_address,
							       cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.iters_address, "attach getsockopt"))
		goto skel_destroy;

	nstoken = create_netns();
	if (!ASSERT_OK_PTR(nstoken, "create_netns"))
		goto skel_destroy;

	err = userspace_pm_init(MPTCP_PM_TYPE_USERSPACE);
	if (!ASSERT_OK(err, "userspace_pm_init: iters_address"))
		goto close_netns;

	run_iters_address();

	/* 10 + 20 + 30 = 60 */
	ASSERT_EQ(skel->bss->ids, 60, "address ids");

	userspace_pm_cleanup();
close_netns:
	cleanup_netns(nstoken);
skel_destroy:
	mptcp_bpf_iters__destroy(skel);
close_cgroup:
	close(cgroup_fd);
}

static struct nstoken *sched_init(char *flags, char *sched)
{
struct nstoken *nstoken;
Expand Down Expand Up @@ -768,6 +980,8 @@ void test_mptcp(void)
test_subflow();
if (test__start_subtest("iters_subflow"))
test_iters_subflow();
if (test__start_subtest("iters_address"))
test_iters_address();
if (test__start_subtest("default"))
test_default();
if (test__start_subtest("first"))
Expand Down
5 changes: 5 additions & 0 deletions tools/testing/selftests/bpf/progs/mptcp_bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,11 @@ bpf_mptcp_subflow_ctx(const struct sock *sk) __ksym;
extern struct sock *
bpf_mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow) __ksym;

/* kfuncs to take/release a spinlock (BH-disabling variant) from BPF;
 * used around bpf_for_each(mptcp_address) walks of msk->pm state.
 */
extern void bpf_spin_lock_bh(spinlock_t *lock) __ksym;
extern void bpf_spin_unlock_bh(spinlock_t *lock) __ksym;

/* Returns true if @a is an IPv4-mapped IPv6 address (::ffff:a.b.c.d). */
extern bool bpf_ipv6_addr_v4mapped(const struct mptcp_addr_info *a) __ksym;

extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
bool scheduled) __ksym;

Expand Down
39 changes: 39 additions & 0 deletions tools/testing/selftests/bpf/progs/mptcp_bpf_iters.c
Original file line number Diff line number Diff line change
Expand Up @@ -62,3 +62,42 @@ int iters_subflow(struct bpf_sockopt *ctx)
bpf_mptcp_sock_release(msk);
return 1;
}

/* "cgroup/getsockopt" program exercising the mptcp_address bpf_iter:
 * on the TCP_IS_MPTCP query issued by the selftest, iterate the msk's
 * PM local address list, sum the address ids of the IPv4-mapped entries
 * into local_ids, and publish the total via the global "ids" (defined
 * elsewhere in this file) for userspace to check. Always returns 1 so
 * the getsockopt() itself proceeds unmodified.
 */
SEC("cgroup/getsockopt")
int iters_address(struct bpf_sockopt *ctx)
{
	struct mptcp_pm_addr_entry *entry;
	struct bpf_sock *sk = ctx->sk;
	struct mptcp_sock *msk;
	int local_ids = 0;

	/* only react to the selftest's TCP_IS_MPTCP call on an MPTCP
	 * socket; pass every other getsockopt() through untouched
	 */
	if (!sk || sk->protocol != IPPROTO_MPTCP ||
	    ctx->level != SOL_TCP || ctx->optname != TCP_IS_MPTCP)
		return 1;

	msk = bpf_mptcp_sk((struct sock *)sk);
	/* only the client side: its userspace PM owns the address
	 * entries added by the test
	 */
	if (msk->pm.server_side)
		return 1;

	/* hold a reference on the msk across the iteration */
	msk = bpf_mptcp_sock_acquire(msk);
	if (!msk)
		return 1;
	/* the PM lock protects the local address list being walked */
	bpf_spin_lock_bh(&msk->pm.lock);
	bpf_for_each(mptcp_address, entry, msk) {
		/* Here MPTCP-specific path manager kfunc can be called:
		 * this test is not doing anything really useful, only to
		 * verify the iteration works.
		 */

		/* stop at the first non-IPv4-mapped entry: the test only
		 * adds "::ffff:" addresses, so none is expected
		 */
		if (!bpf_ipv6_addr_v4mapped(&entry->addr))
			break;

		local_ids += entry->addr.id;
	}
	bpf_spin_unlock_bh(&msk->pm.lock);
	bpf_mptcp_sock_release(msk);

	/* expose the sum (10 + 20 + 30 = 60 in this test) to userspace */
	ids = local_ids;

	return 1;
}

0 comments on commit 0dc4039

Please sign in to comment.