Skip to content

Commit

Permalink
mptcp: consolidate subflow cleanup
Browse files Browse the repository at this point in the history
Consolidate all the cleanup actions requiring the worker in a single
helper and ensure the dummy data fin creation for the fallback socket is
performed only when the tcp rx queue is empty.

There are no functional changes intended, but this will simplify the
next patch, when the tcp rx queue spooling could be delayed at release_cb
time.

Signed-off-by: Paolo Abeni <[email protected]>
  • Loading branch information
Paolo Abeni authored and intel-lab-lkp committed Nov 29, 2024
1 parent e0d5da1 commit ee024dd
Showing 1 changed file with 18 additions and 15 deletions.
33 changes: 18 additions & 15 deletions net/mptcp/subflow.c
Original file line number Diff line number Diff line change
Expand Up @@ -1271,7 +1271,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static bool subflow_is_done(const struct sock *sk)
{
return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

/* sched mptcp worker for subflow cleanup if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
struct sock *sk = (struct sock *)msk;
Expand All @@ -1281,8 +1286,18 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
inet_sk_state_load(sk) != TCP_ESTABLISHED)))
return;

if (skb_queue_empty(&ssk->sk_receive_queue) &&
!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
if (!skb_queue_empty(&ssk->sk_receive_queue))
return;

if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
mptcp_schedule_work(sk);

/* when the fallback subflow closes the rx side, trigger a 'dummy'
* ingress data fin, so that the msk state will follow along
*/
if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
msk->first == ssk &&
mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
mptcp_schedule_work(sk);
}

Expand Down Expand Up @@ -1842,11 +1857,6 @@ static void __subflow_state_change(struct sock *sk)
rcu_read_unlock();
}

/* True when no more ingress data can arrive on this subflow: the rx side
 * was shut down or the tcp socket reached TCP_CLOSE.
 */
static bool subflow_is_done(const struct sock *sk)
{
return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
Expand All @@ -1873,13 +1883,6 @@ static void subflow_state_change(struct sock *sk)
subflow_error_report(sk);

subflow_sched_work_if_closed(mptcp_sk(parent), sk);

/* when the fallback subflow closes the rx side, trigger a 'dummy'
* ingress data fin, so that the msk state will follow along
*/
if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
mptcp_schedule_work(parent);
}

void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
Expand Down

0 comments on commit ee024dd

Please sign in to comment.