@@ -360,9 +360,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 
 	tcp_send_active_reset(ssk, GFP_ATOMIC);
 	tcp_done(ssk);
-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		return; /* worker will put sk for us */
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+		mptcp_schedule_work(sk);
 
 	sock_put(sk);
 }
@@ -1010,8 +1009,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 		skb_ext_del(skb, SKB_EXT_MPTCP);
 		return MAPPING_OK;
 	} else {
-		if (updated && schedule_work(&msk->work))
-			sock_hold((struct sock *)msk);
+		if (updated)
+			mptcp_schedule_work((struct sock *)msk);
 
 		return MAPPING_DATA_FIN;
 	}
@@ -1114,17 +1113,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-	struct sock *sk = (struct sock *)msk;
-
 	if (likely(ssk->sk_state != TCP_CLOSE))
 		return;
 
 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-		sock_hold(sk);
-		if (!schedule_work(&msk->work))
-			sock_put(sk);
-	}
+	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		mptcp_schedule_work((struct sock *)msk);
 }
 
 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
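All three hunks replace the same open-coded pattern — take a reference on the msk socket, schedule the MPTCP worker, and drop the reference (or return early) when the work item could not be queued — with a single call to mptcp_schedule_work(). The helper's body is not part of this diff; the following is only a rough sketch of that hold-then-schedule pattern, inferred from the removed call sites and the removed "worker will put sk for us" comment, not the verbatim kernel source (the _sketch name is hypothetical):

/* Sketch only: queue the MPTCP worker and keep the msk socket alive
 * until the worker runs.  schedule_work() returns false when the work
 * item is already pending, in which case a reference was already taken
 * and no extra one is needed.
 */
static void mptcp_schedule_work_sketch(struct sock *sk)
{
	if (schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);	/* the worker is expected to put sk when it finishes */
}

With the refcounting concentrated in one helper, callers such as subflow_sched_work_if_closed() only need to set MPTCP_WORK_CLOSE_SUBFLOW and invoke it, which is exactly what the new lines above do.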