
Commit ad25f5c

dhowells authored and davem330 committed
rxrpc: Fix locking issue
There's a locking issue with the per-netns list of calls in rxrpc.  The
pieces of code that add and remove a call from the list use write_lock()
and the calls procfile uses read_lock() to access it.  However, the timer
callback function may trigger a removal by trying to queue a call for
processing and finding that it's already queued - at which point it has a
spare refcount that it has to do something with.  Unfortunately, if it
puts the call and this reduces the refcount to 0, the call will be removed
from the list.  And since the _bh variants of the locking functions aren't
used, this can deadlock.

================================
WARNING: inconsistent lock state
5.18.0-rc3-build4+ #10 Not tainted
--------------------------------
inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
ksoftirqd/2/25 [HC0[0]:SC1[1]:HE1:SE0] takes:
ffff888107ac4038 (&rxnet->call_lock){+.?.}-{2:2}, at: rxrpc_put_call+0x103/0x14b
{SOFTIRQ-ON-W} state was registered at:
...
Possible unsafe locking scenario:

       CPU0
       ----
  lock(&rxnet->call_lock);
  <Interrupt>
    lock(&rxnet->call_lock);

 *** DEADLOCK ***

1 lock held by ksoftirqd/2/25:
 #0: ffff8881008ffdb0 ((&call->timer)){+.-.}-{0:0}, at: call_timer_fn+0x5/0x23d

Changes
=======
ver #2)
 - Changed to using list_next_rcu() rather than rcu_dereference() directly.

Fixes: 17926a7 ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <[email protected]>
cc: Marc Dionne <[email protected]>
cc: [email protected]
Signed-off-by: David S. Miller <[email protected]>
1 parent a057542 commit ad25f5c
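
As an illustration of the locking scheme this commit moves to, here is a minimal sketch: writers take a spinlock with the _bh variant and manipulate the list with the RCU list primitives, while the procfile reader only enters an RCU read-side critical section. All names here (my_net, my_obj, my_obj_add, my_obj_remove, my_obj_dump) are hypothetical and for illustration only; this is not rxrpc's actual code.

struct my_net {
	spinlock_t		obj_lock;	/* protects ->objs against concurrent writers */
	struct list_head	objs;		/* list walked by RCU readers */
};

struct my_obj {
	struct list_head	link;
	unsigned int		id;
};

/* Writer side: may also be reached from softirq context (e.g. a timer
 * callback dropping the last ref), so BHs are disabled while the lock
 * is held.
 */
static void my_obj_add(struct my_net *net, struct my_obj *obj)
{
	spin_lock_bh(&net->obj_lock);
	list_add_tail_rcu(&obj->link, &net->objs);
	spin_unlock_bh(&net->obj_lock);
}

static void my_obj_remove(struct my_net *net, struct my_obj *obj)
{
	spin_lock_bh(&net->obj_lock);
	list_del_rcu(&obj->link);
	spin_unlock_bh(&net->obj_lock);
}

/* Reader side (the procfile): no spinlock at all, just an RCU read-side
 * critical section, so it cannot take part in the deadlock above.
 */
static void my_obj_dump(struct my_net *net)
{
	struct my_obj *obj;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &net->objs, link)
		pr_info("obj %u\n", obj->id);
	rcu_read_unlock();
}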

File tree

8 files changed: +62 -22 lines changed


fs/seq_file.c

Lines changed: 32 additions & 0 deletions

@@ -931,6 +931,38 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
 }
 EXPORT_SYMBOL(seq_list_next);
 
+struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each_rcu(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+EXPORT_SYMBOL(seq_list_start_rcu);
+
+struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start_rcu(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_list_start_head_rcu);
+
+struct list_head *seq_list_next_rcu(void *v, struct list_head *head,
+				    loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = list_next_rcu((struct list_head *)v);
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+EXPORT_SYMBOL(seq_list_next_rcu);
+
 /**
  * seq_hlist_start - start an iteration of a hlist
  * @head: the head of the hlist
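
A hedged sketch of how a seq_file iterator is expected to use these new helpers: the caller holds rcu_read_lock() from ->start to ->stop, and ->show must treat the list head returned for position 0 as a header sentinel. The my_list, my_obj and my_seq_* names are hypothetical (my_obj reuses the type sketched under the commit message); the real user is net/rxrpc/proc.c further down.

static LIST_HEAD(my_list);	/* hypothetical RCU-protected list */

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();	/* held until my_seq_stop() runs */
	return seq_list_start_head_rcu(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next_rcu(v, &my_list, pos);
}

static void my_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	struct my_obj *obj;

	if (v == &my_list) {
		/* Position 0 is the list head itself: print column headers. */
		seq_puts(seq, "Id\n");
		return 0;
	}

	obj = list_entry(v, struct my_obj, link);
	seq_printf(seq, "%u\n", obj->id);
	return 0;
}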

include/linux/list.h

Lines changed: 10 additions & 0 deletions

@@ -605,6 +605,16 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_for_each(pos, head) \
 	for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
 
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @head:	the head for your list.
+ */
+#define list_for_each_rcu(pos, head)		  \
+	for (pos = rcu_dereference((head)->next); \
+	     !list_is_head(pos, (head));	  \
+	     pos = rcu_dereference(pos->next))
+
 /**
  * list_for_each_continue - continue iteration over a list
  * @pos: the &struct list_head to use as a loop cursor.
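
For contrast, a small hypothetical fragment showing where the new macro sits next to the long-standing list_for_each_entry_rcu(): list_for_each_rcu() walks raw struct list_head cursors (which is what seq_list_start_rcu() above needs), while the entry variant walks the containing structures. Again, my_obj is the hypothetical type from the earlier sketch, not part of this patch.

static void my_dump_both_ways(struct list_head *head)
{
	struct list_head *lh;
	struct my_obj *obj;

	rcu_read_lock();

	/* Raw list_head cursor, as seq_list_start_rcu() uses internally. */
	list_for_each_rcu(lh, head)
		pr_info("raw: %u\n", list_entry(lh, struct my_obj, link)->id);

	/* Typed cursor over the containing structures. */
	list_for_each_entry_rcu(obj, head, link)
		pr_info("typed: %u\n", obj->id);

	rcu_read_unlock();
}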

include/linux/seq_file.h

Lines changed: 4 additions & 0 deletions

@@ -277,6 +277,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
 extern struct list_head *seq_list_next(void *v, struct list_head *head,
 				       loff_t *ppos);
 
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
 /*
  * Helpers for iteration over hlist_head-s in seq_files
  */

net/rxrpc/ar-internal.h

Lines changed: 1 addition & 1 deletion

@@ -60,7 +60,7 @@ struct rxrpc_net {
 	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
 	u32			epoch;		/* Local epoch for detecting local-end reset */
 	struct list_head	calls;		/* List of calls active in this namespace */
-	rwlock_t		call_lock;	/* Lock for ->calls */
+	spinlock_t		call_lock;	/* Lock for ->calls */
 	atomic_t		nr_calls;	/* Count of allocated calls */
 
 	atomic_t		nr_conns;

net/rxrpc/call_accept.c

Lines changed: 3 additions & 3 deletions

@@ -140,9 +140,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	write_lock(&rxnet->call_lock);
-	list_add_tail(&call->link, &rxnet->calls);
-	write_unlock(&rxnet->call_lock);
+	spin_lock_bh(&rxnet->call_lock);
+	list_add_tail_rcu(&call->link, &rxnet->calls);
+	spin_unlock_bh(&rxnet->call_lock);
 
 	b->call_backlog[call_head] = call;
 	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));

net/rxrpc/call_object.c

Lines changed: 9 additions & 9 deletions

@@ -337,9 +337,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	write_lock(&rxnet->call_lock);
-	list_add_tail(&call->link, &rxnet->calls);
-	write_unlock(&rxnet->call_lock);
+	spin_lock_bh(&rxnet->call_lock);
+	list_add_tail_rcu(&call->link, &rxnet->calls);
+	spin_unlock_bh(&rxnet->call_lock);
 
 	/* From this point on, the call is protected by its own lock. */
 	release_sock(&rx->sk);
@@ -633,9 +633,9 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			write_lock(&rxnet->call_lock);
+			spin_lock_bh(&rxnet->call_lock);
 			list_del_init(&call->link);
-			write_unlock(&rxnet->call_lock);
+			spin_unlock_bh(&rxnet->call_lock);
 		}
 
 		rxrpc_cleanup_call(call);
@@ -707,7 +707,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	_enter("");
 
 	if (!list_empty(&rxnet->calls)) {
-		write_lock(&rxnet->call_lock);
+		spin_lock_bh(&rxnet->call_lock);
 
 		while (!list_empty(&rxnet->calls)) {
 			call = list_entry(rxnet->calls.next,
@@ -722,12 +722,12 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 			       rxrpc_call_states[call->state],
 			       call->flags, call->events);
 
-			write_unlock(&rxnet->call_lock);
+			spin_unlock_bh(&rxnet->call_lock);
 			cond_resched();
-			write_lock(&rxnet->call_lock);
+			spin_lock_bh(&rxnet->call_lock);
 		}
 
-		write_unlock(&rxnet->call_lock);
+		spin_unlock_bh(&rxnet->call_lock);
 	}
 
 	atomic_dec(&rxnet->nr_calls);
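
One point worth noting: the lockless reader added in net/rxrpc/proc.c below is only safe if an unlinked call is not freed until an RCU grace period has passed. This diff doesn't show rxrpc's call-freeing path, but a generic sketch of the deferred-free pattern such a reader relies on looks like the following (hypothetical types again, extending my_obj from the earlier sketch with an rcu_head; not rxrpc's actual code).

struct my_obj {
	struct list_head	link;
	struct rcu_head		rcu;	/* lets the free be deferred past readers */
	unsigned int		id;
};

static void my_obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, rcu));
}

static void my_obj_release(struct my_net *net, struct my_obj *obj)
{
	spin_lock_bh(&net->obj_lock);
	list_del_rcu(&obj->link);		/* readers may still be walking it... */
	spin_unlock_bh(&net->obj_lock);

	call_rcu(&obj->rcu, my_obj_free_rcu);	/* ...so only free after a grace period */
}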

net/rxrpc/net_ns.c

Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
 	INIT_LIST_HEAD(&rxnet->calls);
-	rwlock_init(&rxnet->call_lock);
+	spin_lock_init(&rxnet->call_lock);
 	atomic_set(&rxnet->nr_calls, 1);
 
 	atomic_set(&rxnet->nr_conns, 1);

net/rxrpc/proc.c

Lines changed: 2 additions & 8 deletions

@@ -26,29 +26,23 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
  */
 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
 	__acquires(rcu)
-	__acquires(rxnet->call_lock)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
 	rcu_read_lock();
-	read_lock(&rxnet->call_lock);
-	return seq_list_start_head(&rxnet->calls, *_pos);
+	return seq_list_start_head_rcu(&rxnet->calls, *_pos);
 }
 
 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
-	return seq_list_next(v, &rxnet->calls, pos);
+	return seq_list_next_rcu(v, &rxnet->calls, pos);
 }
 
 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
-	__releases(rxnet->call_lock)
 	__releases(rcu)
 {
-	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
-
-	read_unlock(&rxnet->call_lock);
 	rcu_read_unlock();
 }
 
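
For completeness, a hedged sketch of how iterators like the ones above are typically wired up to a /proc/net file. The my_* names, the "my_objs" filename and my_proc_dir are hypothetical and not part of this patch; they build on the my_seq_* sketch shown earlier.

static const struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= my_seq_next,
	.stop	= my_seq_stop,
	.show	= my_seq_show,
};

/* e.g. from a pernet ->init hook, where my_proc_dir is this namespace's
 * proc directory (hypothetical):
 */
if (!proc_create_net("my_objs", 0444, my_proc_dir,
		     &my_seq_ops, sizeof(struct seq_net_private)))
	return -ENOMEM;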