@@ -139,18 +139,22 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 	nht = rcu_dereference_protected(tbl->nht,
 					lockdep_is_held(&tbl->lock));
 	for (i = 0; i <= nht->hash_mask; i++) {
-		struct neighbour *n, **np;
+		struct neighbour *n;
+		struct neighbour __rcu **np;
 
 		np = &nht->hash_buckets[i];
-		while ((n = *np) != NULL) {
+		while ((n = rcu_dereference_protected(*np,
+					lockdep_is_held(&tbl->lock))) != NULL) {
 			/* Neighbour record may be discarded if:
 			 * - nobody refers to it.
 			 * - it is not permanent
 			 */
 			write_lock(&n->lock);
 			if (atomic_read(&n->refcnt) == 1 &&
 			    !(n->nud_state & NUD_PERMANENT)) {
-				*np = n->next;
+				rcu_assign_pointer(*np,
+					rcu_dereference_protected(n->next,
+						lockdep_is_held(&tbl->lock)));
 				n->dead = 1;
 				shrunk	= 1;
 				write_unlock(&n->lock);
@@ -208,14 +212,18 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 					  lockdep_is_held(&tbl->lock));
 
 	for (i = 0; i <= nht->hash_mask; i++) {
-		struct neighbour *n, **np = &nht->hash_buckets[i];
+		struct neighbour *n;
+		struct neighbour __rcu **np = &nht->hash_buckets[i];
 
-		while ((n = *np) != NULL) {
+		while ((n = rcu_dereference_protected(*np,
+					lockdep_is_held(&tbl->lock))) != NULL) {
 			if (dev && n->dev != dev) {
 				np = &n->next;
 				continue;
 			}
-			*np = n->next;
+			rcu_assign_pointer(*np,
+				   rcu_dereference_protected(n->next,
+						lockdep_is_held(&tbl->lock)));
 			write_lock(&n->lock);
 			neigh_del_timer(n);
 			n->dead = 1;
@@ -323,7 +331,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
 		kfree(ret);
 		return NULL;
 	}
-	ret->hash_buckets = buckets;
+	rcu_assign_pointer(ret->hash_buckets, buckets);
 	ret->hash_mask = entries - 1;
 	get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
 	return ret;
@@ -362,17 +370,22 @@ static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
 	for (i = 0; i <= old_nht->hash_mask; i++) {
 		struct neighbour *n, *next;
 
-		for (n = old_nht->hash_buckets[i];
+		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
+						   lockdep_is_held(&tbl->lock));
 		     n != NULL;
 		     n = next) {
 			hash = tbl->hash(n->primary_key, n->dev,
 					 new_nht->hash_rnd);
 
 			hash &= new_nht->hash_mask;
-			next = n->next;
-
-			n->next = new_nht->hash_buckets[hash];
-			new_nht->hash_buckets[hash] = n;
+			next = rcu_dereference_protected(n->next,
+						lockdep_is_held(&tbl->lock));
+
+			rcu_assign_pointer(n->next,
+					   rcu_dereference_protected(
+						new_nht->hash_buckets[hash],
+						lockdep_is_held(&tbl->lock)));
+			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
 		}
 	}
 
@@ -394,15 +407,18 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
-	read_lock(&tbl->lock);
-	for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
+
+	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+	     n != NULL;
+	     n = rcu_dereference_bh(n->next)) {
 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
-			neigh_hold(n);
+			if (!atomic_inc_not_zero(&n->refcnt))
+				n = NULL;
 			NEIGH_CACHE_STAT_INC(tbl, hits);
 			break;
 		}
 	}
-	read_unlock(&tbl->lock);
+
 	rcu_read_unlock_bh();
 	return n;
 }
@@ -421,16 +437,19 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
-	read_lock(&tbl->lock);
-	for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
+
+	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+	     n != NULL;
+	     n = rcu_dereference_bh(n->next)) {
 		if (!memcmp(n->primary_key, pkey, key_len) &&
 		    net_eq(dev_net(n->dev), net)) {
-			neigh_hold(n);
+			if (!atomic_inc_not_zero(&n->refcnt))
+				n = NULL;
 			NEIGH_CACHE_STAT_INC(tbl, hits);
 			break;
 		}
 	}
-	read_unlock(&tbl->lock);
+
 	rcu_read_unlock_bh();
 	return n;
 }
@@ -483,18 +502,24 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
 		goto out_tbl_unlock;
 	}
 
-	for (n1 = nht->hash_buckets[hash_val]; n1; n1 = n1->next) {
+	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
+					    lockdep_is_held(&tbl->lock));
+	     n1 != NULL;
+	     n1 = rcu_dereference_protected(n1->next,
+			lockdep_is_held(&tbl->lock))) {
 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
 			neigh_hold(n1);
 			rc = n1;
 			goto out_tbl_unlock;
 		}
 	}
 
-	n->next = nht->hash_buckets[hash_val];
-	nht->hash_buckets[hash_val] = n;
 	n->dead = 0;
 	neigh_hold(n);
+	rcu_assign_pointer(n->next,
+			   rcu_dereference_protected(nht->hash_buckets[hash_val],
+						     lockdep_is_held(&tbl->lock)));
+	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
 	write_unlock_bh(&tbl->lock);
 	NEIGH_PRINTK2("neigh %p is created.\n", n);
 	rc = n;
@@ -651,6 +676,12 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
 		neigh_parms_destroy(parms);
 }
 
+static void neigh_destroy_rcu(struct rcu_head *head)
+{
+	struct neighbour *neigh = container_of(head, struct neighbour, rcu);
+
+	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+}
 /*
  *	neighbour must already be out of the table;
  *
@@ -690,7 +721,7 @@ void neigh_destroy(struct neighbour *neigh)
 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 
 	atomic_dec(&neigh->tbl->entries);
-	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+	call_rcu(&neigh->rcu, neigh_destroy_rcu);
 }
 EXPORT_SYMBOL(neigh_destroy);
 
@@ -731,7 +762,8 @@ static void neigh_connect(struct neighbour *neigh)
 static void neigh_periodic_work(struct work_struct *work)
 {
 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
-	struct neighbour *n, **np;
+	struct neighbour *n;
+	struct neighbour __rcu **np;
 	unsigned int i;
 	struct neigh_hash_table *nht;
 
@@ -756,7 +788,8 @@ static void neigh_periodic_work(struct work_struct *work)
 	for (i = 0; i <= nht->hash_mask; i++) {
 		np = &nht->hash_buckets[i];
 
-		while ((n = *np) != NULL) {
+		while ((n = rcu_dereference_protected(*np,
+				lockdep_is_held(&tbl->lock))) != NULL) {
 			unsigned int state;
 
 			write_lock(&n->lock);
@@ -1213,8 +1246,8 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 }
 
 /* This function can be used in contexts, where only old dev_queue_xmit
-   worked, f.e. if you want to override normal output path (eql, shaper),
-   but resolution is not made yet.
+ * worked, f.e. if you want to override normal output path (eql, shaper),
+ * but resolution is not made yet.
  */
 
 int neigh_compat_output(struct sk_buff *skb)
@@ -2123,7 +2156,7 @@ static void neigh_update_notify(struct neighbour *neigh)
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 			    struct netlink_callback *cb)
 {
-	struct net * net = sock_net(skb->sk);
+	struct net *net = sock_net(skb->sk);
 	struct neighbour *n;
 	int rc, h, s_h = cb->args[1];
 	int idx, s_idx = idx = cb->args[2];
@@ -2132,13 +2165,14 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 
-	read_lock(&tbl->lock);
 	for (h = 0; h <= nht->hash_mask; h++) {
 		if (h < s_h)
 			continue;
 		if (h > s_h)
 			s_idx = 0;
-		for (n = nht->hash_buckets[h], idx = 0; n; n = n->next) {
+		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
+		     n != NULL;
+		     n = rcu_dereference_bh(n->next)) {
 			if (!net_eq(dev_net(n->dev), net))
 				continue;
 			if (idx < s_idx)
@@ -2150,13 +2184,12 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 				rc = -1;
 				goto out;
 			}
-		next:
+next:
 			idx++;
 		}
 	}
 	rc = skb->len;
 out:
-	read_unlock(&tbl->lock);
 	rcu_read_unlock_bh();
 	cb->args[1] = h;
 	cb->args[2] = idx;
@@ -2195,11 +2228,13 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 
-	read_lock(&tbl->lock);
+	read_lock(&tbl->lock); /* avoid resizes */
 	for (chain = 0; chain <= nht->hash_mask; chain++) {
 		struct neighbour *n;
 
-		for (n = nht->hash_buckets[chain]; n; n = n->next)
+		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
+		     n != NULL;
+		     n = rcu_dereference_bh(n->next))
 			cb(n, cookie);
 	}
 	read_unlock(&tbl->lock);
@@ -2217,16 +2252,20 @@ void __neigh_for_each_release(struct neigh_table *tbl,
 	nht = rcu_dereference_protected(tbl->nht,
 					lockdep_is_held(&tbl->lock));
 	for (chain = 0; chain <= nht->hash_mask; chain++) {
-		struct neighbour *n, **np;
+		struct neighbour *n;
+		struct neighbour __rcu **np;
 
 		np = &nht->hash_buckets[chain];
-		while ((n = *np) != NULL) {
+		while ((n = rcu_dereference_protected(*np,
+					lockdep_is_held(&tbl->lock))) != NULL) {
 			int release;
 
 			write_lock(&n->lock);
 			release = cb(n);
 			if (release) {
-				*np = n->next;
+				rcu_assign_pointer(*np,
+					rcu_dereference_protected(n->next,
+						lockdep_is_held(&tbl->lock)));
 				n->dead = 1;
 			} else
 				np = &n->next;
@@ -2250,7 +2289,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 
 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
 	for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
-		n = nht->hash_buckets[bucket];
+		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
 
 		while (n) {
 			if (!net_eq(dev_net(n->dev), net))
@@ -2267,8 +2306,8 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 				break;
 			if (n->nud_state & ~NUD_NOARP)
 				break;
-		next:
-			n = n->next;
+next:
+			n = rcu_dereference_bh(n->next);
 		}
 
 		if (n)
@@ -2292,7 +2331,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 		if (v)
 			return n;
 	}
-	n = n->next;
+	n = rcu_dereference_bh(n->next);
 
 	while (1) {
 		while (n) {
@@ -2309,8 +2348,8 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 
 			if (n->nud_state & ~NUD_NOARP)
 				break;
-		next:
-			n = n->next;
+next:
+			n = rcu_dereference_bh(n->next);
 		}
 
 		if (n)
@@ -2319,7 +2358,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 		if (++state->bucket > nht->hash_mask)
 			break;
 
-		n = nht->hash_buckets[state->bucket];
+		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
 	}
 
 	if (n && pos)
@@ -2417,7 +2456,6 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 }
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
-	__acquires(tbl->lock)
 	__acquires(rcu_bh)
 {
 	struct neigh_seq_state *state = seq->private;
@@ -2428,7 +2466,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	rcu_read_lock_bh();
 	state->nht = rcu_dereference_bh(tbl->nht);
-	read_lock(&tbl->lock);
+
 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
@@ -2461,13 +2499,8 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
-	__releases(tbl->lock)
 	__releases(rcu_bh)
 {
-	struct neigh_seq_state *state = seq->private;
-	struct neigh_table *tbl = state->tbl;
-
-	read_unlock(&tbl->lock);
 	rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
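
The pattern applied above to every hash chain is the same in each hunk: readers walk the bucket under rcu_read_lock_bh() using rcu_dereference_bh() (taking a reference with atomic_inc_not_zero() if the entry must outlive the read-side section), writers traverse and relink under the table lock with rcu_dereference_protected() and rcu_assign_pointer(), and freeing is deferred through call_rcu(). The sketch below is illustrative only and not part of the patch; it shows the same technique on a hypothetical "demo_*" list protected by a spinlock rather than the neighbour table's rwlock.

/*
 * Minimal kernel-style sketch of the RCU list pattern used by this patch.
 * All demo_* names are hypothetical; only the RCU/lockdep APIs are real.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_node {
	struct demo_node __rcu	*next;
	int			key;
	struct rcu_head		rcu;
};

static struct demo_node __rcu *demo_head;
static DEFINE_SPINLOCK(demo_lock);

/* Reader: lockless chain walk; the real patch also bumps a refcount
 * with atomic_inc_not_zero() before leaving the read-side section. */
static struct demo_node *demo_lookup(int key)
{
	struct demo_node *n;

	rcu_read_lock_bh();
	for (n = rcu_dereference_bh(demo_head);
	     n != NULL;
	     n = rcu_dereference_bh(n->next))
		if (n->key == key)
			break;
	rcu_read_unlock_bh();
	return n;
}

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

/* Writer: unlink under the lock, then defer the free past a grace period. */
static void demo_remove(int key)
{
	struct demo_node __rcu **np = &demo_head;
	struct demo_node *n;

	spin_lock_bh(&demo_lock);
	while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&demo_lock))) != NULL) {
		if (n->key == key) {
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&demo_lock)));
			call_rcu(&n->rcu, demo_free_rcu);
			break;
		}
		np = &n->next;
	}
	spin_unlock_bh(&demo_lock);
}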