@@ -52,7 +52,6 @@ struct xdp_bulk_queue {
 struct bpf_cpu_map_entry {
 	u32 cpu;    /* kthread CPU and map index */
 	int map_id; /* Back reference to map */
-	u32 qsize;  /* Queue size placeholder for map lookup */
 
 	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
 	struct xdp_bulk_queue __percpu *bulkq;
@@ -62,10 +61,13 @@ struct bpf_cpu_map_entry {
 	/* Queue with potential multi-producers, and single-consumer kthread */
 	struct ptr_ring *queue;
 	struct task_struct *kthread;
-	struct work_struct kthread_stop_wq;
+
+	struct bpf_cpumap_val value;
 
 	atomic_t refcnt; /* Control when this struct can be free'ed */
 	struct rcu_head rcu;
+
+	struct work_struct kthread_stop_wq;
 };
 
 struct bpf_cpu_map {
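For reference, the map-value layout the entry now embeds is the UAPI struct bpf_cpumap_val. A minimal sketch of the definition this patch assumes (only the qsize member exists at this point in the series):

/* Sketch of the CPUMAP map-value layout from include/uapi/linux/bpf.h.
 * The struct is a configuration interface: new members may only be
 * appended, so qsize stays first and the layout remains compatible
 * with the old bare-u32 "queue size" value.
 */
struct bpf_cpumap_val {
	__u32 qsize;	/* queue size to remote target CPU */
};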
@@ -307,8 +309,8 @@ static int cpu_map_kthread_run(void *data)
 	return 0;
 }
 
-static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
-							int map_id)
+static struct bpf_cpu_map_entry *
+__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
 {
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
@@ -338,13 +340,13 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 	if (!rcpu->queue)
 		goto free_bulkq;
 
-	err = ptr_ring_init(rcpu->queue, qsize, gfp);
+	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
 	if (err)
 		goto free_queue;
 
 	rcpu->cpu    = cpu;
 	rcpu->map_id = map_id;
-	rcpu->qsize  = qsize;
+	rcpu->value.qsize = value->qsize;
 
 	/* Setup kthread */
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
@@ -437,31 +439,31 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 			       u64 map_flags)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+	struct bpf_cpumap_val cpumap_value = {};
 	struct bpf_cpu_map_entry *rcpu;
-
 	/* Array index key correspond to CPU number */
 	u32 key_cpu = *(u32 *)key;
-	/* Value is the queue size */
-	u32 qsize = *(u32 *)value;
+
+	memcpy(&cpumap_value, value, map->value_size);
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
 	if (unlikely(key_cpu >= cmap->map.max_entries))
 		return -E2BIG;
 	if (unlikely(map_flags == BPF_NOEXIST))
 		return -EEXIST;
-	if (unlikely(qsize > 16384)) /* sanity limit on qsize */
+	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
 		return -EOVERFLOW;
 
 	/* Make sure CPU is a valid possible cpu */
 	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
 		return -ENODEV;
 
-	if (qsize == 0) {
+	if (cpumap_value.qsize == 0) {
 		rcpu = NULL; /* Same as deleting */
 	} else {
 		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
+		rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
 		if (!rcpu)
 			return -ENOMEM;
 		rcpu->cmap = cmap;
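With the value formalized as a struct, a user-space update passes a struct bpf_cpumap_val rather than a bare u32. The helper below is a hypothetical sketch (map_fd, dest_cpu and the qsize choice are illustrative); bpf_map_update_elem() and struct bpf_cpumap_val are the existing libbpf/UAPI interfaces, and because qsize is the first member the old u32-sized value stays layout-compatible.

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Hypothetical user-space helper: configure the queue size for one
 * destination CPU in a BPF_MAP_TYPE_CPUMAP map.
 */
static int cpumap_set_entry(int map_fd, __u32 dest_cpu, __u32 qsize)
{
	struct bpf_cpumap_val val = {
		.qsize = qsize,	/* ptr_ring size for dest_cpu's kthread */
	};

	/* Key is the destination CPU index; qsize == 0 clears the entry,
	 * matching the kernel-side "same as deleting" path above.
	 */
	return bpf_map_update_elem(map_fd, &dest_cpu, &val, 0);
}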
@@ -523,7 +525,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
 	struct bpf_cpu_map_entry *rcpu =
 		__cpu_map_lookup_elem(map, *(u32 *)key);
 
-	return rcpu ? &rcpu->qsize : NULL;
+	return rcpu ? &rcpu->value : NULL;
 }
 
 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
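On lookup, the map now hands back a pointer to the whole embedded value struct instead of just the queue size, so a syscall-side lookup copies out a struct bpf_cpumap_val. A minimal user-space sketch (map_fd is a placeholder obtained elsewhere, e.g. from libbpf after loading the object that defines the cpumap; bpf_map_lookup_elem() is the existing libbpf call):

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Illustrative only: dump the configured queue size for one CPU entry. */
static void cpumap_dump_entry(int map_fd, __u32 cpu)
{
	struct bpf_cpumap_val val = {};

	if (!bpf_map_lookup_elem(map_fd, &cpu, &val))
		printf("cpu %u: qsize %u\n", cpu, val.qsize);
}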