Skip to content

Commit 644bfe5

Browse files
LorenzoBianconi authored and borkmann committed
cpumap: Formalize map value as a named struct
As it has been already done for devmap, introduce 'struct bpf_cpumap_val' to formalize the expected values that can be passed in for a CPUMAP. Update cpumap code to use the struct. Signed-off-by: Lorenzo Bianconi <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Jesper Dangaard Brouer <[email protected]> Link: https://lore.kernel.org/bpf/754f950674665dae6139c061d28c1d982aaf4170.1594734381.git.lorenzo@kernel.org
1 parent a4e76f1 commit 644bfe5

File tree

3 files changed

+33
-13
lines changed

3 files changed

+33
-13
lines changed

include/uapi/linux/bpf.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3849,6 +3849,15 @@ struct bpf_devmap_val {
 	} bpf_prog;
 };
 
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+	__u32 qsize;	/* queue size to remote target CPU */
+};
+
 enum sk_action {
 	SK_DROP = 0,
 	SK_PASS,

kernel/bpf/cpumap.c

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,6 @@ struct xdp_bulk_queue {
 struct bpf_cpu_map_entry {
 	u32 cpu;    /* kthread CPU and map index */
 	int map_id; /* Back reference to map */
-	u32 qsize;  /* Queue size placeholder for map lookup */
 
 	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
 	struct xdp_bulk_queue __percpu *bulkq;
@@ -62,10 +61,13 @@ struct bpf_cpu_map_entry {
 	/* Queue with potential multi-producers, and single-consumer kthread */
 	struct ptr_ring *queue;
 	struct task_struct *kthread;
-	struct work_struct kthread_stop_wq;
+
+	struct bpf_cpumap_val value;
 
 	atomic_t refcnt; /* Control when this struct can be free'ed */
 	struct rcu_head rcu;
+
+	struct work_struct kthread_stop_wq;
 };
 
 struct bpf_cpu_map {
@@ -307,8 +309,8 @@ static int cpu_map_kthread_run(void *data)
 	return 0;
 }
 
-static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
-						       int map_id)
+static struct bpf_cpu_map_entry *
+__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
 {
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
@@ -338,13 +340,13 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 	if (!rcpu->queue)
 		goto free_bulkq;
 
-	err = ptr_ring_init(rcpu->queue, qsize, gfp);
+	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
 	if (err)
 		goto free_queue;
 
 	rcpu->cpu    = cpu;
 	rcpu->map_id = map_id;
-	rcpu->qsize  = qsize;
+	rcpu->value.qsize = value->qsize;
 
 	/* Setup kthread */
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
@@ -437,31 +439,31 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 			       u64 map_flags)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+	struct bpf_cpumap_val cpumap_value = {};
 	struct bpf_cpu_map_entry *rcpu;
-
 	/* Array index key correspond to CPU number */
 	u32 key_cpu = *(u32 *)key;
-	/* Value is the queue size */
-	u32 qsize = *(u32 *)value;
+
+	memcpy(&cpumap_value, value, map->value_size);
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
 	if (unlikely(key_cpu >= cmap->map.max_entries))
 		return -E2BIG;
 	if (unlikely(map_flags == BPF_NOEXIST))
 		return -EEXIST;
-	if (unlikely(qsize > 16384)) /* sanity limit on qsize */
+	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
 		return -EOVERFLOW;
 
 	/* Make sure CPU is a valid possible cpu */
 	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
 		return -ENODEV;
 
-	if (qsize == 0) {
+	if (cpumap_value.qsize == 0) {
 		rcpu = NULL; /* Same as deleting */
 	} else {
 		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
+		rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
 		if (!rcpu)
 			return -ENOMEM;
 		rcpu->cmap = cmap;
@@ -523,7 +525,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
 	struct bpf_cpu_map_entry *rcpu =
 		__cpu_map_lookup_elem(map, *(u32 *)key);
 
-	return rcpu ? &rcpu->qsize : NULL;
+	return rcpu ? &rcpu->value : NULL;
 }
 
 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)

tools/include/uapi/linux/bpf.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3849,6 +3849,15 @@ struct bpf_devmap_val {
 	} bpf_prog;
 };
 
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+	__u32 qsize;	/* queue size to remote target CPU */
+};
+
 enum sk_action {
 	SK_DROP = 0,
 	SK_PASS,

0 commit comments

Comments
 (0)