Commit ac49b75

Yonghong Song authored and gregkh committed
bpf: Do not include stack ptr register in precision backtracking bookkeeping
[ Upstream commit e2d2115 ]

Yi Lai reported an issue ([1]) where the following warning appears in
kernel dmesg:

  [ 60.643604] verifier backtracking bug
  [ 60.643635] WARNING: CPU: 10 PID: 2315 at kernel/bpf/verifier.c:4302 __mark_chain_precision+0x3a6c/0x3e10
  [ 60.648428] Modules linked in: bpf_testmod(OE)
  [ 60.650471] CPU: 10 UID: 0 PID: 2315 Comm: test_progs Tainted: G OE 6.15.0-rc4-gef11287f8289-dirty #327 PREEMPT(full)
  [ 60.654385] Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
  [ 60.656682] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
  [ 60.660475] RIP: 0010:__mark_chain_precision+0x3a6c/0x3e10
  [ 60.662814] Code: 5a 30 84 89 ea e8 c4 d9 01 00 80 3d 3e 7d d8 04 00 0f 85 60 fa ff ff c6 05 31 7d d8 04 01 48 c7 c7 00 58 30 84 e8 c4 06 a5 ff <0f> 0b e9 46 fa ff ff 48 ...
  [ 60.668720] RSP: 0018:ffff888116cc7298 EFLAGS: 00010246
  [ 60.671075] RAX: 54d70e82dfd31900 RBX: ffff888115b65e20 RCX: 0000000000000000
  [ 60.673659] RDX: 0000000000000001 RSI: 0000000000000004 RDI: 00000000ffffffff
  [ 60.676241] RBP: 0000000000000400 R08: ffff8881f6f23bd3 R09: 1ffff1103ede477a
  [ 60.678787] R10: dffffc0000000000 R11: ffffed103ede477b R12: ffff888115b60ae8
  [ 60.681420] R13: 1ffff11022b6cbc4 R14: 00000000fffffff2 R15: 0000000000000001
  [ 60.684030] FS: 00007fc2aedd80c0(0000) GS:ffff88826fa8a000(0000) knlGS:0000000000000000
  [ 60.686837] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  [ 60.689027] CR2: 000056325369e000 CR3: 000000011088b002 CR4: 0000000000370ef0
  [ 60.691623] Call Trace:
  [ 60.692821] <TASK>
  [ 60.693960] ? __pfx_verbose+0x10/0x10
  [ 60.695656] ? __pfx_disasm_kfunc_name+0x10/0x10
  [ 60.697495] check_cond_jmp_op+0x16f7/0x39b0
  [ 60.699237] do_check+0x58fa/0xab10
  ...

Further analysis shows the warning is at line 4302 as below:

  4294  /* static subprog call instruction, which
  4295   * means that we are exiting current subprog,
  4296   * so only r1-r5 could be still requested as
  4297   * precise, r0 and r6-r10 or any stack slot in
  4298   * the current frame should be zero by now
  4299   */
  4300  if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
  4301          verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
  4302          WARN_ONCE(1, "verifier backtracking bug");
  4303          return -EFAULT;
  4304  }

With the below test (also in the next patch):

  __used __naked static void __bpf_jmp_r10(void)
  {
          asm volatile (
          "r2 = 2314885393468386424 ll;"
          "goto +0;"
          "if r2 <= r10 goto +3;"
          "if r1 >= -1835016 goto +0;"
          "if r2 <= 8 goto +0;"
          "if r3 <= 0 goto +0;"
          "exit;"
          ::: __clobber_all);
  }

  SEC("?raw_tp")
  __naked void bpf_jmp_r10(void)
  {
          asm volatile (
          "r3 = 0 ll;"
          "call __bpf_jmp_r10;"
          "r0 = 0;"
          "exit;"
          ::: __clobber_all);
  }

The following is the verifier failure log:

  0: (18) r3 = 0x0 ; R3_w=0
  2: (85) call pc+2
  caller:
   R10=fp0
  callee:
   frame1: R1=ctx() R3_w=0 R10=fp0
  5: frame1: R1=ctx() R3_w=0 R10=fp0
  ; asm volatile (" \ @ verifier_precision.c:184
  5: (18) r2 = 0x20202000256c6c78 ; frame1: R2_w=0x20202000256c6c78
  7: (05) goto pc+0
  8: (bd) if r2 <= r10 goto pc+3 ; frame1: R2_w=0x20202000256c6c78 R10=fp0
  9: (35) if r1 >= 0xffe3fff8 goto pc+0 ; frame1: R1=ctx()
  10: (b5) if r2 <= 0x8 goto pc+0
  mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1
  mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0
  mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3
  mark_precise: frame1: regs=r2,r10 stack= before 7: (05) goto pc+0
  mark_precise: frame1: regs=r2,r10 stack= before 5: (18) r2 = 0x20202000256c6c78
  mark_precise: frame1: regs=r10 stack= before 2: (85) call pc+2
  BUG regs 400

The main failure reason is r10 ending up in the precision backtracking
bookkeeping. r10 is always precise, so there is no need to add it to the
precision backtracking bookkeeping at all.

One way to fix the issue is to prevent bt_set_reg() whenever the src/dst
reg is r10. Andrii suggested going with a push_insn_history() approach
instead, to avoid explicitly checking r10 in backtrack_insn(). This patch
adds push_insn_history() support for cond_jmp operations of the form
'rX <op> rY'. In check_cond_jmp_op(), if either rX or rY is a stack
pointer, push_insn_history() records that fact, and backtrack_insn() later
calls bt_set_reg() only for the register(s) that actually need it.

  [1] https://lore.kernel.org/bpf/Z%2F8q3xzpU59CIYQE@ly-workstation/

Reported-by: Yi Lai <[email protected]>
Fixes: 407958a ("bpf: encapsulate precision backtracking bookkeeping")
Signed-off-by: Yonghong Song <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
Signed-off-by: Sasha Levin <[email protected]>
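For reference, the rejected alternative mentioned above, filtering the frame pointer directly in backtrack_insn(), would look roughly like the sketch below. This is illustrative only and not part of the applied patch; BPF_REG_FP is the kernel's alias for register r10.

  /* Hypothetical alternative, not the applied fix: skip the always-precise
   * frame pointer (r10) when requesting precision for the operands of an
   * 'rX <op> rY' conditional jump.
   */
  if (sreg != BPF_REG_FP)
          bt_set_reg(bt, sreg);
  if (dreg != BPF_REG_FP)
          bt_set_reg(bt, dreg);

Recording the PTR_TO_STACK property in the instruction history instead keeps backtrack_insn() free of register-type special cases, which is why the push_insn_history() route was taken.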
1 parent 0b9bb52 commit ac49b75

2 files changed: +24 -6 lines changed

include/linux/bpf_verifier.h

Lines changed: 8 additions & 4 deletions
@@ -356,7 +356,11 @@ enum {
 	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
 	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
 
-	INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
+	INSN_F_STACK_ACCESS = BIT(9),
+
+	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+	/* total 12 bits are used now. */
 };
 
 static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
@@ -365,9 +369,9 @@ static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
 struct bpf_insn_hist_entry {
 	u32 idx;
 	/* insn idx can't be bigger than 1 million */
-	u32 prev_idx : 22;
-	/* special flags, e.g., whether insn is doing register stack spill/load */
-	u32 flags : 10;
+	u32 prev_idx : 20;
+	/* special INSN_F_xxx flags */
+	u32 flags : 12;
 	/* additional registers that need precision tracking when this
 	 * jump is backtracked, vector of six 10-bit records
 	 */
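The two new flag bits are paid for by narrowing prev_idx from 22 to 20 bits, which is still sufficient: instruction indices are capped at 1 million and 2^20 = 1,048,576 >= 1,000,000. If one wanted to spell that invariant out next to the existing static_asserts, a sketch (not part of the patch; it assumes the BPF_COMPLEXITY_LIMIT_INSNS constant from include/linux/bpf.h) could be:

  /* Illustrative only: 20 bits still cover the verifier's 1M insn-index limit. */
  static_assert((1U << 20) >= BPF_COMPLEXITY_LIMIT_INSNS);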

kernel/bpf/verifier.c

Lines changed: 16 additions & 2 deletions
@@ -4413,8 +4413,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
 				 * before it would be equally necessary to
 				 * propagate it to dreg.
 				 */
-				bt_set_reg(bt, dreg);
-				bt_set_reg(bt, sreg);
+				if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK))
+					bt_set_reg(bt, sreg);
+				if (!hist || !(hist->flags & INSN_F_DST_REG_STACK))
+					bt_set_reg(bt, dreg);
 			} else if (BPF_SRC(insn->code) == BPF_K) {
 				/* dreg <cond> K
 				 * Only dreg still needs precision before
@@ -16377,6 +16379,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *eq_branch_regs;
 	struct linked_regs linked_regs = {};
 	u8 opcode = BPF_OP(insn->code);
+	int insn_flags = 0;
 	bool is_jmp32;
 	int pred = -1;
 	int err;
@@ -16435,6 +16438,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				insn->src_reg);
 			return -EACCES;
 		}
+
+		if (src_reg->type == PTR_TO_STACK)
+			insn_flags |= INSN_F_SRC_REG_STACK;
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -16446,6 +16452,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		__mark_reg_known(src_reg, insn->imm);
 	}
 
+	if (dst_reg->type == PTR_TO_STACK)
+		insn_flags |= INSN_F_DST_REG_STACK;
+	if (insn_flags) {
+		err = push_insn_history(env, this_branch, insn_flags, 0);
+		if (err)
+			return err;
+	}
+
 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 	pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
 	if (pred >= 0) {
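Taken together for the reproducer above: the jump 'if r2 <= r10 goto +3' has a PTR_TO_STACK src_reg, so check_cond_jmp_op() now records INSN_F_SRC_REG_STACK via push_insn_history(). When __mark_chain_precision() later backtracks through that jump, backtrack_insn() skips bt_set_reg() for r10 and marks only r2, so the r10 bit (0x400) never reaches the bt_reg_mask(bt) & ~BPF_REGMASK_ARGS check at the static subprog call and the "BUG regs 400" warning no longer fires.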
