@@ -4413,8 +4413,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
 			 * before it would be equally necessary to
 			 * propagate it to dreg.
 			 */
-			bt_set_reg(bt, dreg);
-			bt_set_reg(bt, sreg);
+			if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK))
+				bt_set_reg(bt, sreg);
+			if (!hist || !(hist->flags & INSN_F_DST_REG_STACK))
+				bt_set_reg(bt, dreg);
 		} else if (BPF_SRC(insn->code) == BPF_K) {
 			/* dreg <cond> K
 			 * Only dreg still needs precision before
@@ -16377,6 +16379,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *eq_branch_regs;
 	struct linked_regs linked_regs = {};
 	u8 opcode = BPF_OP(insn->code);
+	int insn_flags = 0;
 	bool is_jmp32;
 	int pred = -1;
 	int err;
@@ -16435,6 +16438,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				insn->src_reg);
 			return -EACCES;
 		}
+
+		if (src_reg->type == PTR_TO_STACK)
+			insn_flags |= INSN_F_SRC_REG_STACK;
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -16446,6 +16452,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			__mark_reg_known(src_reg, insn->imm);
 	}
 
+	if (dst_reg->type == PTR_TO_STACK)
+		insn_flags |= INSN_F_DST_REG_STACK;
+	if (insn_flags) {
+		err = push_insn_history(env, this_branch, insn_flags, 0);
+		if (err)
+			return err;
+	}
+
 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 	pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
 	if (pred >= 0) {