// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <linux/bitmap.h>
#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
/* for any branch, call, exit record the history of jmps in the given state */
int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
			 int insn_flags, u64 linked_regs)
{
	struct bpf_jmp_history_entry *ent = env->cur_hist_ent;
	size_t new_size;
	u32 new_cnt;

	/* If the current instruction already has a history entry, fold the
	 * new flags/linked_regs into it rather than appending a duplicate.
	 */
	if (ent) {
		/* atomic instructions push insn_flags twice, for READ and
		 * WRITE sides, but they should agree on stack slot
		 */
		verifier_bug_if((ent->flags & insn_flags) &&
				(ent->flags & insn_flags) != insn_flags,
				env, "insn history: insn_idx %d cur flags %x new flags %x",
				env->insn_idx, ent->flags, insn_flags);
		ent->flags |= insn_flags;
		verifier_bug_if(ent->linked_regs != 0, env,
				"insn history: insn_idx %d linked_regs: %#llx",
				env->insn_idx, ent->linked_regs);
		ent->linked_regs = linked_regs;
		return 0;
	}

	/* No entry for this instruction yet: grow the array by one slot
	 * (krealloc keeps the old buffer valid on failure) and fill it in.
	 */
	new_cnt = cur->jmp_history_cnt + 1;
	new_size = kmalloc_size_roundup(size_mul(new_cnt, sizeof(*ent)));
	ent = krealloc(cur->jmp_history, new_size, GFP_KERNEL_ACCOUNT);
	if (!ent)
		return -ENOMEM;
	cur->jmp_history = ent;

	ent = &cur->jmp_history[new_cnt - 1];
	ent->idx = env->insn_idx;
	ent->prev_idx = env->prev_insn_idx;
	ent->flags = insn_flags;
	ent->linked_regs = linked_regs;
	cur->jmp_history_cnt = new_cnt;
	env->cur_hist_ent = ent;
	return 0;
}
static bool is_atomic_load_insn(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_STX &&
BPF_MODE(insn->code) == BPF_ATOMIC &&
insn->imm == BPF_LOAD_ACQ;
}
static bool is_atomic_fetch_insn(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_STX &&
BPF_MODE(insn->code) == BPF_ATOMIC &&
(insn->imm & BPF_FETCH);
}
/* Decode the stack slot index (spi) packed into instruction history flags. */
static int insn_stack_access_spi(int insn_flags)
{
	int shifted = insn_flags >> INSN_F_SPI_SHIFT;

	return shifted & INSN_F_SPI_MASK;
}
/* Decode the frame number packed into instruction history flags. */
static int insn_stack_access_frameno(int insn_flags)
{
	return INSN_F_FRAMENO_MASK & insn_flags;
}
/* Backtrack one insn at a time. If idx is not at the top of recorded
* history then previous instruction came from straight line execution.
* Return -ENOENT if we exhausted all instructions within given state.
*
* It's legal to have a bit of a looping with the same starting and ending
* insn index within the same state, e.g.: 3->4->5->3, so just because current
* instruction index is the same as state's first_idx doesn't mean we are
* done. If there is still some jump history left, we should keep going. We
* need to take into account that we might have a jump history between given
* state's parent and itself, due to checkpointing. In this case, we'll have
* history entry recording a jump from last instruction of parent state and
* first instruction of given state.
*/
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	struct bpf_jmp_history_entry *hist = st->jmp_history;
	u32 cnt = *history;

	/* At the state's first insn, stop once no usable history remains:
	 * either none at all, or only the checkpoint entry recording the
	 * jump into this very instruction from the parent state.
	 */
	if (i == st->first_insn_idx &&
	    (cnt == 0 || (cnt == 1 && hist[0].idx == i)))
		return -ENOENT;

	if (cnt == 0 || hist[cnt - 1].idx != i)
		return i - 1; /* previous insn via straight-line execution */

	/* topmost history entry records a jump to insn i; follow it back */
	(*history)--;
	return hist[cnt - 1].prev_idx;
}
/* Return the topmost history entry (below hist_end) if it records insn_idx,
 * otherwise NULL.
 */
static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
							u32 hist_end, int insn_idx)
{
	struct bpf_jmp_history_entry *ent;

	if (!hist_end)
		return NULL;
	ent = &st->jmp_history[hist_end - 1];
	return ent->idx == insn_idx ? ent : NULL;
}
/* Start backtracking at the given call frame. */
static inline void bt_init(struct backtrack_state *bt, u32 frame)
{
	bt->frame = frame;
}
/* Zero out all backtracking state while keeping the env back-pointer. */
static inline void bt_reset(struct backtrack_state *bt)
{
	struct bpf_verifier_env *saved_env = bt->env;

	memset(bt, 0, sizeof(*bt));
	bt->env = saved_env;
}
static inline u32 bt_empty(struct backtrack_state *bt)
{
u64 mask = 0;
int i;
for (i = 0; i