Lines Matching refs:meta

Cross-references to the identifier "meta" in drivers/net/ethernet/netronome/nfp/bpf/verifier.c, grouped by enclosing function. The leading number on each match is the source line; the trailing annotation records whether "meta" is a function argument or a local variable there. Short annotations with condensed code sketches follow each group; they are reconstructions from the kernel tree, not verbatim source, and details may vary by kernel version.

20 nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,  in nfp_bpf_goto_meta()  argument
25 backward = meta->n - insn_idx; in nfp_bpf_goto_meta()
26 forward = insn_idx - meta->n; in nfp_bpf_goto_meta()
30 meta = nfp_prog_last_meta(nfp_prog); in nfp_bpf_goto_meta()
34 meta = nfp_prog_first_meta(nfp_prog); in nfp_bpf_goto_meta()
39 meta = nfp_meta_next(meta); in nfp_bpf_goto_meta()
42 meta = nfp_meta_prev(meta); in nfp_bpf_goto_meta()
44 return meta; in nfp_bpf_goto_meta()
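
The matches above are the cursor-repositioning helper. nfp_prog keeps one struct nfp_insn_meta per BPF instruction on a doubly linked list, so there is no O(1) lookup by instruction index; nfp_bpf_goto_meta() instead walks from whichever anchor is closest: the cached cursor, the list head, or the list tail. Lines 25-26 rely on unsigned arithmetic: exactly one of the two subtractions wraps around to a huge value, which the min() comparisons then discard. A condensed paraphrase:

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  unsigned int insn_idx)
{
        unsigned int forward, backward, i;

        /* One of these underflows (unsigned); min() below discards it. */
        backward = meta->n - insn_idx;
        forward = insn_idx - meta->n;

        /* Walking backward from the list tail may beat both. */
        if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
                backward = nfp_prog->n_insns - insn_idx - 1;
                meta = nfp_prog_last_meta(nfp_prog);
        }
        /* Likewise walking forward from the head. */
        if (min(forward, backward) > insn_idx && backward > insn_idx) {
                forward = insn_idx;
                meta = nfp_prog_first_meta(nfp_prog);
        }

        if (forward < backward)
                for (i = 0; i < forward; i++)
                        meta = nfp_meta_next(meta);
        else
                for (i = 0; i < backward; i++)
                        meta = nfp_meta_prev(meta);

        return meta;
}
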
49 struct nfp_insn_meta *meta, in nfp_record_adjust_head() argument
71 if (nfp_prog->adjust_head_location != meta->n) in nfp_record_adjust_head()
74 if (meta->arg2.reg.var_off.value != imm) in nfp_record_adjust_head()
78 location = meta->n; in nfp_record_adjust_head()
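
NFP firmware can pre-adjust the packet head before the program runs, but only for a single bpf_xdp_adjust_head() call site with a constant argument inside the range the firmware guarantees it can absorb. This helper records that call site in nfp_prog->adjust_head_location; if the verifier later revisits a different call site (line 71) or the same site with a different constant (line 74), the location is invalidated and the optimization is dropped. Condensed sketch (some firmware-capability checks omitted):

static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                       struct nfp_insn_meta *meta,
                       const struct bpf_reg_state *reg2)
{
        unsigned int location = UINT_MAX;
        int imm;

        /* The argument must be a known constant... */
        if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
                goto exit_set_location;
        imm = reg2->var_off.value;
        /* ...within what the firmware guarantees it can absorb. */
        if (imm > (int)bpf->adjust_head.guaranteed_add ||
            imm < -bpf->adjust_head.guaranteed_sub)
                goto exit_set_location;

        if (nfp_prog->adjust_head_location != UINT_MAX) {
                /* Only one call site with one constant is allowed. */
                if (nfp_prog->adjust_head_location != meta->n)
                        goto exit_set_location;
                if (meta->arg2.reg.var_off.value != imm)
                        goto exit_set_location;
        }

        location = meta->n;
exit_set_location:
        nfp_prog->adjust_head_location = location;
}
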
158 struct nfp_insn_meta *meta, in nfp_bpf_map_call_ok() argument
172 struct nfp_insn_meta *meta) in nfp_bpf_check_helper_call() argument
178 u32 func_id = meta->insn.imm; in nfp_bpf_check_helper_call()
191 nfp_record_adjust_head(bpf, nfp_prog, meta, reg2); in nfp_bpf_check_helper_call()
202 if (!nfp_bpf_map_call_ok("map_lookup", env, meta, in nfp_bpf_check_helper_call()
205 meta->func_id ? &meta->arg2 : NULL)) in nfp_bpf_check_helper_call()
210 if (!nfp_bpf_map_call_ok("map_update", env, meta, in nfp_bpf_check_helper_call()
213 meta->func_id ? &meta->arg2 : NULL) || in nfp_bpf_check_helper_call()
220 if (!nfp_bpf_map_call_ok("map_delete", env, meta, in nfp_bpf_check_helper_call()
223 meta->func_id ? &meta->arg2 : NULL)) in nfp_bpf_check_helper_call()
288 if (!meta->func_id) in nfp_bpf_check_helper_call()
291 if (reg1->type != meta->arg1.type) { in nfp_bpf_check_helper_call()
293 meta->arg1.type, reg1->type); in nfp_bpf_check_helper_call()
303 meta->func_id = func_id; in nfp_bpf_check_helper_call()
304 meta->arg1 = *reg1; in nfp_bpf_check_helper_call()
305 meta->arg2.reg = *reg2; in nfp_bpf_check_helper_call()
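
nfp_bpf_check_helper_call() whitelists the few helpers the firmware implements. For the map helpers (lines 202-223), nfp_bpf_map_call_ok() checks that the firmware advertises the helper and that the key/value pointer argument is usable; the "meta->func_id ? &meta->arg2 : NULL" pattern passes the argument state cached on an earlier pass so it can be compared. Because the verifier can reach one instruction along many paths, lines 288-305 cache the helper id and argument register states on first sight and require every later visit to agree. A sketch of that tail (restructured and abbreviated; the real code does the type check per helper case):

        /* meta->func_id == 0 until the first verifier path reaches this
         * call; later paths must present the same argument types. */
        if (meta->func_id && reg1->type != meta->arg1.type) {
                pr_vlog(env, "register type changed between verification passes: %d -> %d\n",
                        meta->arg1.type, reg1->type);
                return -EINVAL;
        }

        meta->func_id = func_id;
        meta->arg1 = *reg1;
        meta->arg2.reg = *reg2;

        return 0;
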
344 struct nfp_insn_meta *meta, in nfp_bpf_check_stack_access() argument
351 meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME; in nfp_bpf_check_stack_access()
358 if (meta->ptr.type == NOT_INIT) in nfp_bpf_check_stack_access()
361 old_off = meta->ptr.off + meta->ptr.var_off.value; in nfp_bpf_check_stack_access()
364 meta->ptr_not_const |= old_off != new_off; in nfp_bpf_check_stack_access()
366 if (!meta->ptr_not_const) in nfp_bpf_check_stack_access()
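
Stack accesses are translated to fixed offsets on the NFP, so a pointer whose offset differs between verifier paths is a problem. Line 351 flags accesses landing in the caller's stack frame (reg->frameno differs from the current frame); line 358 bails out on the first visit, before any offset has been recorded. Lines 361-366 then compare the previously recorded offset with the current one. A mismatch is still tolerated if both offsets agree modulo 4, since the generated code addresses the stack in 4-byte words. Approximately:

        old_off = meta->ptr.off + meta->ptr.var_off.value;
        new_off = reg->off + reg->var_off.value;

        meta->ptr_not_const |= old_off != new_off;

        if (!meta->ptr_not_const)
                return 0;

        /* Word-aligned equivalence is still translatable. */
        if (old_off % 4 == new_off % 4)
                return 0;

        pr_vlog(env, "stack access changed location was:%d is:%d\n",
                old_off, new_off);
        return -EINVAL;
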
417 nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta, in nfp_bpf_map_mark_used() argument
431 off = reg->var_off.value + meta->insn.off + reg->off; in nfp_bpf_map_mark_used()
432 size = BPF_LDST_BYTES(&meta->insn); in nfp_bpf_map_mark_used()
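
Offloaded maps record, per 4-byte word of the value, how the program uses it (read, write, atomic counter), so conflicting uses of the same word can be rejected. Line 431 folds the register's constant offset, the register offset, and the instruction offset into a byte offset; line 432 takes the access size from the load/store opcode. Sketch of the marking loop (nfp_bpf_map_mark_used_one() is the per-word helper in the kernel tree; its body is not shown):

        off = reg->var_off.value + meta->insn.off + reg->off;
        size = BPF_LDST_BYTES(&meta->insn);
        offmap = map_to_offmap(reg->map_ptr);
        nfp_map = offmap->dev_priv;

        if (off + size > offmap->map.value_size) {
                pr_vlog(env, "map value access out-of-bounds\n");
                return -EINVAL;
        }

        /* Mark every 4-byte word the access touches. */
        for (i = 0; i < size; i += 4 - (off + i) % 4) {
                err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
                if (err)
                        return err;
        }
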
451 nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, in nfp_bpf_check_ptr() argument
466 err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env); in nfp_bpf_check_ptr()
472 if (is_mbpf_load(meta)) { in nfp_bpf_check_ptr()
473 err = nfp_bpf_map_mark_used(env, meta, reg, in nfp_bpf_check_ptr()
478 if (is_mbpf_store(meta)) { in nfp_bpf_check_ptr()
482 if (is_mbpf_atomic(meta)) { in nfp_bpf_check_ptr()
483 err = nfp_bpf_map_mark_used(env, meta, reg, in nfp_bpf_check_ptr()
490 if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { in nfp_bpf_check_ptr()
492 meta->ptr.type, reg->type); in nfp_bpf_check_ptr()
496 meta->ptr = *reg; in nfp_bpf_check_ptr()
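
nfp_bpf_check_ptr() is the common pointer gate for loads, stores, and atomics. Only PTR_TO_CTX, PTR_TO_STACK, PTR_TO_PACKET and PTR_TO_MAP_VALUE are accepted; stack pointers go through nfp_bpf_check_stack_access() (line 466), and map-value pointers are marked per access kind. Lines 490-496 then pin the pointer type: an instruction that dereferenced one type on an earlier verifier path may not see another, and the register state is cached in meta->ptr for the translator. Sketch of the map-value branch (use constants are from the kernel tree):

        if (reg->type == PTR_TO_MAP_VALUE) {
                if (is_mbpf_load(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_READ);
                        if (err)
                                return err;
                }
                if (is_mbpf_store(meta)) {
                        pr_vlog(env, "map writes not supported\n");
                        return -EOPNOTSUPP;
                }
                if (is_mbpf_atomic(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_ATOMIC_CNT);
                        if (err)
                                return err;
                }
        }

        if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
                pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
                        meta->ptr.type, reg->type);
                return -EINVAL;
        }

        meta->ptr = *reg;
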
502 nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, in nfp_bpf_check_store() argument
505 const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg; in nfp_bpf_check_store()
510 switch (meta->insn.off) { in nfp_bpf_check_store()
522 return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); in nfp_bpf_check_store()
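
Stores are mostly delegated to the pointer check (line 522); the switch at line 510 exists for stores to the context, which are a firmware capability. For XDP programs only xdp_md->rx_queue_index is writable, and only when the firmware advertises queue selection. Approximately:

        if (reg->type == PTR_TO_CTX) {
                if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
                        switch (meta->insn.off) {
                        case offsetof(struct xdp_md, rx_queue_index):
                                if (nfp_prog->bpf->queue_select)
                                        goto exit_check_ptr;
                                pr_vlog(env, "queue selection not supported by FW\n");
                                return -EOPNOTSUPP;
                        }
                }
                pr_vlog(env, "unsupported store to context field\n");
                return -EOPNOTSUPP;
        }
exit_check_ptr:
        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
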
526 nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, in nfp_bpf_check_atomic() argument
529 const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg; in nfp_bpf_check_atomic()
530 const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg; in nfp_bpf_check_atomic()
532 if (meta->insn.imm != BPF_ADD) { in nfp_bpf_check_atomic()
533 pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm); in nfp_bpf_check_atomic()
547 meta->xadd_over_16bit |= in nfp_bpf_check_atomic()
549 meta->xadd_maybe_16bit |= in nfp_bpf_check_atomic()
552 return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); in nfp_bpf_check_atomic()
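
Only atomic add (BPF_ADD, the classic XADD) is offloaded, and only on a map value with a scalar addend. Lines 547-549 accumulate two facts across verifier paths from the operand's tnum: whether the addend can ever exceed 16 bits and whether it may fit in 16 bits, because the firmware has distinct 16-bit and full-width add paths and the JIT may need to emit both. Sketch of the range tracking:

        /* sreg->var_off is a tnum: .value holds the known bits,
         * .mask the unknown ones. */
        meta->xadd_over_16bit |=
                sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
        meta->xadd_maybe_16bit |=
                (sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
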
556 nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, in nfp_bpf_check_alu() argument
560 cur_regs(env) + meta->insn.src_reg; in nfp_bpf_check_alu()
562 cur_regs(env) + meta->insn.dst_reg; in nfp_bpf_check_alu()
564 meta->umin_src = min(meta->umin_src, sreg->umin_value); in nfp_bpf_check_alu()
565 meta->umax_src = max(meta->umax_src, sreg->umax_value); in nfp_bpf_check_alu()
566 meta->umin_dst = min(meta->umin_dst, dreg->umin_value); in nfp_bpf_check_alu()
567 meta->umax_dst = max(meta->umax_dst, dreg->umax_value); in nfp_bpf_check_alu()
581 if (is_mbpf_mul(meta)) { in nfp_bpf_check_alu()
582 if (meta->umax_dst > U32_MAX) { in nfp_bpf_check_alu()
586 if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) { in nfp_bpf_check_alu()
590 if (mbpf_class(meta) == BPF_ALU64 && in nfp_bpf_check_alu()
591 mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { in nfp_bpf_check_alu()
607 if (is_mbpf_div(meta)) { in nfp_bpf_check_alu()
608 if (meta->umax_dst > U32_MAX) { in nfp_bpf_check_alu()
612 if (mbpf_src(meta) == BPF_X) { in nfp_bpf_check_alu()
613 if (meta->umin_src != meta->umax_src) { in nfp_bpf_check_alu()
617 if (meta->umax_src > U32_MAX) { in nfp_bpf_check_alu()
622 if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { in nfp_bpf_check_alu()
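
For multiply and divide the translator needs value ranges, so lines 564-567 widen the recorded bounds of both operands every time the verifier passes the instruction. The checks that follow encode NFP limits: the multiplier only handles 32-bit operands, and a negative BPF_K immediate under ALU64 would be sign-extended to 64 bits, which it cannot represent (lines 590-591). There is no divide unit at all, so division is implemented by reciprocal multiplication and requires a constant 32-bit divisor (lines 607-622). A sketch of the divide constraints:

        if (is_mbpf_div(meta)) {
                if (meta->umax_dst > U32_MAX) {
                        pr_vlog(env, "dividend is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_src(meta) == BPF_X) {
                        /* Divide by register: only if it is provably one
                         * constant value, so the reciprocal can be
                         * precomputed. */
                        if (meta->umin_src != meta->umax_src) {
                                pr_vlog(env, "divisor is not constant\n");
                                return -EINVAL;
                        }
                        if (meta->umax_src > U32_MAX) {
                                pr_vlog(env, "divisor is not within u32 value range\n");
                                return -EINVAL;
                        }
                }
                if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
                        pr_vlog(env, "divide by negative constant is not supported\n");
                        return -EINVAL;
                }
        }
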
635 struct nfp_insn_meta *meta = nfp_prog->verifier_meta; in nfp_verify_insn() local
637 meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx); in nfp_verify_insn()
638 nfp_prog->verifier_meta = meta; in nfp_verify_insn()
640 if (!nfp_bpf_supported_opcode(meta->insn.code)) { in nfp_verify_insn()
642 meta->insn.code); in nfp_verify_insn()
646 if (meta->insn.src_reg >= MAX_BPF_REG || in nfp_verify_insn()
647 meta->insn.dst_reg >= MAX_BPF_REG) { in nfp_verify_insn()
652 if (is_mbpf_helper_call(meta)) in nfp_verify_insn()
653 return nfp_bpf_check_helper_call(nfp_prog, env, meta); in nfp_verify_insn()
654 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) in nfp_verify_insn()
657 if (is_mbpf_load(meta)) in nfp_verify_insn()
658 return nfp_bpf_check_ptr(nfp_prog, meta, env, in nfp_verify_insn()
659 meta->insn.src_reg); in nfp_verify_insn()
660 if (is_mbpf_store(meta)) in nfp_verify_insn()
661 return nfp_bpf_check_store(nfp_prog, meta, env); in nfp_verify_insn()
663 if (is_mbpf_atomic(meta)) in nfp_verify_insn()
664 return nfp_bpf_check_atomic(nfp_prog, meta, env); in nfp_verify_insn()
666 if (is_mbpf_alu(meta)) in nfp_verify_insn()
667 return nfp_bpf_check_alu(nfp_prog, meta, env); in nfp_verify_insn()
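
nfp_verify_insn() is the per-instruction hook the core verifier calls into the driver. It first re-synchronizes the cached cursor via nfp_bpf_goto_meta() (the verifier does not visit instructions in list order), rejects opcodes and register numbers the JIT cannot handle, then fans out to the checkers above. It is wired up through struct bpf_prog_offload_ops; in recent kernels the registration in the driver's offload.c looks roughly like this:

const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
        .insn_hook      = nfp_verify_insn,
        .finalize       = nfp_bpf_finalize,
        .replace_insn   = nfp_bpf_opt_replace_insn,
        .remove_insns   = nfp_bpf_opt_remove_insns,
        .prepare        = nfp_bpf_verifier_prep,
        .translate      = nfp_bpf_translate,
        .destroy        = nfp_bpf_destroy,
};
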
676 struct nfp_insn_meta *meta; in nfp_assign_subprog_idx_and_regs() local
679 list_for_each_entry(meta, &nfp_prog->insns, l) { in nfp_assign_subprog_idx_and_regs()
680 if (nfp_is_subprog_start(meta)) in nfp_assign_subprog_idx_and_regs()
682 meta->subprog_idx = index; in nfp_assign_subprog_idx_and_regs()
684 if (meta->insn.dst_reg >= BPF_REG_6 && in nfp_assign_subprog_idx_and_regs()
685 meta->insn.dst_reg <= BPF_REG_9) in nfp_assign_subprog_idx_and_regs()
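
After verification the program is split into subprograms. This pass walks the whole meta list once, bumping the index at each subprogram start, and also notes whether a subprogram writes any callee-saved register (R6-R9), in which case its prologue must push those registers. Condensed:

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (nfp_is_subprog_start(meta))
                        index++;
                meta->subprog_idx = index;

                /* Writing R6-R9 means the prologue must save them. */
                if (meta->insn.dst_reg >= BPF_REG_6 &&
                    meta->insn.dst_reg <= BPF_REG_9)
                        nfp_prog->subprog[index].needs_reg_push = 1;
        }
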
700 struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog); in nfp_bpf_get_stack_usage() local
705 unsigned short idx = meta->subprog_idx; in nfp_bpf_get_stack_usage()
720 for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx; in nfp_bpf_get_stack_usage()
721 meta = nfp_meta_next(meta)) { in nfp_bpf_get_stack_usage()
722 if (!is_mbpf_pseudo_call(meta)) in nfp_bpf_get_stack_usage()
728 ret_insn[frame] = nfp_meta_next(meta); in nfp_bpf_get_stack_usage()
732 meta = nfp_bpf_goto_meta(nfp_prog, meta, in nfp_bpf_get_stack_usage()
733 meta->n + 1 + meta->insn.imm); in nfp_bpf_get_stack_usage()
734 idx = meta->subprog_idx; in nfp_bpf_get_stack_usage()
747 meta = ret_insn[frame]; in nfp_bpf_get_stack_usage()
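
This mirrors check_max_stack_depth() in the core verifier: since BPF forbids recursion, the deepest caller chain bounds total stack use. The walk is a depth-first search with an explicit frame stack: on each pseudo call it saves the return meta and subprogram index (line 728 onwards), jumps to the callee, whose first instruction sits at meta->n + 1 + insn.imm (lines 732-733), and on reaching the end of a subprogram pops back to ret_insn[frame] (line 747). A condensed skeleton of the control flow (frame_depths, ret_prog and STACK_FRAME_ALIGN are from the kernel tree):

process_subprog:
        frame_depths[frame] = round_up(nfp_prog->subprog[idx].stack_depth,
                                       STACK_FRAME_ALIGN);
        depth += frame_depths[frame];
        max_depth = max(max_depth, depth);

continue_subprog:
        for (; meta != nfp_prog_last_meta(nfp_prog) &&
               meta->subprog_idx == idx; meta = nfp_meta_next(meta)) {
                if (!is_mbpf_pseudo_call(meta))
                        continue;

                /* Save the "return address", then descend. */
                ret_insn[frame] = nfp_meta_next(meta);
                ret_prog[frame] = idx;
                meta = nfp_bpf_goto_meta(nfp_prog, meta,
                                         meta->n + 1 + meta->insn.imm);
                idx = meta->subprog_idx;
                frame++;
                goto process_subprog;
        }
        if (frame == 0)
                return max_depth;

        /* Subprogram done: pop the frame, resume in the caller. */
        depth -= frame_depths[frame];
        frame--;
        meta = ret_insn[frame];
        idx = ret_prog[frame];
        goto continue_subprog;
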
755 struct nfp_insn_meta *meta; in nfp_bpf_insn_flag_zext() local
757 list_for_each_entry(meta, &nfp_prog->insns, l) { in nfp_bpf_insn_flag_zext()
758 if (aux[meta->n].zext_dst) in nfp_bpf_insn_flag_zext()
759 meta->flags |= FLAG_INSN_DO_ZEXT; in nfp_bpf_insn_flag_zext()
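
The core verifier tracks which 32-bit subregister writes need an explicit zero extension (the zext_dst bit in struct bpf_insn_aux_data); the driver copies that verdict into its own per-instruction flags so the JIT later emits the extension. The whole function is essentially this loop:

static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
                                   struct bpf_insn_aux_data *aux)
{
        struct nfp_insn_meta *meta;

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (aux[meta->n].zext_dst)
                        meta->flags |= FLAG_INSN_DO_ZEXT;
        }
}
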
812 struct nfp_insn_meta *meta = nfp_prog->verifier_meta; in nfp_bpf_opt_replace_insn() local
814 meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx); in nfp_bpf_opt_replace_insn()
815 nfp_prog->verifier_meta = meta; in nfp_bpf_opt_replace_insn()
818 if (is_mbpf_cond_jump(meta) && in nfp_bpf_opt_replace_insn()
825 meta->jmp_dst = list_next_entry(meta, l); in nfp_bpf_opt_replace_insn()
826 meta->jump_neg_op = false; in nfp_bpf_opt_replace_insn()
827 } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) { in nfp_bpf_opt_replace_insn()
829 off, meta->jmp_dst->n, in nfp_bpf_opt_replace_insn()
837 meta->insn.code, insn->code); in nfp_bpf_opt_replace_insn()
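
When dead-code elimination in the core verifier rewrites an instruction, this callback keeps the driver's copy coherent. Note aux_data[off].orig_idx on line 814: the verifier works with post-rewrite indices while the meta list keeps the original ones, hence the translation before nfp_bpf_goto_meta(). The only rewrite accepted is a conditional jump hard-wired into an unconditional one; with a zero jump offset the branch degenerates to a fall-through (line 825), otherwise the new target must still be the instruction the meta already points at (line 827). Sketch of that branch:

        /* Conditional jump to unconditional jump conversion. */
        if (is_mbpf_cond_jump(meta) &&
            insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
                unsigned int tgt_off = off + insn->off + 1;

                if (!insn->off) {
                        /* Branch became a fall-through. */
                        meta->jmp_dst = list_next_entry(meta, l);
                        meta->jump_neg_op = false;
                } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
                        pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
                                off, meta->jmp_dst->n,
                                aux_data[tgt_off].orig_idx);
                        return -EINVAL;
                }
                return 0;
        }
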
845 struct nfp_insn_meta *meta = nfp_prog->verifier_meta; in nfp_bpf_opt_remove_insns() local
848 meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx); in nfp_bpf_opt_remove_insns()
851 if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns)) in nfp_bpf_opt_remove_insns()
855 if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT) in nfp_bpf_opt_remove_insns()
858 meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT; in nfp_bpf_opt_remove_insns()
859 meta = list_next_entry(meta, l); in nfp_bpf_opt_remove_insns()
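
Removal never unlinks metas: instruction numbering (meta->n) must stay stable for the index arithmetic used throughout, so removed instructions are only flagged with FLAG_INSN_SKIP_VERIFIER_OPT and skipped at translation time. The loop decrements i for already-flagged metas so that cnt counts newly removed instructions only. Roughly:

int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
        unsigned int i;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);

        for (i = 0; i < cnt; i++) {
                /* Running off the list end would be a driver bug. */
                if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
                        return -EINVAL;

                /* Doesn't count toward cnt if already flagged earlier. */
                if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
                        i--;

                meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
                meta = list_next_entry(meta, l);
        }

        return 0;
}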