xref: /linux/tools/lib/bpf/libbpf.c (revision c78420ba)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  * Copyright (C) 2019 Isovalent, Inc.
11  */
12 
13 #ifndef _GNU_SOURCE
14 #define _GNU_SOURCE
15 #endif
16 #include <stdlib.h>
17 #include <stdio.h>
18 #include <stdarg.h>
19 #include <libgen.h>
20 #include <inttypes.h>
21 #include <limits.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <endian.h>
25 #include <fcntl.h>
26 #include <errno.h>
27 #include <ctype.h>
28 #include <asm/unistd.h>
29 #include <linux/err.h>
30 #include <linux/kernel.h>
31 #include <linux/bpf.h>
32 #include <linux/btf.h>
33 #include <linux/filter.h>
34 #include <linux/limits.h>
35 #include <linux/perf_event.h>
36 #include <linux/bpf_perf_event.h>
37 #include <linux/ring_buffer.h>
38 #include <sys/epoll.h>
39 #include <sys/ioctl.h>
40 #include <sys/mman.h>
41 #include <sys/stat.h>
42 #include <sys/types.h>
43 #include <sys/vfs.h>
44 #include <sys/utsname.h>
45 #include <sys/resource.h>
46 #include <libelf.h>
47 #include <gelf.h>
48 #include <zlib.h>
49 
50 #include "libbpf.h"
51 #include "bpf.h"
52 #include "btf.h"
53 #include "str_error.h"
54 #include "libbpf_internal.h"
55 #include "hashmap.h"
56 #include "bpf_gen_internal.h"
57 #include "zip.h"
58 
59 #ifndef BPF_FS_MAGIC
60 #define BPF_FS_MAGIC		0xcafe4a11
61 #endif
62 
63 #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
64 
65 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
66 
67 /* vsprintf() in __base_pr() uses nonliteral format string. It may break
68  * compilation if user enables corresponding warning. Disable it explicitly.
69  */
70 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
71 
72 #define __printf(a, b)	__attribute__((format(printf, a, b)))
73 
74 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
75 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
76 static int map_set_def_max_entries(struct bpf_map *map);
77 
78 static const char * const attach_type_name[] = {
79 	[BPF_CGROUP_INET_INGRESS]	= "cgroup_inet_ingress",
80 	[BPF_CGROUP_INET_EGRESS]	= "cgroup_inet_egress",
81 	[BPF_CGROUP_INET_SOCK_CREATE]	= "cgroup_inet_sock_create",
82 	[BPF_CGROUP_INET_SOCK_RELEASE]	= "cgroup_inet_sock_release",
83 	[BPF_CGROUP_SOCK_OPS]		= "cgroup_sock_ops",
84 	[BPF_CGROUP_DEVICE]		= "cgroup_device",
85 	[BPF_CGROUP_INET4_BIND]		= "cgroup_inet4_bind",
86 	[BPF_CGROUP_INET6_BIND]		= "cgroup_inet6_bind",
87 	[BPF_CGROUP_INET4_CONNECT]	= "cgroup_inet4_connect",
88 	[BPF_CGROUP_INET6_CONNECT]	= "cgroup_inet6_connect",
89 	[BPF_CGROUP_UNIX_CONNECT]       = "cgroup_unix_connect",
90 	[BPF_CGROUP_INET4_POST_BIND]	= "cgroup_inet4_post_bind",
91 	[BPF_CGROUP_INET6_POST_BIND]	= "cgroup_inet6_post_bind",
92 	[BPF_CGROUP_INET4_GETPEERNAME]	= "cgroup_inet4_getpeername",
93 	[BPF_CGROUP_INET6_GETPEERNAME]	= "cgroup_inet6_getpeername",
94 	[BPF_CGROUP_UNIX_GETPEERNAME]	= "cgroup_unix_getpeername",
95 	[BPF_CGROUP_INET4_GETSOCKNAME]	= "cgroup_inet4_getsockname",
96 	[BPF_CGROUP_INET6_GETSOCKNAME]	= "cgroup_inet6_getsockname",
97 	[BPF_CGROUP_UNIX_GETSOCKNAME]	= "cgroup_unix_getsockname",
98 	[BPF_CGROUP_UDP4_SENDMSG]	= "cgroup_udp4_sendmsg",
99 	[BPF_CGROUP_UDP6_SENDMSG]	= "cgroup_udp6_sendmsg",
100 	[BPF_CGROUP_UNIX_SENDMSG]	= "cgroup_unix_sendmsg",
101 	[BPF_CGROUP_SYSCTL]		= "cgroup_sysctl",
102 	[BPF_CGROUP_UDP4_RECVMSG]	= "cgroup_udp4_recvmsg",
103 	[BPF_CGROUP_UDP6_RECVMSG]	= "cgroup_udp6_recvmsg",
104 	[BPF_CGROUP_UNIX_RECVMSG]	= "cgroup_unix_recvmsg",
105 	[BPF_CGROUP_GETSOCKOPT]		= "cgroup_getsockopt",
106 	[BPF_CGROUP_SETSOCKOPT]		= "cgroup_setsockopt",
107 	[BPF_SK_SKB_STREAM_PARSER]	= "sk_skb_stream_parser",
108 	[BPF_SK_SKB_STREAM_VERDICT]	= "sk_skb_stream_verdict",
109 	[BPF_SK_SKB_VERDICT]		= "sk_skb_verdict",
110 	[BPF_SK_MSG_VERDICT]		= "sk_msg_verdict",
111 	[BPF_LIRC_MODE2]		= "lirc_mode2",
112 	[BPF_FLOW_DISSECTOR]		= "flow_dissector",
113 	[BPF_TRACE_RAW_TP]		= "trace_raw_tp",
114 	[BPF_TRACE_FENTRY]		= "trace_fentry",
115 	[BPF_TRACE_FEXIT]		= "trace_fexit",
116 	[BPF_MODIFY_RETURN]		= "modify_return",
117 	[BPF_LSM_MAC]			= "lsm_mac",
118 	[BPF_LSM_CGROUP]		= "lsm_cgroup",
119 	[BPF_SK_LOOKUP]			= "sk_lookup",
120 	[BPF_TRACE_ITER]		= "trace_iter",
121 	[BPF_XDP_DEVMAP]		= "xdp_devmap",
122 	[BPF_XDP_CPUMAP]		= "xdp_cpumap",
123 	[BPF_XDP]			= "xdp",
124 	[BPF_SK_REUSEPORT_SELECT]	= "sk_reuseport_select",
125 	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE]	= "sk_reuseport_select_or_migrate",
126 	[BPF_PERF_EVENT]		= "perf_event",
127 	[BPF_TRACE_KPROBE_MULTI]	= "trace_kprobe_multi",
128 	[BPF_STRUCT_OPS]		= "struct_ops",
129 	[BPF_NETFILTER]			= "netfilter",
130 	[BPF_TCX_INGRESS]		= "tcx_ingress",
131 	[BPF_TCX_EGRESS]		= "tcx_egress",
132 	[BPF_TRACE_UPROBE_MULTI]	= "trace_uprobe_multi",
133 	[BPF_NETKIT_PRIMARY]		= "netkit_primary",
134 	[BPF_NETKIT_PEER]		= "netkit_peer",
135 	[BPF_TRACE_KPROBE_SESSION]	= "trace_kprobe_session",
136 };
137 
138 static const char * const link_type_name[] = {
139 	[BPF_LINK_TYPE_UNSPEC]			= "unspec",
140 	[BPF_LINK_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
141 	[BPF_LINK_TYPE_TRACING]			= "tracing",
142 	[BPF_LINK_TYPE_CGROUP]			= "cgroup",
143 	[BPF_LINK_TYPE_ITER]			= "iter",
144 	[BPF_LINK_TYPE_NETNS]			= "netns",
145 	[BPF_LINK_TYPE_XDP]			= "xdp",
146 	[BPF_LINK_TYPE_PERF_EVENT]		= "perf_event",
147 	[BPF_LINK_TYPE_KPROBE_MULTI]		= "kprobe_multi",
148 	[BPF_LINK_TYPE_STRUCT_OPS]		= "struct_ops",
149 	[BPF_LINK_TYPE_NETFILTER]		= "netfilter",
150 	[BPF_LINK_TYPE_TCX]			= "tcx",
151 	[BPF_LINK_TYPE_UPROBE_MULTI]		= "uprobe_multi",
152 	[BPF_LINK_TYPE_NETKIT]			= "netkit",
153 	[BPF_LINK_TYPE_SOCKMAP]			= "sockmap",
154 };
155 
156 static const char * const map_type_name[] = {
157 	[BPF_MAP_TYPE_UNSPEC]			= "unspec",
158 	[BPF_MAP_TYPE_HASH]			= "hash",
159 	[BPF_MAP_TYPE_ARRAY]			= "array",
160 	[BPF_MAP_TYPE_PROG_ARRAY]		= "prog_array",
161 	[BPF_MAP_TYPE_PERF_EVENT_ARRAY]		= "perf_event_array",
162 	[BPF_MAP_TYPE_PERCPU_HASH]		= "percpu_hash",
163 	[BPF_MAP_TYPE_PERCPU_ARRAY]		= "percpu_array",
164 	[BPF_MAP_TYPE_STACK_TRACE]		= "stack_trace",
165 	[BPF_MAP_TYPE_CGROUP_ARRAY]		= "cgroup_array",
166 	[BPF_MAP_TYPE_LRU_HASH]			= "lru_hash",
167 	[BPF_MAP_TYPE_LRU_PERCPU_HASH]		= "lru_percpu_hash",
168 	[BPF_MAP_TYPE_LPM_TRIE]			= "lpm_trie",
169 	[BPF_MAP_TYPE_ARRAY_OF_MAPS]		= "array_of_maps",
170 	[BPF_MAP_TYPE_HASH_OF_MAPS]		= "hash_of_maps",
171 	[BPF_MAP_TYPE_DEVMAP]			= "devmap",
172 	[BPF_MAP_TYPE_DEVMAP_HASH]		= "devmap_hash",
173 	[BPF_MAP_TYPE_SOCKMAP]			= "sockmap",
174 	[BPF_MAP_TYPE_CPUMAP]			= "cpumap",
175 	[BPF_MAP_TYPE_XSKMAP]			= "xskmap",
176 	[BPF_MAP_TYPE_SOCKHASH]			= "sockhash",
177 	[BPF_MAP_TYPE_CGROUP_STORAGE]		= "cgroup_storage",
178 	[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY]	= "reuseport_sockarray",
179 	[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE]	= "percpu_cgroup_storage",
180 	[BPF_MAP_TYPE_QUEUE]			= "queue",
181 	[BPF_MAP_TYPE_STACK]			= "stack",
182 	[BPF_MAP_TYPE_SK_STORAGE]		= "sk_storage",
183 	[BPF_MAP_TYPE_STRUCT_OPS]		= "struct_ops",
184 	[BPF_MAP_TYPE_RINGBUF]			= "ringbuf",
185 	[BPF_MAP_TYPE_INODE_STORAGE]		= "inode_storage",
186 	[BPF_MAP_TYPE_TASK_STORAGE]		= "task_storage",
187 	[BPF_MAP_TYPE_BLOOM_FILTER]		= "bloom_filter",
188 	[BPF_MAP_TYPE_USER_RINGBUF]             = "user_ringbuf",
189 	[BPF_MAP_TYPE_CGRP_STORAGE]		= "cgrp_storage",
190 	[BPF_MAP_TYPE_ARENA]			= "arena",
191 };
192 
193 static const char * const prog_type_name[] = {
194 	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
195 	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
196 	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
197 	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
198 	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
199 	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
200 	[BPF_PROG_TYPE_XDP]			= "xdp",
201 	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
202 	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
203 	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
204 	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
205 	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
206 	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
207 	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
208 	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
209 	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
210 	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
211 	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
212 	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
213 	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
214 	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
215 	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
216 	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
217 	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
218 	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
219 	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
220 	[BPF_PROG_TYPE_TRACING]			= "tracing",
221 	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
222 	[BPF_PROG_TYPE_EXT]			= "ext",
223 	[BPF_PROG_TYPE_LSM]			= "lsm",
224 	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
225 	[BPF_PROG_TYPE_SYSCALL]			= "syscall",
226 	[BPF_PROG_TYPE_NETFILTER]		= "netfilter",
227 };
228 
229 static int __base_pr(enum libbpf_print_level level, const char *format,
230 		     va_list args)
231 {
232 	if (level == LIBBPF_DEBUG)
233 		return 0;
234 
235 	return vfprintf(stderr, format, args);
236 }
237 
238 static libbpf_print_fn_t __libbpf_pr = __base_pr;
239 
240 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
241 {
242 	libbpf_print_fn_t old_print_fn;
243 
244 	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
245 
246 	return old_print_fn;
247 }
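/* A minimal caller-side usage sketch (illustrative, not part of libbpf
 * itself): the previous handler is returned, so a custom callback can be
 * installed temporarily and restored later; passing NULL silences all
 * libbpf output, since libbpf_print() below bails out on a NULL handler.
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(old_fn);
 */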
248 
249 __printf(2, 3)
250 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
251 {
252 	va_list args;
253 	int old_errno;
254 	libbpf_print_fn_t print_fn;
255 
256 	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
257 	if (!print_fn)
258 		return;
259 
260 	old_errno = errno;
261 
262 	va_start(args, format);
263 	__libbpf_pr(level, format, args);
264 	va_end(args);
265 
266 	errno = old_errno;
267 }
268 
269 static void pr_perm_msg(int err)
270 {
271 	struct rlimit limit;
272 	char buf[100];
273 
274 	if (err != -EPERM || geteuid() != 0)
275 		return;
276 
277 	err = getrlimit(RLIMIT_MEMLOCK, &limit);
278 	if (err)
279 		return;
280 
281 	if (limit.rlim_cur == RLIM_INFINITY)
282 		return;
283 
284 	if (limit.rlim_cur < 1024)
285 		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
286 	else if (limit.rlim_cur < 1024*1024)
287 		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
288 	else
289 		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
290 
291 	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
292 		buf);
293 }
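/* Context for the hint above (an assumption about the caller's kernel, not
 * something this file checks): kernels before memcg-based accounting
 * (roughly pre-5.11) charge BPF maps and programs against RLIMIT_MEMLOCK,
 * so -EPERM here is typically cured by raising that limit before loading,
 * e.g.:
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	setrlimit(RLIMIT_MEMLOCK, &r);
 *
 * which is the programmatic equivalent of `ulimit -l unlimited`.
 */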
294 
295 #define STRERR_BUFSIZE  128
296 
297 /* Copied from tools/perf/util/util.h */
298 #ifndef zfree
299 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
300 #endif
301 
302 #ifndef zclose
303 # define zclose(fd) ({			\
304 	int ___err = 0;			\
305 	if ((fd) >= 0)			\
306 		___err = close((fd));	\
307 	fd = -1;			\
308 	___err; })
309 #endif
310 
311 static inline __u64 ptr_to_u64(const void *ptr)
312 {
313 	return (__u64) (unsigned long) ptr;
314 }
315 
316 int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
317 {
318 	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
319 	return 0;
320 }
321 
322 __u32 libbpf_major_version(void)
323 {
324 	return LIBBPF_MAJOR_VERSION;
325 }
326 
327 __u32 libbpf_minor_version(void)
328 {
329 	return LIBBPF_MINOR_VERSION;
330 }
331 
332 const char *libbpf_version_string(void)
333 {
334 #define __S(X) #X
335 #define _S(X) __S(X)
336 	return  "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
337 #undef _S
338 #undef __S
339 }
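/* The double expansion through _S()/__S() stringifies the numeric values of
 * the version macros rather than their names; assuming, for example,
 * LIBBPF_MAJOR_VERSION == 1 and LIBBPF_MINOR_VERSION == 4, this returns the
 * literal "v1.4".
 */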
340 
341 enum reloc_type {
342 	RELO_LD64,
343 	RELO_CALL,
344 	RELO_DATA,
345 	RELO_EXTERN_LD64,
346 	RELO_EXTERN_CALL,
347 	RELO_SUBPROG_ADDR,
348 	RELO_CORE,
349 };
350 
351 struct reloc_desc {
352 	enum reloc_type type;
353 	int insn_idx;
354 	union {
355 		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
356 		struct {
357 			int map_idx;
358 			int sym_off;
359 			int ext_idx;
360 		};
361 	};
362 };
363 
364 /* stored as sec_def->cookie for all libbpf-supported SEC()s */
365 enum sec_def_flags {
366 	SEC_NONE = 0,
367 	/* expected_attach_type is optional, if kernel doesn't support that */
368 	SEC_EXP_ATTACH_OPT = 1,
369 	/* legacy, only used by libbpf_get_type_names() and
370 	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
371 	 * This used to be associated with cgroup (and a few other) BPF programs
372 	 * that were attachable through the BPF_PROG_ATTACH command. Pretty
373 	 * meaningless nowadays, though.
374 	 */
375 	SEC_ATTACHABLE = 2,
376 	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
377 	/* attachment target is specified through BTF ID in either kernel or
378 	 * other BPF program's BTF object
379 	 */
380 	SEC_ATTACH_BTF = 4,
381 	/* BPF program type allows sleeping/blocking in kernel */
382 	SEC_SLEEPABLE = 8,
383 	/* BPF program support non-linear XDP buffer */
384 	/* BPF program supports non-linear XDP buffers */
385 	/* Setup proper attach type for usdt probes. */
386 	SEC_USDT = 32,
387 };
388 
389 struct bpf_sec_def {
390 	char *sec;
391 	enum bpf_prog_type prog_type;
392 	enum bpf_attach_type expected_attach_type;
393 	long cookie;
394 	int handler_id;
395 
396 	libbpf_prog_setup_fn_t prog_setup_fn;
397 	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
398 	libbpf_prog_attach_fn_t prog_attach_fn;
399 };
400 
401 /*
402  * bpf_prog should be a better name but it has been used in
403  * linux/filter.h.
404  */
405 struct bpf_program {
406 	char *name;
407 	char *sec_name;
408 	size_t sec_idx;
409 	const struct bpf_sec_def *sec_def;
410 	/* this program's instruction offset (in number of instructions)
411 	 * within its containing ELF section
412 	 */
413 	size_t sec_insn_off;
414 	/* number of original instructions in ELF section belonging to this
415 	 * program, not taking into account subprogram instructions possibly
416 	 * appended later during relocation
417 	 */
418 	size_t sec_insn_cnt;
419 	/* Offset (in number of instructions) of the start of instructions
420 	 * belonging to this BPF program within its containing main BPF
421 	 * program. For the entry-point (main) BPF program, this is always
422 	 * zero. For a sub-program, this gets reset before each main BPF
423 	 * program is processed and relocated, and is used to determine
424 	 * whether the sub-program was already appended to the main program,
425 	 * and if yes, at which instruction offset.
426 	 */
427 	size_t sub_insn_off;
428 
429 	/* instructions that belong to BPF program; insns[0] is located at
430 	 * sec_insn_off instruction within its ELF section in ELF file, so
431 	 * when mapping ELF file instruction index to the local instruction,
432 	 * one needs to subtract sec_insn_off; and vice versa.
433 	 */
434 	struct bpf_insn *insns;
435 	/* actual number of instructions in this BPF program's image; for
436 	 * entry-point BPF programs this includes the size of main program
437 	 * itself plus all the used sub-programs, appended at the end
438 	 */
439 	size_t insns_cnt;
440 
441 	struct reloc_desc *reloc_desc;
442 	int nr_reloc;
443 
444 	/* BPF verifier log settings */
445 	char *log_buf;
446 	size_t log_size;
447 	__u32 log_level;
448 
449 	struct bpf_object *obj;
450 
451 	int fd;
452 	bool autoload;
453 	bool autoattach;
454 	bool sym_global;
455 	bool mark_btf_static;
456 	enum bpf_prog_type type;
457 	enum bpf_attach_type expected_attach_type;
458 	int exception_cb_idx;
459 
460 	int prog_ifindex;
461 	__u32 attach_btf_obj_fd;
462 	__u32 attach_btf_id;
463 	__u32 attach_prog_fd;
464 
465 	void *func_info;
466 	__u32 func_info_rec_size;
467 	__u32 func_info_cnt;
468 
469 	void *line_info;
470 	__u32 line_info_rec_size;
471 	__u32 line_info_cnt;
472 	__u32 prog_flags;
473 };
474 
475 struct bpf_struct_ops {
476 	const char *tname;
477 	const struct btf_type *type;
478 	struct bpf_program **progs;
479 	__u32 *kern_func_off;
480 	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
481 	void *data;
482 	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
483 	 *      btf_vmlinux's format.
484 	 * struct bpf_struct_ops_tcp_congestion_ops {
485 	 *	[... some other kernel fields ...]
486 	 *	struct tcp_congestion_ops data;
487 	 * }
488 	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
489 	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
490 	 * from "data".
491 	 */
492 	void *kern_vdata;
493 	__u32 type_id;
494 };
495 
496 #define DATA_SEC ".data"
497 #define BSS_SEC ".bss"
498 #define RODATA_SEC ".rodata"
499 #define KCONFIG_SEC ".kconfig"
500 #define KSYMS_SEC ".ksyms"
501 #define STRUCT_OPS_SEC ".struct_ops"
502 #define STRUCT_OPS_LINK_SEC ".struct_ops.link"
503 #define ARENA_SEC ".addr_space.1"
504 
505 enum libbpf_map_type {
506 	LIBBPF_MAP_UNSPEC,
507 	LIBBPF_MAP_DATA,
508 	LIBBPF_MAP_BSS,
509 	LIBBPF_MAP_RODATA,
510 	LIBBPF_MAP_KCONFIG,
511 };
512 
513 struct bpf_map_def {
514 	unsigned int type;
515 	unsigned int key_size;
516 	unsigned int value_size;
517 	unsigned int max_entries;
518 	unsigned int map_flags;
519 };
520 
521 struct bpf_map {
522 	struct bpf_object *obj;
523 	char *name;
524 	/* real_name is defined for special internal maps (.rodata*,
525 	 * .data*, .bss, .kconfig) and preserves their original ELF section
526 	 * name. This is important to be able to find corresponding BTF
527 	 * DATASEC information.
528 	 */
529 	char *real_name;
530 	int fd;
531 	int sec_idx;
532 	size_t sec_offset;
533 	int map_ifindex;
534 	int inner_map_fd;
535 	struct bpf_map_def def;
536 	__u32 numa_node;
537 	__u32 btf_var_idx;
538 	int mod_btf_fd;
539 	__u32 btf_key_type_id;
540 	__u32 btf_value_type_id;
541 	__u32 btf_vmlinux_value_type_id;
542 	enum libbpf_map_type libbpf_type;
543 	void *mmaped;
544 	struct bpf_struct_ops *st_ops;
545 	struct bpf_map *inner_map;
546 	void **init_slots;
547 	int init_slots_sz;
548 	char *pin_path;
549 	bool pinned;
550 	bool reused;
551 	bool autocreate;
552 	__u64 map_extra;
553 };
554 
555 enum extern_type {
556 	EXT_UNKNOWN,
557 	EXT_KCFG,
558 	EXT_KSYM,
559 };
560 
561 enum kcfg_type {
562 	KCFG_UNKNOWN,
563 	KCFG_CHAR,
564 	KCFG_BOOL,
565 	KCFG_INT,
566 	KCFG_TRISTATE,
567 	KCFG_CHAR_ARR,
568 };
569 
570 struct extern_desc {
571 	enum extern_type type;
572 	int sym_idx;
573 	int btf_id;
574 	int sec_btf_id;
575 	const char *name;
576 	char *essent_name;
577 	bool is_set;
578 	bool is_weak;
579 	union {
580 		struct {
581 			enum kcfg_type type;
582 			int sz;
583 			int align;
584 			int data_off;
585 			bool is_signed;
586 		} kcfg;
587 		struct {
588 			unsigned long long addr;
589 
590 			/* target btf_id of the corresponding kernel var. */
591 			int kernel_btf_obj_fd;
592 			int kernel_btf_id;
593 
594 			/* local btf_id of the ksym extern's type. */
595 			__u32 type_id;
596 			/* BTF fd index to be patched in for insn->off, this is
597 			 * 0 for vmlinux BTF, index in obj->fd_array for module
598 			 * BTF
599 			 */
600 			__s16 btf_fd_idx;
601 		} ksym;
602 	};
603 };
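/* For orientation, illustrative BPF-side declarations that end up as these
 * extern kinds (names below are examples, not taken from this file):
 *
 *	extern unsigned CONFIG_HZ __kconfig;		EXT_KCFG, KCFG_INT
 *	extern enum libbpf_tristate CONFIG_SOME_MOD __kconfig __weak;
 *							EXT_KCFG, KCFG_TRISTATE
 *	extern const struct rq runqueues __ksym;	EXT_KSYM (typed variable)
 *	extern void bpf_rcu_read_lock(void) __ksym;	EXT_KSYM (kfunc call)
 *
 * CONFIG_SOME_MOD is a hypothetical config name; the __kconfig and __ksym
 * attributes come from bpf_helpers.h.
 */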
604 
605 struct module_btf {
606 	struct btf *btf;
607 	char *name;
608 	__u32 id;
609 	int fd;
610 	int fd_array_idx;
611 };
612 
613 enum sec_type {
614 	SEC_UNUSED = 0,
615 	SEC_RELO,
616 	SEC_BSS,
617 	SEC_DATA,
618 	SEC_RODATA,
619 	SEC_ST_OPS,
620 };
621 
622 struct elf_sec_desc {
623 	enum sec_type sec_type;
624 	Elf64_Shdr *shdr;
625 	Elf_Data *data;
626 };
627 
628 struct elf_state {
629 	int fd;
630 	const void *obj_buf;
631 	size_t obj_buf_sz;
632 	Elf *elf;
633 	Elf64_Ehdr *ehdr;
634 	Elf_Data *symbols;
635 	Elf_Data *arena_data;
636 	size_t shstrndx; /* section index for section name strings */
637 	size_t strtabidx;
638 	struct elf_sec_desc *secs;
639 	size_t sec_cnt;
640 	int btf_maps_shndx;
641 	__u32 btf_maps_sec_btf_id;
642 	int text_shndx;
643 	int symbols_shndx;
644 	bool has_st_ops;
645 	int arena_data_shndx;
646 };
647 
648 struct usdt_manager;
649 
650 struct bpf_object {
651 	char name[BPF_OBJ_NAME_LEN];
652 	char license[64];
653 	__u32 kern_version;
654 
655 	struct bpf_program *programs;
656 	size_t nr_programs;
657 	struct bpf_map *maps;
658 	size_t nr_maps;
659 	size_t maps_cap;
660 
661 	char *kconfig;
662 	struct extern_desc *externs;
663 	int nr_extern;
664 	int kconfig_map_idx;
665 
666 	bool loaded;
667 	bool has_subcalls;
668 	bool has_rodata;
669 
670 	struct bpf_gen *gen_loader;
671 
672 	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
673 	struct elf_state efile;
674 
675 	struct btf *btf;
676 	struct btf_ext *btf_ext;
677 
678 	/* Parse and load BTF vmlinux if any of the programs in the object need
679 	 * it at load time.
680 	 */
681 	struct btf *btf_vmlinux;
682 	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
683 	 * override for vmlinux BTF.
684 	 */
685 	char *btf_custom_path;
686 	/* vmlinux BTF override for CO-RE relocations */
687 	struct btf *btf_vmlinux_override;
688 	/* Lazily initialized kernel module BTFs */
689 	struct module_btf *btf_modules;
690 	bool btf_modules_loaded;
691 	size_t btf_module_cnt;
692 	size_t btf_module_cap;
693 
694 	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
695 	char *log_buf;
696 	size_t log_size;
697 	__u32 log_level;
698 
699 	int *fd_array;
700 	size_t fd_array_cap;
701 	size_t fd_array_cnt;
702 
703 	struct usdt_manager *usdt_man;
704 
705 	struct bpf_map *arena_map;
706 	void *arena_data;
707 	size_t arena_data_sz;
708 
709 	struct kern_feature_cache *feat_cache;
710 	char *token_path;
711 	int token_fd;
712 
713 	char path[];
714 };
715 
716 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
717 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
718 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
719 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
720 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
721 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
722 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
723 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
724 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
725 
726 void bpf_program__unload(struct bpf_program *prog)
727 {
728 	if (!prog)
729 		return;
730 
731 	zclose(prog->fd);
732 
733 	zfree(&prog->func_info);
734 	zfree(&prog->line_info);
735 }
736 
737 static void bpf_program__exit(struct bpf_program *prog)
738 {
739 	if (!prog)
740 		return;
741 
742 	bpf_program__unload(prog);
743 	zfree(&prog->name);
744 	zfree(&prog->sec_name);
745 	zfree(&prog->insns);
746 	zfree(&prog->reloc_desc);
747 
748 	prog->nr_reloc = 0;
749 	prog->insns_cnt = 0;
750 	prog->sec_idx = -1;
751 }
752 
753 static bool insn_is_subprog_call(const struct bpf_insn *insn)
754 {
755 	return BPF_CLASS(insn->code) == BPF_JMP &&
756 	       BPF_OP(insn->code) == BPF_CALL &&
757 	       BPF_SRC(insn->code) == BPF_K &&
758 	       insn->src_reg == BPF_PSEUDO_CALL &&
759 	       insn->dst_reg == 0 &&
760 	       insn->off == 0;
761 }
762 
763 static bool is_call_insn(const struct bpf_insn *insn)
764 {
765 	return insn->code == (BPF_JMP | BPF_CALL);
766 }
767 
768 static bool insn_is_pseudo_func(struct bpf_insn *insn)
769 {
770 	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
771 }
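/* Rough decoding guide for the three helpers above, stated in terms of the
 * encodings they test:
 *  - insn_is_subprog_call(): BPF_JMP|BPF_CALL with src_reg == BPF_PSEUDO_CALL,
 *    i.e. a pc-relative "call" into another BPF subprogram (as opposed to a
 *    call into a kernel helper);
 *  - is_call_insn(): any BPF_JMP|BPF_CALL, covering both helper calls and
 *    pseudo (subprogram) calls;
 *  - insn_is_pseudo_func(): an ldimm64 with src_reg == BPF_PSEUDO_FUNC, which
 *    loads the address of a BPF function, e.g. a callback later passed to
 *    bpf_loop() or bpf_for_each_map_elem().
 */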
772 
773 static int
774 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
775 		      const char *name, size_t sec_idx, const char *sec_name,
776 		      size_t sec_off, void *insn_data, size_t insn_data_sz)
777 {
778 	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
779 		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
780 			sec_name, name, sec_off, insn_data_sz);
781 		return -EINVAL;
782 	}
783 
784 	memset(prog, 0, sizeof(*prog));
785 	prog->obj = obj;
786 
787 	prog->sec_idx = sec_idx;
788 	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
789 	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
790 	/* insns_cnt can later be increased by appending used subprograms */
791 	prog->insns_cnt = prog->sec_insn_cnt;
792 
793 	prog->type = BPF_PROG_TYPE_UNSPEC;
794 	prog->fd = -1;
795 	prog->exception_cb_idx = -1;
796 
797 	/* libbpf's convention for SEC("?abc...") is that it's just like
798 	 * SEC("abc...") but the corresponding bpf_program starts out with
799 	 * autoload set to false.
800 	 */
801 	if (sec_name[0] == '?') {
802 		prog->autoload = false;
803 		/* from now on forget there was ? in section name */
804 		sec_name++;
805 	} else {
806 		prog->autoload = true;
807 	}
808 
809 	prog->autoattach = true;
810 
811 	/* inherit object's log_level */
812 	prog->log_level = obj->log_level;
813 
814 	prog->sec_name = strdup(sec_name);
815 	if (!prog->sec_name)
816 		goto errout;
817 
818 	prog->name = strdup(name);
819 	if (!prog->name)
820 		goto errout;
821 
822 	prog->insns = malloc(insn_data_sz);
823 	if (!prog->insns)
824 		goto errout;
825 	memcpy(prog->insns, insn_data, insn_data_sz);
826 
827 	return 0;
828 errout:
829 	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
830 	bpf_program__exit(prog);
831 	return -ENOMEM;
832 }
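/* A usage sketch of the SEC("?...") convention handled above (program and
 * skeleton names are illustrative):
 *
 *	SEC("?kprobe/do_sys_openat2")
 *	int BPF_KPROBE(handle_openat2)
 *	{
 *		...
 *	}
 *
 * Such a program is parsed normally but starts with autoload == false, so it
 * is skipped during bpf_object__load() unless the caller opts in first, e.g.
 * with bpf_program__set_autoload(skel->progs.handle_openat2, true).
 */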
833 
834 static int
835 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
836 			 const char *sec_name, int sec_idx)
837 {
838 	Elf_Data *symbols = obj->efile.symbols;
839 	struct bpf_program *prog, *progs;
840 	void *data = sec_data->d_buf;
841 	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
842 	int nr_progs, err, i;
843 	const char *name;
844 	Elf64_Sym *sym;
845 
846 	progs = obj->programs;
847 	nr_progs = obj->nr_programs;
848 	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
849 
850 	for (i = 0; i < nr_syms; i++) {
851 		sym = elf_sym_by_idx(obj, i);
852 
853 		if (sym->st_shndx != sec_idx)
854 			continue;
855 		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
856 			continue;
857 
858 		prog_sz = sym->st_size;
859 		sec_off = sym->st_value;
860 
861 		name = elf_sym_str(obj, sym->st_name);
862 		if (!name) {
863 			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
864 				sec_name, sec_off);
865 			return -LIBBPF_ERRNO__FORMAT;
866 		}
867 
868 		if (sec_off + prog_sz > sec_sz) {
869 			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
870 				sec_name, sec_off);
871 			return -LIBBPF_ERRNO__FORMAT;
872 		}
873 
874 		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
875 			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
876 			return -ENOTSUP;
877 		}
878 
879 		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
880 			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
881 
882 		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
883 		if (!progs) {
884 			/*
885 			 * In this case the original obj->programs
886 			 * is still valid, so it doesn't need special
887 			 * treatment in bpf_object__close().
888 			 */
889 			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
890 				sec_name, name);
891 			return -ENOMEM;
892 		}
893 		obj->programs = progs;
894 
895 		prog = &progs[nr_progs];
896 
897 		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
898 					    sec_off, data + sec_off, prog_sz);
899 		if (err)
900 			return err;
901 
902 		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
903 			prog->sym_global = true;
904 
905 		/* if function is a global/weak symbol, but has restricted
906 		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
907 		 * as static to enable more permissive BPF verification mode
908 		 * with more outside context available to BPF verifier
909 		 */
910 		if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
911 		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
912 			prog->mark_btf_static = true;
913 
914 		nr_progs++;
915 		obj->nr_programs = nr_progs;
916 	}
917 
918 	return 0;
919 }
920 
921 static const struct btf_member *
922 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
923 {
924 	struct btf_member *m;
925 	int i;
926 
927 	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
928 		if (btf_member_bit_offset(t, i) == bit_offset)
929 			return m;
930 	}
931 
932 	return NULL;
933 }
934 
935 static const struct btf_member *
936 find_member_by_name(const struct btf *btf, const struct btf_type *t,
937 		    const char *name)
938 {
939 	struct btf_member *m;
940 	int i;
941 
942 	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
943 		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
944 			return m;
945 	}
946 
947 	return NULL;
948 }
949 
950 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
951 			    __u16 kind, struct btf **res_btf,
952 			    struct module_btf **res_mod_btf);
953 
954 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
955 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
956 				   const char *name, __u32 kind);
957 
958 static int
959 find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
960 			   struct module_btf **mod_btf,
961 			   const struct btf_type **type, __u32 *type_id,
962 			   const struct btf_type **vtype, __u32 *vtype_id,
963 			   const struct btf_member **data_member)
964 {
965 	const struct btf_type *kern_type, *kern_vtype;
966 	const struct btf_member *kern_data_member;
967 	struct btf *btf;
968 	__s32 kern_vtype_id, kern_type_id;
969 	char tname[256];
970 	__u32 i;
971 
972 	snprintf(tname, sizeof(tname), "%.*s",
973 		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);
974 
975 	kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
976 					&btf, mod_btf);
977 	if (kern_type_id < 0) {
978 		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
979 			tname);
980 		return kern_type_id;
981 	}
982 	kern_type = btf__type_by_id(btf, kern_type_id);
983 
984 	/* Find the corresponding "map_value" type that will be used
985 	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
986 	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
987 	 * btf_vmlinux.
988 	 */
989 	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
990 						tname, BTF_KIND_STRUCT);
991 	if (kern_vtype_id < 0) {
992 		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
993 			STRUCT_OPS_VALUE_PREFIX, tname);
994 		return kern_vtype_id;
995 	}
996 	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
997 
998 	/* Find "struct tcp_congestion_ops" from
999 	 * struct bpf_struct_ops_tcp_congestion_ops {
1000 	 *	[ ... ]
1001 	 *	struct tcp_congestion_ops data;
1002 	 * }
1003 	 */
1004 	kern_data_member = btf_members(kern_vtype);
1005 	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
1006 		if (kern_data_member->type == kern_type_id)
1007 			break;
1008 	}
1009 	if (i == btf_vlen(kern_vtype)) {
1010 		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
1011 			tname, STRUCT_OPS_VALUE_PREFIX, tname);
1012 		return -EINVAL;
1013 	}
1014 
1015 	*type = kern_type;
1016 	*type_id = kern_type_id;
1017 	*vtype = kern_vtype;
1018 	*vtype_id = kern_vtype_id;
1019 	*data_member = kern_data_member;
1020 
1021 	return 0;
1022 }
1023 
1024 static bool bpf_map__is_struct_ops(const struct bpf_map *map)
1025 {
1026 	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
1027 }
1028 
1029 static bool is_valid_st_ops_program(struct bpf_object *obj,
1030 				    const struct bpf_program *prog)
1031 {
1032 	int i;
1033 
1034 	for (i = 0; i < obj->nr_programs; i++) {
1035 		if (&obj->programs[i] == prog)
1036 			return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
1037 	}
1038 
1039 	return false;
1040 }
1041 
1042 /* For each struct_ops program P, referenced from some struct_ops map M,
1043  * enable P.autoload if there are Ms for which M.autocreate is true,
1044  * disable P.autoload if for all Ms M.autocreate is false.
1045  * Don't change P.autoload for programs that are not referenced from any maps.
1046  */
1047 static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
1048 {
1049 	struct bpf_program *prog, *slot_prog;
1050 	struct bpf_map *map;
1051 	int i, j, k, vlen;
1052 
1053 	for (i = 0; i < obj->nr_programs; ++i) {
1054 		int should_load = false;
1055 		int use_cnt = 0;
1056 
1057 		prog = &obj->programs[i];
1058 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1059 			continue;
1060 
1061 		for (j = 0; j < obj->nr_maps; ++j) {
1062 			map = &obj->maps[j];
1063 			if (!bpf_map__is_struct_ops(map))
1064 				continue;
1065 
1066 			vlen = btf_vlen(map->st_ops->type);
1067 			for (k = 0; k < vlen; ++k) {
1068 				slot_prog = map->st_ops->progs[k];
1069 				if (prog != slot_prog)
1070 					continue;
1071 
1072 				use_cnt++;
1073 				if (map->autocreate)
1074 					should_load = true;
1075 			}
1076 		}
1077 		if (use_cnt)
1078 			prog->autoload = should_load;
1079 	}
1080 
1081 	return 0;
1082 }
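/* An illustrative scenario (map names are hypothetical): if a struct_ops
 * callback is referenced only from maps whose creation was disabled, e.g.
 *
 *	bpf_map__set_autocreate(skel->maps.ops_a, false);
 *	bpf_map__set_autocreate(skel->maps.ops_b, false);
 *
 * before load, the shared program ends up with autoload == false and is not
 * sent to the verifier; keeping autocreate enabled on either map keeps the
 * program loaded.
 */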
1083 
1084 /* Init the map's fields that depend on kern_btf */
1085 static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
1086 {
1087 	const struct btf_member *member, *kern_member, *kern_data_member;
1088 	const struct btf_type *type, *kern_type, *kern_vtype;
1089 	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
1090 	struct bpf_object *obj = map->obj;
1091 	const struct btf *btf = obj->btf;
1092 	struct bpf_struct_ops *st_ops;
1093 	const struct btf *kern_btf;
1094 	struct module_btf *mod_btf;
1095 	void *data, *kern_data;
1096 	const char *tname;
1097 	int err;
1098 
1099 	st_ops = map->st_ops;
1100 	type = st_ops->type;
1101 	tname = st_ops->tname;
1102 	err = find_struct_ops_kern_types(obj, tname, &mod_btf,
1103 					 &kern_type, &kern_type_id,
1104 					 &kern_vtype, &kern_vtype_id,
1105 					 &kern_data_member);
1106 	if (err)
1107 		return err;
1108 
1109 	kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
1110 
1111 	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
1112 		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1113 
1114 	map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
1115 	map->def.value_size = kern_vtype->size;
1116 	map->btf_vmlinux_value_type_id = kern_vtype_id;
1117 
1118 	st_ops->kern_vdata = calloc(1, kern_vtype->size);
1119 	if (!st_ops->kern_vdata)
1120 		return -ENOMEM;
1121 
1122 	data = st_ops->data;
1123 	kern_data_off = kern_data_member->offset / 8;
1124 	kern_data = st_ops->kern_vdata + kern_data_off;
1125 
1126 	member = btf_members(type);
1127 	for (i = 0; i < btf_vlen(type); i++, member++) {
1128 		const struct btf_type *mtype, *kern_mtype;
1129 		__u32 mtype_id, kern_mtype_id;
1130 		void *mdata, *kern_mdata;
1131 		struct bpf_program *prog;
1132 		__s64 msize, kern_msize;
1133 		__u32 moff, kern_moff;
1134 		__u32 kern_member_idx;
1135 		const char *mname;
1136 
1137 		mname = btf__name_by_offset(btf, member->name_off);
1138 		moff = member->offset / 8;
1139 		mdata = data + moff;
1140 		msize = btf__resolve_size(btf, member->type);
1141 		if (msize < 0) {
1142 			pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
1143 				map->name, mname);
1144 			return msize;
1145 		}
1146 
1147 		kern_member = find_member_by_name(kern_btf, kern_type, mname);
1148 		if (!kern_member) {
1149 			if (!libbpf_is_mem_zeroed(mdata, msize)) {
1150 				pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
1151 					map->name, mname);
1152 				return -ENOTSUP;
1153 			}
1154 
1155 			if (st_ops->progs[i]) {
1156 				/* If we had declaratively set struct_ops callback, we need to
1157 				 * force its autoload to false, because it doesn't have
1158 				 * a chance of succeeding from POV of the current struct_ops map.
1159 				 * If this program is still referenced somewhere else, though,
1160 				 * then bpf_object_adjust_struct_ops_autoload() will update its
1161 				 * autoload accordingly.
1162 				 */
1163 				st_ops->progs[i]->autoload = false;
1164 				st_ops->progs[i] = NULL;
1165 			}
1166 
1167 			/* Skip all-zero/NULL fields if they are not present in the kernel BTF */
1168 			pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
1169 				map->name, mname);
1170 			continue;
1171 		}
1172 
1173 		kern_member_idx = kern_member - btf_members(kern_type);
1174 		if (btf_member_bitfield_size(type, i) ||
1175 		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
1176 			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
1177 				map->name, mname);
1178 			return -ENOTSUP;
1179 		}
1180 
1181 		kern_moff = kern_member->offset / 8;
1182 		kern_mdata = kern_data + kern_moff;
1183 
1184 		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1185 		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1186 						    &kern_mtype_id);
1187 		if (BTF_INFO_KIND(mtype->info) !=
1188 		    BTF_INFO_KIND(kern_mtype->info)) {
1189 			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
1190 				map->name, mname, BTF_INFO_KIND(mtype->info),
1191 				BTF_INFO_KIND(kern_mtype->info));
1192 			return -ENOTSUP;
1193 		}
1194 
1195 		if (btf_is_ptr(mtype)) {
1196 			prog = *(void **)mdata;
1197 			/* just like for the !kern_member case above, reset the
1198 			 * declaratively set (at compile time) program's autoload to
1199 			 * false if the user replaced it with another program or NULL
1200 			 */
1201 			if (st_ops->progs[i] && st_ops->progs[i] != prog)
1202 				st_ops->progs[i]->autoload = false;
1203 
1204 			/* Update the value from the shadow type */
1205 			st_ops->progs[i] = prog;
1206 			if (!prog)
1207 				continue;
1208 
1209 			if (!is_valid_st_ops_program(obj, prog)) {
1210 				pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
1211 					map->name, mname);
1212 				return -ENOTSUP;
1213 			}
1214 
1215 			kern_mtype = skip_mods_and_typedefs(kern_btf,
1216 							    kern_mtype->type,
1217 							    &kern_mtype_id);
1218 
1219 			/* mtype->type must be a func_proto which was
1220 			 * guaranteed in bpf_object__collect_st_ops_relos(),
1221 			 * so only check kern_mtype for func_proto here.
1222 			 */
1223 			if (!btf_is_func_proto(kern_mtype)) {
1224 				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
1225 					map->name, mname);
1226 				return -ENOTSUP;
1227 			}
1228 
1229 			if (mod_btf)
1230 				prog->attach_btf_obj_fd = mod_btf->fd;
1231 
1232 			/* if we haven't yet processed this BPF program, record proper
1233 			 * attach_btf_id and member_idx
1234 			 */
1235 			if (!prog->attach_btf_id) {
1236 				prog->attach_btf_id = kern_type_id;
1237 				prog->expected_attach_type = kern_member_idx;
1238 			}
1239 
1240 			/* struct_ops BPF prog can be re-used between multiple
1241 			 * .struct_ops & .struct_ops.link as long as it's the
1242 			 * same struct_ops struct definition and the same
1243 			 * function pointer field
1244 			 */
1245 			if (prog->attach_btf_id != kern_type_id) {
1246 				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
1247 					map->name, mname, prog->name, prog->sec_name, prog->type,
1248 					prog->attach_btf_id, kern_type_id);
1249 				return -EINVAL;
1250 			}
1251 			if (prog->expected_attach_type != kern_member_idx) {
1252 				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
1253 					map->name, mname, prog->name, prog->sec_name, prog->type,
1254 					prog->expected_attach_type, kern_member_idx);
1255 				return -EINVAL;
1256 			}
1257 
1258 			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1259 
1260 			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
1261 				 map->name, mname, prog->name, moff,
1262 				 kern_moff);
1263 
1264 			continue;
1265 		}
1266 
1267 		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
1268 		if (kern_msize < 0 || msize != kern_msize) {
1269 			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
1270 				map->name, mname, (ssize_t)msize,
1271 				(ssize_t)kern_msize);
1272 			return -ENOTSUP;
1273 		}
1274 
1275 		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
1276 			 map->name, mname, (unsigned int)msize,
1277 			 moff, kern_moff);
1278 		memcpy(kern_mdata, mdata, msize);
1279 	}
1280 
1281 	return 0;
1282 }
1283 
1284 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1285 {
1286 	struct bpf_map *map;
1287 	size_t i;
1288 	int err;
1289 
1290 	for (i = 0; i < obj->nr_maps; i++) {
1291 		map = &obj->maps[i];
1292 
1293 		if (!bpf_map__is_struct_ops(map))
1294 			continue;
1295 
1296 		if (!map->autocreate)
1297 			continue;
1298 
1299 		err = bpf_map__init_kern_struct_ops(map);
1300 		if (err)
1301 			return err;
1302 	}
1303 
1304 	return 0;
1305 }
1306 
1307 static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
1308 				int shndx, Elf_Data *data)
1309 {
1310 	const struct btf_type *type, *datasec;
1311 	const struct btf_var_secinfo *vsi;
1312 	struct bpf_struct_ops *st_ops;
1313 	const char *tname, *var_name;
1314 	__s32 type_id, datasec_id;
1315 	const struct btf *btf;
1316 	struct bpf_map *map;
1317 	__u32 i;
1318 
1319 	if (shndx == -1)
1320 		return 0;
1321 
1322 	btf = obj->btf;
1323 	datasec_id = btf__find_by_name_kind(btf, sec_name,
1324 					    BTF_KIND_DATASEC);
1325 	if (datasec_id < 0) {
1326 		pr_warn("struct_ops init: DATASEC %s not found\n",
1327 			sec_name);
1328 		return -EINVAL;
1329 	}
1330 
1331 	datasec = btf__type_by_id(btf, datasec_id);
1332 	vsi = btf_var_secinfos(datasec);
1333 	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1334 		type = btf__type_by_id(obj->btf, vsi->type);
1335 		var_name = btf__name_by_offset(obj->btf, type->name_off);
1336 
1337 		type_id = btf__resolve_type(obj->btf, vsi->type);
1338 		if (type_id < 0) {
1339 			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1340 				vsi->type, sec_name);
1341 			return -EINVAL;
1342 		}
1343 
1344 		type = btf__type_by_id(obj->btf, type_id);
1345 		tname = btf__name_by_offset(obj->btf, type->name_off);
1346 		if (!tname[0]) {
1347 			pr_warn("struct_ops init: anonymous type is not supported\n");
1348 			return -ENOTSUP;
1349 		}
1350 		if (!btf_is_struct(type)) {
1351 			pr_warn("struct_ops init: %s is not a struct\n", tname);
1352 			return -EINVAL;
1353 		}
1354 
1355 		map = bpf_object__add_map(obj);
1356 		if (IS_ERR(map))
1357 			return PTR_ERR(map);
1358 
1359 		map->sec_idx = shndx;
1360 		map->sec_offset = vsi->offset;
1361 		map->name = strdup(var_name);
1362 		if (!map->name)
1363 			return -ENOMEM;
1364 		map->btf_value_type_id = type_id;
1365 
1366 		/* Follow same convention as for programs autoload:
1367 		 * SEC("?.struct_ops") means map is not created by default.
1368 		 */
1369 		if (sec_name[0] == '?') {
1370 			map->autocreate = false;
1371 			/* from now on forget there was ? in section name */
1372 			sec_name++;
1373 		}
1374 
1375 		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1376 		map->def.key_size = sizeof(int);
1377 		map->def.value_size = type->size;
1378 		map->def.max_entries = 1;
1379 		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
1380 
1381 		map->st_ops = calloc(1, sizeof(*map->st_ops));
1382 		if (!map->st_ops)
1383 			return -ENOMEM;
1384 		st_ops = map->st_ops;
1385 		st_ops->data = malloc(type->size);
1386 		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1387 		st_ops->kern_func_off = malloc(btf_vlen(type) *
1388 					       sizeof(*st_ops->kern_func_off));
1389 		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1390 			return -ENOMEM;
1391 
1392 		if (vsi->offset + type->size > data->d_size) {
1393 			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1394 				var_name, sec_name);
1395 			return -EINVAL;
1396 		}
1397 
1398 		memcpy(st_ops->data,
1399 		       data->d_buf + vsi->offset,
1400 		       type->size);
1401 		st_ops->tname = tname;
1402 		st_ops->type = type;
1403 		st_ops->type_id = type_id;
1404 
1405 		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1406 			 tname, type_id, var_name, vsi->offset);
1407 	}
1408 
1409 	return 0;
1410 }
1411 
1412 static int bpf_object_init_struct_ops(struct bpf_object *obj)
1413 {
1414 	const char *sec_name;
1415 	int sec_idx, err;
1416 
1417 	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
1418 		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
1419 
1420 		if (desc->sec_type != SEC_ST_OPS)
1421 			continue;
1422 
1423 		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1424 		if (!sec_name)
1425 			return -LIBBPF_ERRNO__FORMAT;
1426 
1427 		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
1428 		if (err)
1429 			return err;
1430 	}
1431 
1432 	return 0;
1433 }
1434 
1435 static struct bpf_object *bpf_object__new(const char *path,
1436 					  const void *obj_buf,
1437 					  size_t obj_buf_sz,
1438 					  const char *obj_name)
1439 {
1440 	struct bpf_object *obj;
1441 	char *end;
1442 
1443 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1444 	if (!obj) {
1445 		pr_warn("alloc memory failed for %s\n", path);
1446 		return ERR_PTR(-ENOMEM);
1447 	}
1448 
1449 	strcpy(obj->path, path);
1450 	if (obj_name) {
1451 		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1452 	} else {
1453 		/* Using basename() GNU version which doesn't modify arg. */
1454 		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1455 		end = strchr(obj->name, '.');
1456 		if (end)
1457 			*end = 0;
1458 	}
1459 
1460 	obj->efile.fd = -1;
1461 	/*
1462 	 * The caller of this function should also call
1463 	 * bpf_object__elf_finish() after data collection to return
1464 	 * obj_buf to the user. If not, we should duplicate the buffer to
1465 	 * avoid the user freeing it before ELF processing is finished.
1466 	 */
1467 	obj->efile.obj_buf = obj_buf;
1468 	obj->efile.obj_buf_sz = obj_buf_sz;
1469 	obj->efile.btf_maps_shndx = -1;
1470 	obj->kconfig_map_idx = -1;
1471 
1472 	obj->kern_version = get_kernel_version();
1473 	obj->loaded = false;
1474 
1475 	return obj;
1476 }
1477 
1478 static void bpf_object__elf_finish(struct bpf_object *obj)
1479 {
1480 	if (!obj->efile.elf)
1481 		return;
1482 
1483 	elf_end(obj->efile.elf);
1484 	obj->efile.elf = NULL;
1485 	obj->efile.symbols = NULL;
1486 	obj->efile.arena_data = NULL;
1487 
1488 	zfree(&obj->efile.secs);
1489 	obj->efile.sec_cnt = 0;
1490 	zclose(obj->efile.fd);
1491 	obj->efile.obj_buf = NULL;
1492 	obj->efile.obj_buf_sz = 0;
1493 }
1494 
1495 static int bpf_object__elf_init(struct bpf_object *obj)
1496 {
1497 	Elf64_Ehdr *ehdr;
1498 	int err = 0;
1499 	Elf *elf;
1500 
1501 	if (obj->efile.elf) {
1502 		pr_warn("elf: init internal error\n");
1503 		return -LIBBPF_ERRNO__LIBELF;
1504 	}
1505 
1506 	if (obj->efile.obj_buf_sz > 0) {
1507 		/* obj_buf should have been validated by bpf_object__open_mem(). */
1508 		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1509 	} else {
1510 		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1511 		if (obj->efile.fd < 0) {
1512 			char errmsg[STRERR_BUFSIZE], *cp;
1513 
1514 			err = -errno;
1515 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1516 			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1517 			return err;
1518 		}
1519 
1520 		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1521 	}
1522 
1523 	if (!elf) {
1524 		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1525 		err = -LIBBPF_ERRNO__LIBELF;
1526 		goto errout;
1527 	}
1528 
1529 	obj->efile.elf = elf;
1530 
1531 	if (elf_kind(elf) != ELF_K_ELF) {
1532 		err = -LIBBPF_ERRNO__FORMAT;
1533 		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1534 		goto errout;
1535 	}
1536 
1537 	if (gelf_getclass(elf) != ELFCLASS64) {
1538 		err = -LIBBPF_ERRNO__FORMAT;
1539 		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1540 		goto errout;
1541 	}
1542 
1543 	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1544 	if (!obj->efile.ehdr) {
1545 		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1546 		err = -LIBBPF_ERRNO__FORMAT;
1547 		goto errout;
1548 	}
1549 
1550 	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1551 		pr_warn("elf: failed to get section names section index for %s: %s\n",
1552 			obj->path, elf_errmsg(-1));
1553 		err = -LIBBPF_ERRNO__FORMAT;
1554 		goto errout;
1555 	}
1556 
1557 	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
1558 	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1559 		pr_warn("elf: failed to get section names strings from %s: %s\n",
1560 			obj->path, elf_errmsg(-1));
1561 		err = -LIBBPF_ERRNO__FORMAT;
1562 		goto errout;
1563 	}
1564 
1565 	/* Old LLVM set e_machine to EM_NONE */
1566 	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
1567 		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1568 		err = -LIBBPF_ERRNO__FORMAT;
1569 		goto errout;
1570 	}
1571 
1572 	return 0;
1573 errout:
1574 	bpf_object__elf_finish(obj);
1575 	return err;
1576 }
1577 
1578 static int bpf_object__check_endianness(struct bpf_object *obj)
1579 {
1580 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1581 	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
1582 		return 0;
1583 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1584 	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
1585 		return 0;
1586 #else
1587 # error "Unrecognized __BYTE_ORDER__"
1588 #endif
1589 	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1590 	return -LIBBPF_ERRNO__ENDIAN;
1591 }
1592 
1593 static int
1594 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1595 {
1596 	if (!data) {
1597 		pr_warn("invalid license section in %s\n", obj->path);
1598 		return -LIBBPF_ERRNO__FORMAT;
1599 	}
1600 	/* libbpf_strlcpy() only copies the first N - 1 bytes, so size + 1 won't
1601 	 * overrun the allowed ELF data section buffer
1602 	 */
1603 	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1604 	pr_debug("license of %s is %s\n", obj->path, obj->license);
1605 	return 0;
1606 }
1607 
1608 static int
1609 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1610 {
1611 	__u32 kver;
1612 
1613 	if (!data || size != sizeof(kver)) {
1614 		pr_warn("invalid kver section in %s\n", obj->path);
1615 		return -LIBBPF_ERRNO__FORMAT;
1616 	}
1617 	memcpy(&kver, data, sizeof(kver));
1618 	obj->kern_version = kver;
1619 	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1620 	return 0;
1621 }
1622 
1623 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1624 {
1625 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1626 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
1627 		return true;
1628 	return false;
1629 }
1630 
1631 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1632 {
1633 	Elf_Data *data;
1634 	Elf_Scn *scn;
1635 
1636 	if (!name)
1637 		return -EINVAL;
1638 
1639 	scn = elf_sec_by_name(obj, name);
1640 	data = elf_sec_data(obj, scn);
1641 	if (data) {
1642 		*size = data->d_size;
1643 		return 0; /* found it */
1644 	}
1645 
1646 	return -ENOENT;
1647 }
1648 
1649 static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1650 {
1651 	Elf_Data *symbols = obj->efile.symbols;
1652 	const char *sname;
1653 	size_t si;
1654 
1655 	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1656 		Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1657 
1658 		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1659 			continue;
1660 
1661 		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1662 		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1663 			continue;
1664 
1665 		sname = elf_sym_str(obj, sym->st_name);
1666 		if (!sname) {
1667 			pr_warn("failed to get sym name string for var %s\n", name);
1668 			return ERR_PTR(-EIO);
1669 		}
1670 		if (strcmp(name, sname) == 0)
1671 			return sym;
1672 	}
1673 
1674 	return ERR_PTR(-ENOENT);
1675 }
1676 
1677 /* Some versions of Android don't provide memfd_create() in their libc
1678  * implementation, so avoid complications and just go straight to Linux
1679  * syscall.
1680  */
1681 static int sys_memfd_create(const char *name, unsigned flags)
1682 {
1683 	return syscall(__NR_memfd_create, name, flags);
1684 }
1685 
1686 #ifndef MFD_CLOEXEC
1687 #define MFD_CLOEXEC 0x0001U
1688 #endif
1689 
1690 static int create_placeholder_fd(void)
1691 {
1692 	int fd;
1693 
1694 	fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
1695 	if (fd < 0)
1696 		return -errno;
1697 	return fd;
1698 }
1699 
1700 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1701 {
1702 	struct bpf_map *map;
1703 	int err;
1704 
1705 	err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1706 				sizeof(*obj->maps), obj->nr_maps + 1);
1707 	if (err)
1708 		return ERR_PTR(err);
1709 
1710 	map = &obj->maps[obj->nr_maps++];
1711 	map->obj = obj;
1712 	/* Preallocate map FD without actually creating BPF map just yet.
1713 	 * These map FD "placeholders" will be reused later without changing
1714 	 * FD value when map is actually created in the kernel.
1715 	 *
1716 	 * This is useful to be able to perform BPF program relocations
1717 	 * without having to create BPF maps before that step. This allows us
1718 	 * to finalize and load BTF very late in BPF object's loading phase,
1719 	 * right before BPF maps have to be created and BPF programs have to
1720 	 * be loaded. By having these map FD placeholders we can perform all
1721 	 * the sanitizations, relocations, and any other adjustments before we
1722 	 * start creating actual BPF kernel objects (BTF, maps, progs).
1723 	 */
1724 	map->fd = create_placeholder_fd();
1725 	if (map->fd < 0)
1726 		return ERR_PTR(map->fd);
1727 	map->inner_map_fd = -1;
1728 	map->autocreate = true;
1729 
1730 	return map;
1731 }
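/* Conceptually, the placeholder trick amounts to the minimal user-space
 * sketch below (illustrative only; the actual repointing is done by libbpf
 * internals when the map is created, not by this exact code):
 *
 *   int placeholder = memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC);
 *   ...hand out `placeholder` to relocations, ldimm64 instructions, etc...
 *   int real = bpf_map_create(BPF_MAP_TYPE_ARRAY, "m", 4, 4, 1, NULL);
 *   dup3(real, placeholder, O_CLOEXEC);   <- placeholder now refers to the map
 *   close(real);
 *
 * Everything that captured the placeholder FD number keeps working without
 * being patched a second time.
 */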
1732 
1733 static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
1734 {
1735 	const long page_sz = sysconf(_SC_PAGE_SIZE);
1736 	size_t map_sz;
1737 
1738 	map_sz = (size_t)roundup(value_sz, 8) * max_entries;
1739 	map_sz = roundup(map_sz, page_sz);
1740 	return map_sz;
1741 }
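/* Worked example, assuming a 4096-byte page size:
 *   value_sz = 13, max_entries = 100
 *   roundup(13, 8)      ->   16 bytes per element
 *   16 * 100            -> 1600 bytes of data
 *   roundup(1600, 4096) -> 4096 bytes to mmap() for the map
 */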
1742 
1743 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1744 {
1745 	const long page_sz = sysconf(_SC_PAGE_SIZE);
1746 
1747 	switch (map->def.type) {
1748 	case BPF_MAP_TYPE_ARRAY:
1749 		return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
1750 	case BPF_MAP_TYPE_ARENA:
1751 		return page_sz * map->def.max_entries;
1752 	default:
1753 		return 0; /* not supported */
1754 	}
1755 }
1756 
1757 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
1758 {
1759 	void *mmaped;
1760 
1761 	if (!map->mmaped)
1762 		return -EINVAL;
1763 
1764 	if (old_sz == new_sz)
1765 		return 0;
1766 
1767 	mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1768 	if (mmaped == MAP_FAILED)
1769 		return -errno;
1770 
1771 	memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1772 	munmap(map->mmaped, old_sz);
1773 	map->mmaped = mmaped;
1774 	return 0;
1775 }
1776 
1777 static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1778 {
1779 	char map_name[BPF_OBJ_NAME_LEN], *p;
1780 	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1781 
1782 	/* This is one of the more confusing parts of libbpf for various
1783 	 * reasons, some of which are historical. The original idea for naming
1784 	 * internal names was to include as much of BPF object name prefix as
1785 	 * possible, so that it can be distinguished from similar internal
1786 	 * maps of a different BPF object.
1787 	 * As an example, let's say we have bpf_object named 'my_object_name'
1788 	 * and internal map corresponding to '.rodata' ELF section. The final
1789 	 * map name advertised to user and to the kernel will be
1790 	 * 'my_objec.rodata', taking first 8 characters of object name and
1791 	 * entire 7 characters of '.rodata'.
1792 	 * Somewhat confusingly, if internal map ELF section name is shorter
1793 	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1794 	 * for the suffix, even though we only have 4 actual characters, and
1795 	 * resulting map will be called 'my_objec.bss', not even using all 15
1796 	 * characters allowed by the kernel. Oh well, at least the truncated
1797 	 * object name is somewhat consistent in this case. But if the map
1798 	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
1799 	 * (8 chars) and thus will be left with only first 7 characters of the
1800 	 * object name ('my_obje'). Happy guessing, user, that the final map
1801 	 * name will be "my_obje.kconfig".
1802 	 * Now, with libbpf starting to support arbitrarily named .rodata.*
1803 	 * and .data.* data sections, it's possible that ELF section name is
1804 	 * longer than allowed 15 chars, so we now need to be careful to take
1805 	 * only up to 15 first characters of ELF name, taking no BPF object
1806 	 * name characters at all. So '.rodata.abracadabra' will result in
1807 	 * '.rodata.abracad' kernel and user-visible name.
1808 	 * We need to keep this convoluted logic intact for .data, .bss and
1809 	 * .rodata maps, but for new custom .data.custom and .rodata.custom
1810 	 * maps we use their ELF names as is, not prepending bpf_object name
1811 	 * in front. We still need to truncate them to 15 characters for the
1812 	 * kernel. Full name can be recovered for such maps by using DATASEC
1813 	 * BTF type associated with such map's value type, though.
1814 	 */
1815 	if (sfx_len >= BPF_OBJ_NAME_LEN)
1816 		sfx_len = BPF_OBJ_NAME_LEN - 1;
1817 
1818 	/* if there are two or more dots in map name, it's a custom dot map */
1819 	if (strchr(real_name + 1, '.') != NULL)
1820 		pfx_len = 0;
1821 	else
1822 		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1823 
1824 	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1825 		 sfx_len, real_name);
1826 
1827 	/* sanitise map name to characters allowed by kernel */
1828 	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1829 		if (!isalnum(*p) && *p != '_' && *p != '.')
1830 			*p = '_';
1831 
1832 	return strdup(map_name);
1833 }
1834 
1835 static int
1836 map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1837 
1838 /* Internal BPF map is mmap()'able only if at least one of corresponding
1839  * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
1840  * variable and it's not marked as __hidden (which turns it into, effectively,
1841  * a STATIC variable).
1842  */
1843 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1844 {
1845 	const struct btf_type *t, *vt;
1846 	struct btf_var_secinfo *vsi;
1847 	int i, n;
1848 
1849 	if (!map->btf_value_type_id)
1850 		return false;
1851 
1852 	t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1853 	if (!btf_is_datasec(t))
1854 		return false;
1855 
1856 	vsi = btf_var_secinfos(t);
1857 	for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1858 		vt = btf__type_by_id(obj->btf, vsi->type);
1859 		if (!btf_is_var(vt))
1860 			continue;
1861 
1862 		if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1863 			return true;
1864 	}
1865 
1866 	return false;
1867 }
1868 
1869 static int
1870 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1871 			      const char *real_name, int sec_idx, void *data, size_t data_sz)
1872 {
1873 	struct bpf_map_def *def;
1874 	struct bpf_map *map;
1875 	size_t mmap_sz;
1876 	int err;
1877 
1878 	map = bpf_object__add_map(obj);
1879 	if (IS_ERR(map))
1880 		return PTR_ERR(map);
1881 
1882 	map->libbpf_type = type;
1883 	map->sec_idx = sec_idx;
1884 	map->sec_offset = 0;
1885 	map->real_name = strdup(real_name);
1886 	map->name = internal_map_name(obj, real_name);
1887 	if (!map->real_name || !map->name) {
1888 		zfree(&map->real_name);
1889 		zfree(&map->name);
1890 		return -ENOMEM;
1891 	}
1892 
1893 	def = &map->def;
1894 	def->type = BPF_MAP_TYPE_ARRAY;
1895 	def->key_size = sizeof(int);
1896 	def->value_size = data_sz;
1897 	def->max_entries = 1;
1898 	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1899 		? BPF_F_RDONLY_PROG : 0;
1900 
1901 	/* failures are fine because of maps like .rodata.str1.1 */
1902 	(void) map_fill_btf_type_info(obj, map);
1903 
1904 	if (map_is_mmapable(obj, map))
1905 		def->map_flags |= BPF_F_MMAPABLE;
1906 
1907 	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1908 		 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1909 
1910 	mmap_sz = bpf_map_mmap_sz(map);
1911 	map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1912 			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1913 	if (map->mmaped == MAP_FAILED) {
1914 		err = -errno;
1915 		map->mmaped = NULL;
1916 		pr_warn("failed to alloc map '%s' content buffer: %d\n",
1917 			map->name, err);
1918 		zfree(&map->real_name);
1919 		zfree(&map->name);
1920 		return err;
1921 	}
1922 
1923 	if (data)
1924 		memcpy(map->mmaped, data, data_sz);
1925 
1926 	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1927 	return 0;
1928 }
1929 
1930 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1931 {
1932 	struct elf_sec_desc *sec_desc;
1933 	const char *sec_name;
1934 	int err = 0, sec_idx;
1935 
1936 	/*
1937 	 * Populate obj->maps with libbpf internal maps.
1938 	 */
1939 	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1940 		sec_desc = &obj->efile.secs[sec_idx];
1941 
1942 		/* Skip recognized sections with size 0. */
1943 		if (!sec_desc->data || sec_desc->data->d_size == 0)
1944 			continue;
1945 
1946 		switch (sec_desc->sec_type) {
1947 		case SEC_DATA:
1948 			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1949 			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1950 							    sec_name, sec_idx,
1951 							    sec_desc->data->d_buf,
1952 							    sec_desc->data->d_size);
1953 			break;
1954 		case SEC_RODATA:
1955 			obj->has_rodata = true;
1956 			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1957 			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1958 							    sec_name, sec_idx,
1959 							    sec_desc->data->d_buf,
1960 							    sec_desc->data->d_size);
1961 			break;
1962 		case SEC_BSS:
1963 			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1964 			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1965 							    sec_name, sec_idx,
1966 							    NULL,
1967 							    sec_desc->data->d_size);
1968 			break;
1969 		default:
1970 			/* skip */
1971 			break;
1972 		}
1973 		if (err)
1974 			return err;
1975 	}
1976 	return 0;
1977 }
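/* For context, a minimal BPF-side example of what lands in these internal
 * maps (illustrative only):
 *
 *   int counter;                       (.bss    -> "<obj>.bss" map)
 *   int setting = 42;                  (.data   -> "<obj>.data" map)
 *   const volatile int flag = 1;       (.rodata -> "<obj>.rodata" map)
 *
 * Each section becomes a single-entry ARRAY map whose value is the whole
 * section image; user space can read or update these variables through the
 * map's mmap()'ed memory or a generated skeleton.
 */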
1978 
1979 
1980 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1981 					       const void *name)
1982 {
1983 	int i;
1984 
1985 	for (i = 0; i < obj->nr_extern; i++) {
1986 		if (strcmp(obj->externs[i].name, name) == 0)
1987 			return &obj->externs[i];
1988 	}
1989 	return NULL;
1990 }
1991 
1992 static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj,
1993 							const void *name, int len)
1994 {
1995 	const char *ext_name;
1996 	int i;
1997 
1998 	for (i = 0; i < obj->nr_extern; i++) {
1999 		ext_name = obj->externs[i].name;
2000 		if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0)
2001 			return &obj->externs[i];
2002 	}
2003 	return NULL;
2004 }
2005 
2006 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
2007 			      char value)
2008 {
2009 	switch (ext->kcfg.type) {
2010 	case KCFG_BOOL:
2011 		if (value == 'm') {
2012 			pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
2013 				ext->name, value);
2014 			return -EINVAL;
2015 		}
2016 		*(bool *)ext_val = value == 'y' ? true : false;
2017 		break;
2018 	case KCFG_TRISTATE:
2019 		if (value == 'y')
2020 			*(enum libbpf_tristate *)ext_val = TRI_YES;
2021 		else if (value == 'm')
2022 			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
2023 		else /* value == 'n' */
2024 			*(enum libbpf_tristate *)ext_val = TRI_NO;
2025 		break;
2026 	case KCFG_CHAR:
2027 		*(char *)ext_val = value;
2028 		break;
2029 	case KCFG_UNKNOWN:
2030 	case KCFG_INT:
2031 	case KCFG_CHAR_ARR:
2032 	default:
2033 		pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
2034 			ext->name, value);
2035 		return -EINVAL;
2036 	}
2037 	ext->is_set = true;
2038 	return 0;
2039 }
2040 
2041 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
2042 			      const char *value)
2043 {
2044 	size_t len;
2045 
2046 	if (ext->kcfg.type != KCFG_CHAR_ARR) {
2047 		pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
2048 			ext->name, value);
2049 		return -EINVAL;
2050 	}
2051 
2052 	len = strlen(value);
2053 	if (value[len - 1] != '"') {
2054 		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
2055 			ext->name, value);
2056 		return -EINVAL;
2057 	}
2058 
2059 	/* strip quotes */
2060 	len -= 2;
2061 	if (len >= ext->kcfg.sz) {
2062 		pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
2063 			ext->name, value, len, ext->kcfg.sz - 1);
2064 		len = ext->kcfg.sz - 1;
2065 	}
2066 	memcpy(ext_val, value + 1, len);
2067 	ext_val[len] = '\0';
2068 	ext->is_set = true;
2069 	return 0;
2070 }
2071 
2072 static int parse_u64(const char *value, __u64 *res)
2073 {
2074 	char *value_end;
2075 	int err;
2076 
2077 	errno = 0;
2078 	*res = strtoull(value, &value_end, 0);
2079 	if (errno) {
2080 		err = -errno;
2081 		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
2082 		return err;
2083 	}
2084 	if (*value_end) {
2085 		pr_warn("failed to parse '%s' as integer completely\n", value);
2086 		return -EINVAL;
2087 	}
2088 	return 0;
2089 }
2090 
2091 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
2092 {
2093 	int bit_sz = ext->kcfg.sz * 8;
2094 
2095 	if (ext->kcfg.sz == 8)
2096 		return true;
2097 
2098 	/* Validate that the value stored in u64 fits in an integer of
2099 	 * `ext->sz` bytes without any loss of information. If the target integer
2100 	 * is signed, we rely on the following limits of integer type of
2101 	 * Y bits and subsequent transformation:
2102 	 *
2103 	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
2104 	 *            0 <= X + 2^(Y-1) <= 2^Y - 1
2105 	 *            0 <= X + 2^(Y-1) <  2^Y
2106 	 *
2107 	 *  For unsigned target integer, check that all the (64 - Y) bits are
2108 	 *  zero.
2109 	 */
2110 	if (ext->kcfg.is_signed)
2111 		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2112 	else
2113 		return (v >> bit_sz) == 0;
2114 }
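/* Worked example of the signed case: for a 1-byte signed target
 * (bit_sz = 8), the value -5 arrives as v = 0xfffffffffffffffb. Adding
 * 2^7 wraps to 0x7b (123), which is < 2^8, so -5 fits into [-128, 127].
 * For v = 200, 200 + 128 = 328 >= 256, so 200 is correctly rejected for
 * a signed 8-bit target.
 */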
2115 
2116 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
2117 			      __u64 value)
2118 {
2119 	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2120 	    ext->kcfg.type != KCFG_BOOL) {
2121 		pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
2122 			ext->name, (unsigned long long)value);
2123 		return -EINVAL;
2124 	}
2125 	if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2126 		pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
2127 			ext->name, (unsigned long long)value);
2128 		return -EINVAL;
2129 
2130 	}
2131 	if (!is_kcfg_value_in_range(ext, value)) {
2132 		pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
2133 			ext->name, (unsigned long long)value, ext->kcfg.sz);
2134 		return -ERANGE;
2135 	}
2136 	switch (ext->kcfg.sz) {
2137 	case 1:
2138 		*(__u8 *)ext_val = value;
2139 		break;
2140 	case 2:
2141 		*(__u16 *)ext_val = value;
2142 		break;
2143 	case 4:
2144 		*(__u32 *)ext_val = value;
2145 		break;
2146 	case 8:
2147 		*(__u64 *)ext_val = value;
2148 		break;
2149 	default:
2150 		return -EINVAL;
2151 	}
2152 	ext->is_set = true;
2153 	return 0;
2154 }
2155 
2156 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2157 					    char *buf, void *data)
2158 {
2159 	struct extern_desc *ext;
2160 	char *sep, *value;
2161 	int len, err = 0;
2162 	void *ext_val;
2163 	__u64 num;
2164 
2165 	if (!str_has_pfx(buf, "CONFIG_"))
2166 		return 0;
2167 
2168 	sep = strchr(buf, '=');
2169 	if (!sep) {
2170 		pr_warn("failed to parse '%s': no separator\n", buf);
2171 		return -EINVAL;
2172 	}
2173 
2174 	/* Trim ending '\n' */
2175 	len = strlen(buf);
2176 	if (buf[len - 1] == '\n')
2177 		buf[len - 1] = '\0';
2178 	/* Split on '=' and ensure that a value is present. */
2179 	*sep = '\0';
2180 	if (!sep[1]) {
2181 		*sep = '=';
2182 		pr_warn("failed to parse '%s': no value\n", buf);
2183 		return -EINVAL;
2184 	}
2185 
2186 	ext = find_extern_by_name(obj, buf);
2187 	if (!ext || ext->is_set)
2188 		return 0;
2189 
2190 	ext_val = data + ext->kcfg.data_off;
2191 	value = sep + 1;
2192 
2193 	switch (*value) {
2194 	case 'y': case 'n': case 'm':
2195 		err = set_kcfg_value_tri(ext, ext_val, *value);
2196 		break;
2197 	case '"':
2198 		err = set_kcfg_value_str(ext, ext_val, value);
2199 		break;
2200 	default:
2201 		/* assume integer */
2202 		err = parse_u64(value, &num);
2203 		if (err) {
2204 			pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
2205 			return err;
2206 		}
2207 		if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2208 			pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2209 			return -EINVAL;
2210 		}
2211 		err = set_kcfg_value_num(ext, ext_val, num);
2212 		break;
2213 	}
2214 	if (err)
2215 		return err;
2216 	pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
2217 	return 0;
2218 }
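/* A sketch of the inputs this parser handles. Lines come straight from the
 * kernel config (see bpf_object__read_kconfig_file() below), e.g.:
 *
 *   CONFIG_BPF_JIT=y
 *   CONFIG_HZ=250
 *   CONFIG_MODULES=m
 *   CONFIG_DEFAULT_HOSTNAME="(none)"
 *
 * and are matched against externs declared on the BPF side, e.g.:
 *
 *   extern bool CONFIG_BPF_JIT __kconfig;
 *   extern int CONFIG_HZ __kconfig;
 *   extern enum libbpf_tristate CONFIG_MODULES __kconfig;
 *
 * Matched values are written into the .kconfig map at each extern's
 * kcfg.data_off.
 */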
2219 
2220 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2221 {
2222 	char buf[PATH_MAX];
2223 	struct utsname uts;
2224 	int len, err = 0;
2225 	gzFile file;
2226 
2227 	uname(&uts);
2228 	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2229 	if (len < 0)
2230 		return -EINVAL;
2231 	else if (len >= PATH_MAX)
2232 		return -ENAMETOOLONG;
2233 
2234 	/* gzopen also accepts uncompressed files. */
2235 	file = gzopen(buf, "re");
2236 	if (!file)
2237 		file = gzopen("/proc/config.gz", "re");
2238 
2239 	if (!file) {
2240 		pr_warn("failed to open system Kconfig\n");
2241 		return -ENOENT;
2242 	}
2243 
2244 	while (gzgets(file, buf, sizeof(buf))) {
2245 		err = bpf_object__process_kconfig_line(obj, buf, data);
2246 		if (err) {
2247 			pr_warn("error parsing system Kconfig line '%s': %d\n",
2248 				buf, err);
2249 			goto out;
2250 		}
2251 	}
2252 
2253 out:
2254 	gzclose(file);
2255 	return err;
2256 }
2257 
2258 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2259 					const char *config, void *data)
2260 {
2261 	char buf[PATH_MAX];
2262 	int err = 0;
2263 	FILE *file;
2264 
2265 	file = fmemopen((void *)config, strlen(config), "r");
2266 	if (!file) {
2267 		err = -errno;
2268 		pr_warn("failed to open in-memory Kconfig: %d\n", err);
2269 		return err;
2270 	}
2271 
2272 	while (fgets(buf, sizeof(buf), file)) {
2273 		err = bpf_object__process_kconfig_line(obj, buf, data);
2274 		if (err) {
2275 			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
2276 				buf, err);
2277 			break;
2278 		}
2279 	}
2280 
2281 	fclose(file);
2282 	return err;
2283 }
2284 
2285 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
2286 {
2287 	struct extern_desc *last_ext = NULL, *ext;
2288 	size_t map_sz;
2289 	int i, err;
2290 
2291 	for (i = 0; i < obj->nr_extern; i++) {
2292 		ext = &obj->externs[i];
2293 		if (ext->type == EXT_KCFG)
2294 			last_ext = ext;
2295 	}
2296 
2297 	if (!last_ext)
2298 		return 0;
2299 
2300 	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
2301 	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
2302 					    ".kconfig", obj->efile.symbols_shndx,
2303 					    NULL, map_sz);
2304 	if (err)
2305 		return err;
2306 
2307 	obj->kconfig_map_idx = obj->nr_maps - 1;
2308 
2309 	return 0;
2310 }
2311 
2312 const struct btf_type *
2313 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2314 {
2315 	const struct btf_type *t = btf__type_by_id(btf, id);
2316 
2317 	if (res_id)
2318 		*res_id = id;
2319 
2320 	while (btf_is_mod(t) || btf_is_typedef(t)) {
2321 		if (res_id)
2322 			*res_id = t->type;
2323 		t = btf__type_by_id(btf, t->type);
2324 	}
2325 
2326 	return t;
2327 }
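/* Example: for `typedef const volatile int my_t; my_t x;`, the BTF chain
 * behind x's type is TYPEDEF -> CONST -> VOLATILE -> INT (modifier order
 * may vary by compiler). skip_mods_and_typedefs() walks that chain and
 * returns the INT type, with *res_id (when non-NULL) set to the INT's ID.
 */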
2328 
2329 static const struct btf_type *
2330 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2331 {
2332 	const struct btf_type *t;
2333 
2334 	t = skip_mods_and_typedefs(btf, id, NULL);
2335 	if (!btf_is_ptr(t))
2336 		return NULL;
2337 
2338 	t = skip_mods_and_typedefs(btf, t->type, res_id);
2339 
2340 	return btf_is_func_proto(t) ? t : NULL;
2341 }
2342 
2343 static const char *__btf_kind_str(__u16 kind)
2344 {
2345 	switch (kind) {
2346 	case BTF_KIND_UNKN: return "void";
2347 	case BTF_KIND_INT: return "int";
2348 	case BTF_KIND_PTR: return "ptr";
2349 	case BTF_KIND_ARRAY: return "array";
2350 	case BTF_KIND_STRUCT: return "struct";
2351 	case BTF_KIND_UNION: return "union";
2352 	case BTF_KIND_ENUM: return "enum";
2353 	case BTF_KIND_FWD: return "fwd";
2354 	case BTF_KIND_TYPEDEF: return "typedef";
2355 	case BTF_KIND_VOLATILE: return "volatile";
2356 	case BTF_KIND_CONST: return "const";
2357 	case BTF_KIND_RESTRICT: return "restrict";
2358 	case BTF_KIND_FUNC: return "func";
2359 	case BTF_KIND_FUNC_PROTO: return "func_proto";
2360 	case BTF_KIND_VAR: return "var";
2361 	case BTF_KIND_DATASEC: return "datasec";
2362 	case BTF_KIND_FLOAT: return "float";
2363 	case BTF_KIND_DECL_TAG: return "decl_tag";
2364 	case BTF_KIND_TYPE_TAG: return "type_tag";
2365 	case BTF_KIND_ENUM64: return "enum64";
2366 	default: return "unknown";
2367 	}
2368 }
2369 
2370 const char *btf_kind_str(const struct btf_type *t)
2371 {
2372 	return __btf_kind_str(btf_kind(t));
2373 }
2374 
2375 /*
2376  * Fetch integer attribute of BTF map definition. Such attributes are
2377  * represented using a pointer to an array, in which dimensionality of array
2378  * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2379  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2380  * type definition, while using only sizeof(void *) space in ELF data section.
2381  */
2382 static bool get_map_field_int(const char *map_name, const struct btf *btf,
2383 			      const struct btf_member *m, __u32 *res)
2384 {
2385 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2386 	const char *name = btf__name_by_offset(btf, m->name_off);
2387 	const struct btf_array *arr_info;
2388 	const struct btf_type *arr_t;
2389 
2390 	if (!btf_is_ptr(t)) {
2391 		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2392 			map_name, name, btf_kind_str(t));
2393 		return false;
2394 	}
2395 
2396 	arr_t = btf__type_by_id(btf, t->type);
2397 	if (!arr_t) {
2398 		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2399 			map_name, name, t->type);
2400 		return false;
2401 	}
2402 	if (!btf_is_array(arr_t)) {
2403 		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2404 			map_name, name, btf_kind_str(arr_t));
2405 		return false;
2406 	}
2407 	arr_info = btf_array(arr_t);
2408 	*res = arr_info->nelems;
2409 	return true;
2410 }
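/* Example of the encoding described above: bpf_helpers.h defines
 *
 *   #define __uint(name, val) int (*name)[val]
 *
 * so `__uint(max_entries, 1024)` in a SEC(".maps") definition becomes
 * `int (*max_entries)[1024]`, a pointer to a 1024-element array. This
 * helper then recovers the value as btf_array(arr_t)->nelems == 1024.
 */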
2411 
2412 static bool get_map_field_long(const char *map_name, const struct btf *btf,
2413 			       const struct btf_member *m, __u64 *res)
2414 {
2415 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2416 	const char *name = btf__name_by_offset(btf, m->name_off);
2417 
2418 	if (btf_is_ptr(t)) {
2419 		__u32 res32;
2420 		bool ret;
2421 
2422 		ret = get_map_field_int(map_name, btf, m, &res32);
2423 		if (ret)
2424 			*res = (__u64)res32;
2425 		return ret;
2426 	}
2427 
2428 	if (!btf_is_enum(t) && !btf_is_enum64(t)) {
2429 		pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
2430 			map_name, name, btf_kind_str(t));
2431 		return false;
2432 	}
2433 
2434 	if (btf_vlen(t) != 1) {
2435 		pr_warn("map '%s': attr '%s': invalid __ulong\n",
2436 			map_name, name);
2437 		return false;
2438 	}
2439 
2440 	if (btf_is_enum(t)) {
2441 		const struct btf_enum *e = btf_enum(t);
2442 
2443 		*res = e->val;
2444 	} else {
2445 		const struct btf_enum64 *e = btf_enum64(t);
2446 
2447 		*res = btf_enum64_value(e);
2448 	}
2449 	return true;
2450 }
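/* 64-bit attributes such as map_extra can't be expressed as an array
 * dimension, so bpf_helpers.h encodes them via a single-enumerator enum
 * (ENUM64 when the value doesn't fit in 32 bits), roughly:
 *
 *   __ulong(map_extra, 1ULL << 32);
 *
 * and this helper reads the value back from that lone enumerator.
 */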
2451 
2452 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2453 {
2454 	int len;
2455 
2456 	len = snprintf(buf, buf_sz, "%s/%s", path, name);
2457 	if (len < 0)
2458 		return -EINVAL;
2459 	if (len >= buf_sz)
2460 		return -ENAMETOOLONG;
2461 
2462 	return 0;
2463 }
2464 
2465 static int build_map_pin_path(struct bpf_map *map, const char *path)
2466 {
2467 	char buf[PATH_MAX];
2468 	int err;
2469 
2470 	if (!path)
2471 		path = BPF_FS_DEFAULT_PATH;
2472 
2473 	err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2474 	if (err)
2475 		return err;
2476 
2477 	return bpf_map__set_pin_path(map, buf);
2478 }
2479 
2480 /* should match definition in bpf_helpers.h */
2481 enum libbpf_pin_type {
2482 	LIBBPF_PIN_NONE,
2483 	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2484 	LIBBPF_PIN_BY_NAME,
2485 };
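/* BPF-side usage sketch (LIBBPF_PIN_BY_NAME mirrors bpf_helpers.h):
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_HASH);
 *       __uint(max_entries, 16);
 *       __type(key, __u32);
 *       __type(value, __u64);
 *       __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } pinned_map SEC(".maps");
 *
 * With no pin_root_path override, build_map_pin_path() above would pin
 * this map at /sys/fs/bpf/pinned_map.
 */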
2486 
2487 int parse_btf_map_def(const char *map_name, struct btf *btf,
2488 		      const struct btf_type *def_t, bool strict,
2489 		      struct btf_map_def *map_def, struct btf_map_def *inner_def)
2490 {
2491 	const struct btf_type *t;
2492 	const struct btf_member *m;
2493 	bool is_inner = inner_def == NULL;
2494 	int vlen, i;
2495 
2496 	vlen = btf_vlen(def_t);
2497 	m = btf_members(def_t);
2498 	for (i = 0; i < vlen; i++, m++) {
2499 		const char *name = btf__name_by_offset(btf, m->name_off);
2500 
2501 		if (!name) {
2502 			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2503 			return -EINVAL;
2504 		}
2505 		if (strcmp(name, "type") == 0) {
2506 			if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2507 				return -EINVAL;
2508 			map_def->parts |= MAP_DEF_MAP_TYPE;
2509 		} else if (strcmp(name, "max_entries") == 0) {
2510 			if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2511 				return -EINVAL;
2512 			map_def->parts |= MAP_DEF_MAX_ENTRIES;
2513 		} else if (strcmp(name, "map_flags") == 0) {
2514 			if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2515 				return -EINVAL;
2516 			map_def->parts |= MAP_DEF_MAP_FLAGS;
2517 		} else if (strcmp(name, "numa_node") == 0) {
2518 			if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2519 				return -EINVAL;
2520 			map_def->parts |= MAP_DEF_NUMA_NODE;
2521 		} else if (strcmp(name, "key_size") == 0) {
2522 			__u32 sz;
2523 
2524 			if (!get_map_field_int(map_name, btf, m, &sz))
2525 				return -EINVAL;
2526 			if (map_def->key_size && map_def->key_size != sz) {
2527 				pr_warn("map '%s': conflicting key size %u != %u.\n",
2528 					map_name, map_def->key_size, sz);
2529 				return -EINVAL;
2530 			}
2531 			map_def->key_size = sz;
2532 			map_def->parts |= MAP_DEF_KEY_SIZE;
2533 		} else if (strcmp(name, "key") == 0) {
2534 			__s64 sz;
2535 
2536 			t = btf__type_by_id(btf, m->type);
2537 			if (!t) {
2538 				pr_warn("map '%s': key type [%d] not found.\n",
2539 					map_name, m->type);
2540 				return -EINVAL;
2541 			}
2542 			if (!btf_is_ptr(t)) {
2543 				pr_warn("map '%s': key spec is not PTR: %s.\n",
2544 					map_name, btf_kind_str(t));
2545 				return -EINVAL;
2546 			}
2547 			sz = btf__resolve_size(btf, t->type);
2548 			if (sz < 0) {
2549 				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2550 					map_name, t->type, (ssize_t)sz);
2551 				return sz;
2552 			}
2553 			if (map_def->key_size && map_def->key_size != sz) {
2554 				pr_warn("map '%s': conflicting key size %u != %zd.\n",
2555 					map_name, map_def->key_size, (ssize_t)sz);
2556 				return -EINVAL;
2557 			}
2558 			map_def->key_size = sz;
2559 			map_def->key_type_id = t->type;
2560 			map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2561 		} else if (strcmp(name, "value_size") == 0) {
2562 			__u32 sz;
2563 
2564 			if (!get_map_field_int(map_name, btf, m, &sz))
2565 				return -EINVAL;
2566 			if (map_def->value_size && map_def->value_size != sz) {
2567 				pr_warn("map '%s': conflicting value size %u != %u.\n",
2568 					map_name, map_def->value_size, sz);
2569 				return -EINVAL;
2570 			}
2571 			map_def->value_size = sz;
2572 			map_def->parts |= MAP_DEF_VALUE_SIZE;
2573 		} else if (strcmp(name, "value") == 0) {
2574 			__s64 sz;
2575 
2576 			t = btf__type_by_id(btf, m->type);
2577 			if (!t) {
2578 				pr_warn("map '%s': value type [%d] not found.\n",
2579 					map_name, m->type);
2580 				return -EINVAL;
2581 			}
2582 			if (!btf_is_ptr(t)) {
2583 				pr_warn("map '%s': value spec is not PTR: %s.\n",
2584 					map_name, btf_kind_str(t));
2585 				return -EINVAL;
2586 			}
2587 			sz = btf__resolve_size(btf, t->type);
2588 			if (sz < 0) {
2589 				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2590 					map_name, t->type, (ssize_t)sz);
2591 				return sz;
2592 			}
2593 			if (map_def->value_size && map_def->value_size != sz) {
2594 				pr_warn("map '%s': conflicting value size %u != %zd.\n",
2595 					map_name, map_def->value_size, (ssize_t)sz);
2596 				return -EINVAL;
2597 			}
2598 			map_def->value_size = sz;
2599 			map_def->value_type_id = t->type;
2600 			map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2601 		}
2602 		else if (strcmp(name, "values") == 0) {
2603 			bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2604 			bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2605 			const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2606 			char inner_map_name[128];
2607 			int err;
2608 
2609 			if (is_inner) {
2610 				pr_warn("map '%s': multi-level inner maps not supported.\n",
2611 					map_name);
2612 				return -ENOTSUP;
2613 			}
2614 			if (i != vlen - 1) {
2615 				pr_warn("map '%s': '%s' member should be last.\n",
2616 					map_name, name);
2617 				return -EINVAL;
2618 			}
2619 			if (!is_map_in_map && !is_prog_array) {
2620 				pr_warn("map '%s': should be map-in-map or prog-array.\n",
2621 					map_name);
2622 				return -ENOTSUP;
2623 			}
2624 			if (map_def->value_size && map_def->value_size != 4) {
2625 				pr_warn("map '%s': conflicting value size %u != 4.\n",
2626 					map_name, map_def->value_size);
2627 				return -EINVAL;
2628 			}
2629 			map_def->value_size = 4;
2630 			t = btf__type_by_id(btf, m->type);
2631 			if (!t) {
2632 				pr_warn("map '%s': %s type [%d] not found.\n",
2633 					map_name, desc, m->type);
2634 				return -EINVAL;
2635 			}
2636 			if (!btf_is_array(t) || btf_array(t)->nelems) {
2637 				pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2638 					map_name, desc);
2639 				return -EINVAL;
2640 			}
2641 			t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2642 			if (!btf_is_ptr(t)) {
2643 				pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2644 					map_name, desc, btf_kind_str(t));
2645 				return -EINVAL;
2646 			}
2647 			t = skip_mods_and_typedefs(btf, t->type, NULL);
2648 			if (is_prog_array) {
2649 				if (!btf_is_func_proto(t)) {
2650 					pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2651 						map_name, btf_kind_str(t));
2652 					return -EINVAL;
2653 				}
2654 				continue;
2655 			}
2656 			if (!btf_is_struct(t)) {
2657 				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2658 					map_name, btf_kind_str(t));
2659 				return -EINVAL;
2660 			}
2661 
2662 			snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2663 			err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2664 			if (err)
2665 				return err;
2666 
2667 			map_def->parts |= MAP_DEF_INNER_MAP;
2668 		} else if (strcmp(name, "pinning") == 0) {
2669 			__u32 val;
2670 
2671 			if (is_inner) {
2672 				pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2673 				return -EINVAL;
2674 			}
2675 			if (!get_map_field_int(map_name, btf, m, &val))
2676 				return -EINVAL;
2677 			if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2678 				pr_warn("map '%s': invalid pinning value %u.\n",
2679 					map_name, val);
2680 				return -EINVAL;
2681 			}
2682 			map_def->pinning = val;
2683 			map_def->parts |= MAP_DEF_PINNING;
2684 		} else if (strcmp(name, "map_extra") == 0) {
2685 			__u64 map_extra;
2686 
2687 			if (!get_map_field_long(map_name, btf, m, &map_extra))
2688 				return -EINVAL;
2689 			map_def->map_extra = map_extra;
2690 			map_def->parts |= MAP_DEF_MAP_EXTRA;
2691 		} else {
2692 			if (strict) {
2693 				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2694 				return -ENOTSUP;
2695 			}
2696 			pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2697 		}
2698 	}
2699 
2700 	if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2701 		pr_warn("map '%s': map type isn't specified.\n", map_name);
2702 		return -EINVAL;
2703 	}
2704 
2705 	return 0;
2706 }
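/* An illustrative map-in-map definition exercising the "values" branch
 * above (BPF-side C, using bpf_helpers.h macros):
 *
 *   struct inner {
 *       __uint(type, BPF_MAP_TYPE_ARRAY);
 *       __uint(max_entries, 1);
 *       __type(key, __u32);
 *       __type(value, __u64);
 *   };
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *       __uint(max_entries, 8);
 *       __type(key, __u32);
 *       __array(values, struct inner);
 *   } outer SEC(".maps");
 *
 * The zero-sized "values" array member points at the inner definition,
 * which is parsed recursively into inner_def under the ".inner" suffix.
 */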
2707 
2708 static size_t adjust_ringbuf_sz(size_t sz)
2709 {
2710 	__u32 page_sz = sysconf(_SC_PAGE_SIZE);
2711 	__u32 mul;
2712 
2713 	/* if user forgot to set any size, make sure they see error */
2714 	if (sz == 0)
2715 		return 0;
2716 	/* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2717 	 * a power-of-2 multiple of kernel's page size. If user diligently
2718 	 * satisfied these conditions, pass the size through.
2719 	 */
2720 	if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2721 		return sz;
2722 
2723 	/* Otherwise find closest (page_sz * power_of_2) product bigger than
2724 	 * user-set size to satisfy both user size request and kernel
2725 	 * requirements and substitute correct max_entries for map creation.
2726 	 */
2727 	for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2728 		if (mul * page_sz > sz)
2729 			return mul * page_sz;
2730 	}
2731 
2732 	/* if it's impossible to satisfy the conditions (i.e., user size is
2733 	 * very close to UINT_MAX but is not a power-of-2 multiple of
2734 	 * page_size) then just return original size and let kernel reject it
2735 	 */
2736 	return sz;
2737 }
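/* Worked examples (assuming 4096-byte pages):
 *   sz = 4096     -> 1 * 4096, power-of-2 multiple -> returned unchanged
 *   sz = 1000     -> rounded up to   1 * 4096 =   4096
 *   sz = 12288    -> 3 pages, not a power of 2 ->  4 * 4096 =  16384
 *   sz = 500000   -> rounded up to 128 * 4096 = 524288
 */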
2738 
2739 static bool map_is_ringbuf(const struct bpf_map *map)
2740 {
2741 	return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2742 	       map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2743 }
2744 
2745 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2746 {
2747 	map->def.type = def->map_type;
2748 	map->def.key_size = def->key_size;
2749 	map->def.value_size = def->value_size;
2750 	map->def.max_entries = def->max_entries;
2751 	map->def.map_flags = def->map_flags;
2752 	map->map_extra = def->map_extra;
2753 
2754 	map->numa_node = def->numa_node;
2755 	map->btf_key_type_id = def->key_type_id;
2756 	map->btf_value_type_id = def->value_type_id;
2757 
2758 	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2759 	if (map_is_ringbuf(map))
2760 		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2761 
2762 	if (def->parts & MAP_DEF_MAP_TYPE)
2763 		pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2764 
2765 	if (def->parts & MAP_DEF_KEY_TYPE)
2766 		pr_debug("map '%s': found key [%u], sz = %u.\n",
2767 			 map->name, def->key_type_id, def->key_size);
2768 	else if (def->parts & MAP_DEF_KEY_SIZE)
2769 		pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2770 
2771 	if (def->parts & MAP_DEF_VALUE_TYPE)
2772 		pr_debug("map '%s': found value [%u], sz = %u.\n",
2773 			 map->name, def->value_type_id, def->value_size);
2774 	else if (def->parts & MAP_DEF_VALUE_SIZE)
2775 		pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2776 
2777 	if (def->parts & MAP_DEF_MAX_ENTRIES)
2778 		pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2779 	if (def->parts & MAP_DEF_MAP_FLAGS)
2780 		pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2781 	if (def->parts & MAP_DEF_MAP_EXTRA)
2782 		pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2783 			 (unsigned long long)def->map_extra);
2784 	if (def->parts & MAP_DEF_PINNING)
2785 		pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2786 	if (def->parts & MAP_DEF_NUMA_NODE)
2787 		pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2788 
2789 	if (def->parts & MAP_DEF_INNER_MAP)
2790 		pr_debug("map '%s': found inner map definition.\n", map->name);
2791 }
2792 
2793 static const char *btf_var_linkage_str(__u32 linkage)
2794 {
2795 	switch (linkage) {
2796 	case BTF_VAR_STATIC: return "static";
2797 	case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2798 	case BTF_VAR_GLOBAL_EXTERN: return "extern";
2799 	default: return "unknown";
2800 	}
2801 }
2802 
2803 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2804 					 const struct btf_type *sec,
2805 					 int var_idx, int sec_idx,
2806 					 const Elf_Data *data, bool strict,
2807 					 const char *pin_root_path)
2808 {
2809 	struct btf_map_def map_def = {}, inner_def = {};
2810 	const struct btf_type *var, *def;
2811 	const struct btf_var_secinfo *vi;
2812 	const struct btf_var *var_extra;
2813 	const char *map_name;
2814 	struct bpf_map *map;
2815 	int err;
2816 
2817 	vi = btf_var_secinfos(sec) + var_idx;
2818 	var = btf__type_by_id(obj->btf, vi->type);
2819 	var_extra = btf_var(var);
2820 	map_name = btf__name_by_offset(obj->btf, var->name_off);
2821 
2822 	if (map_name == NULL || map_name[0] == '\0') {
2823 		pr_warn("map #%d: empty name.\n", var_idx);
2824 		return -EINVAL;
2825 	}
2826 	if ((__u64)vi->offset + vi->size > data->d_size) {
2827 		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2828 		return -EINVAL;
2829 	}
2830 	if (!btf_is_var(var)) {
2831 		pr_warn("map '%s': unexpected var kind %s.\n",
2832 			map_name, btf_kind_str(var));
2833 		return -EINVAL;
2834 	}
2835 	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2836 		pr_warn("map '%s': unsupported map linkage %s.\n",
2837 			map_name, btf_var_linkage_str(var_extra->linkage));
2838 		return -EOPNOTSUPP;
2839 	}
2840 
2841 	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2842 	if (!btf_is_struct(def)) {
2843 		pr_warn("map '%s': unexpected def kind %s.\n",
2844 			map_name, btf_kind_str(var));
2845 		return -EINVAL;
2846 	}
2847 	if (def->size > vi->size) {
2848 		pr_warn("map '%s': invalid def size.\n", map_name);
2849 		return -EINVAL;
2850 	}
2851 
2852 	map = bpf_object__add_map(obj);
2853 	if (IS_ERR(map))
2854 		return PTR_ERR(map);
2855 	map->name = strdup(map_name);
2856 	if (!map->name) {
2857 		pr_warn("map '%s': failed to alloc map name.\n", map_name);
2858 		return -ENOMEM;
2859 	}
2860 	map->libbpf_type = LIBBPF_MAP_UNSPEC;
2861 	map->def.type = BPF_MAP_TYPE_UNSPEC;
2862 	map->sec_idx = sec_idx;
2863 	map->sec_offset = vi->offset;
2864 	map->btf_var_idx = var_idx;
2865 	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2866 		 map_name, map->sec_idx, map->sec_offset);
2867 
2868 	err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2869 	if (err)
2870 		return err;
2871 
2872 	fill_map_from_def(map, &map_def);
2873 
2874 	if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2875 		err = build_map_pin_path(map, pin_root_path);
2876 		if (err) {
2877 			pr_warn("map '%s': couldn't build pin path.\n", map->name);
2878 			return err;
2879 		}
2880 	}
2881 
2882 	if (map_def.parts & MAP_DEF_INNER_MAP) {
2883 		map->inner_map = calloc(1, sizeof(*map->inner_map));
2884 		if (!map->inner_map)
2885 			return -ENOMEM;
2886 		map->inner_map->fd = create_placeholder_fd();
2887 		if (map->inner_map->fd < 0)
2888 			return map->inner_map->fd;
2889 		map->inner_map->sec_idx = sec_idx;
2890 		map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2891 		if (!map->inner_map->name)
2892 			return -ENOMEM;
2893 		sprintf(map->inner_map->name, "%s.inner", map_name);
2894 
2895 		fill_map_from_def(map->inner_map, &inner_def);
2896 	}
2897 
2898 	err = map_fill_btf_type_info(obj, map);
2899 	if (err)
2900 		return err;
2901 
2902 	return 0;
2903 }
2904 
2905 static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
2906 			       const char *sec_name, int sec_idx,
2907 			       void *data, size_t data_sz)
2908 {
2909 	const long page_sz = sysconf(_SC_PAGE_SIZE);
2910 	size_t mmap_sz;
2911 
2912 	mmap_sz = bpf_map_mmap_sz(obj->arena_map);
2913 	if (roundup(data_sz, page_sz) > mmap_sz) {
2914 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
2915 			sec_name, mmap_sz, data_sz);
2916 		return -E2BIG;
2917 	}
2918 
2919 	obj->arena_data = malloc(data_sz);
2920 	if (!obj->arena_data)
2921 		return -ENOMEM;
2922 	memcpy(obj->arena_data, data, data_sz);
2923 	obj->arena_data_sz = data_sz;
2924 
2925 	/* make bpf_map__init_value() work for ARENA maps */
2926 	map->mmaped = obj->arena_data;
2927 
2928 	return 0;
2929 }
2930 
2931 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2932 					  const char *pin_root_path)
2933 {
2934 	const struct btf_type *sec = NULL;
2935 	int nr_types, i, vlen, err;
2936 	const struct btf_type *t;
2937 	const char *name;
2938 	Elf_Data *data;
2939 	Elf_Scn *scn;
2940 
2941 	if (obj->efile.btf_maps_shndx < 0)
2942 		return 0;
2943 
2944 	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2945 	data = elf_sec_data(obj, scn);
2946 	if (!scn || !data) {
2947 		pr_warn("elf: failed to get %s map definitions for %s\n",
2948 			MAPS_ELF_SEC, obj->path);
2949 		return -EINVAL;
2950 	}
2951 
2952 	nr_types = btf__type_cnt(obj->btf);
2953 	for (i = 1; i < nr_types; i++) {
2954 		t = btf__type_by_id(obj->btf, i);
2955 		if (!btf_is_datasec(t))
2956 			continue;
2957 		name = btf__name_by_offset(obj->btf, t->name_off);
2958 		if (strcmp(name, MAPS_ELF_SEC) == 0) {
2959 			sec = t;
2960 			obj->efile.btf_maps_sec_btf_id = i;
2961 			break;
2962 		}
2963 	}
2964 
2965 	if (!sec) {
2966 		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2967 		return -ENOENT;
2968 	}
2969 
2970 	vlen = btf_vlen(sec);
2971 	for (i = 0; i < vlen; i++) {
2972 		err = bpf_object__init_user_btf_map(obj, sec, i,
2973 						    obj->efile.btf_maps_shndx,
2974 						    data, strict,
2975 						    pin_root_path);
2976 		if (err)
2977 			return err;
2978 	}
2979 
2980 	for (i = 0; i < obj->nr_maps; i++) {
2981 		struct bpf_map *map = &obj->maps[i];
2982 
2983 		if (map->def.type != BPF_MAP_TYPE_ARENA)
2984 			continue;
2985 
2986 		if (obj->arena_map) {
2987 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
2988 				map->name, obj->arena_map->name);
2989 			return -EINVAL;
2990 		}
2991 		obj->arena_map = map;
2992 
2993 		if (obj->efile.arena_data) {
2994 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
2995 						  obj->efile.arena_data->d_buf,
2996 						  obj->efile.arena_data->d_size);
2997 			if (err)
2998 				return err;
2999 		}
3000 	}
3001 	if (obj->efile.arena_data && !obj->arena_map) {
3002 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
3003 			ARENA_SEC);
3004 		return -ENOENT;
3005 	}
3006 
3007 	return 0;
3008 }
3009 
3010 static int bpf_object__init_maps(struct bpf_object *obj,
3011 				 const struct bpf_object_open_opts *opts)
3012 {
3013 	const char *pin_root_path;
3014 	bool strict;
3015 	int err = 0;
3016 
3017 	strict = !OPTS_GET(opts, relaxed_maps, false);
3018 	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
3019 
3020 	err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
3021 	err = err ?: bpf_object__init_global_data_maps(obj);
3022 	err = err ?: bpf_object__init_kconfig_map(obj);
3023 	err = err ?: bpf_object_init_struct_ops(obj);
3024 
3025 	return err;
3026 }
3027 
3028 static bool section_have_execinstr(struct bpf_object *obj, int idx)
3029 {
3030 	Elf64_Shdr *sh;
3031 
3032 	sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
3033 	if (!sh)
3034 		return false;
3035 
3036 	return sh->sh_flags & SHF_EXECINSTR;
3037 }
3038 
3039 static bool starts_with_qmark(const char *s)
3040 {
3041 	return s && s[0] == '?';
3042 }
3043 
3044 static bool btf_needs_sanitization(struct bpf_object *obj)
3045 {
3046 	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3047 	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3048 	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3049 	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3050 	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3051 	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3052 	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3053 	bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3054 
3055 	return !has_func || !has_datasec || !has_func_global || !has_float ||
3056 	       !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
3057 }
3058 
3059 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
3060 {
3061 	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3062 	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3063 	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3064 	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3065 	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3066 	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3067 	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3068 	bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3069 	int enum64_placeholder_id = 0;
3070 	struct btf_type *t;
3071 	int i, j, vlen;
3072 
3073 	for (i = 1; i < btf__type_cnt(btf); i++) {
3074 		t = (struct btf_type *)btf__type_by_id(btf, i);
3075 
3076 		if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
3077 			/* replace VAR/DECL_TAG with INT */
3078 			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
3079 			/*
3080 			 * using size = 1 is the safest choice, 4 will be too
3081 			 * big and cause kernel BTF validation failure if
3082 			 * original variable took less than 4 bytes
3083 			 */
3084 			t->size = 1;
3085 			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
3086 		} else if (!has_datasec && btf_is_datasec(t)) {
3087 			/* replace DATASEC with STRUCT */
3088 			const struct btf_var_secinfo *v = btf_var_secinfos(t);
3089 			struct btf_member *m = btf_members(t);
3090 			struct btf_type *vt;
3091 			char *name;
3092 
3093 			name = (char *)btf__name_by_offset(btf, t->name_off);
3094 			while (*name) {
3095 				if (*name == '.' || *name == '?')
3096 					*name = '_';
3097 				name++;
3098 			}
3099 
3100 			vlen = btf_vlen(t);
3101 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3102 			for (j = 0; j < vlen; j++, v++, m++) {
3103 				/* order of field assignments is important */
3104 				m->offset = v->offset * 8;
3105 				m->type = v->type;
3106 				/* preserve variable name as member name */
3107 				vt = (void *)btf__type_by_id(btf, v->type);
3108 				m->name_off = vt->name_off;
3109 			}
3110 		} else if (!has_qmark_datasec && btf_is_datasec(t) &&
3111 			   starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3112 			/* replace '?' prefix with '_' for DATASEC names */
3113 			char *name;
3114 
3115 			name = (char *)btf__name_by_offset(btf, t->name_off);
3116 			if (name[0] == '?')
3117 				name[0] = '_';
3118 		} else if (!has_func && btf_is_func_proto(t)) {
3119 			/* replace FUNC_PROTO with ENUM */
3120 			vlen = btf_vlen(t);
3121 			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3122 			t->size = sizeof(__u32); /* kernel enforced */
3123 		} else if (!has_func && btf_is_func(t)) {
3124 			/* replace FUNC with TYPEDEF */
3125 			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
3126 		} else if (!has_func_global && btf_is_func(t)) {
3127 			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3128 			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
3129 		} else if (!has_float && btf_is_float(t)) {
3130 			/* replace FLOAT with an equally-sized empty STRUCT;
3131 			 * since C compilers do not accept e.g. "float" as a
3132 			 * valid struct name, make it anonymous
3133 			 */
3134 			t->name_off = 0;
3135 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
3136 		} else if (!has_type_tag && btf_is_type_tag(t)) {
3137 			/* replace TYPE_TAG with a CONST */
3138 			t->name_off = 0;
3139 			t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
3140 		} else if (!has_enum64 && btf_is_enum(t)) {
3141 			/* clear the kflag */
3142 			t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3143 		} else if (!has_enum64 && btf_is_enum64(t)) {
3144 			/* replace ENUM64 with a union */
3145 			struct btf_member *m;
3146 
3147 			if (enum64_placeholder_id == 0) {
3148 				enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3149 				if (enum64_placeholder_id < 0)
3150 					return enum64_placeholder_id;
3151 
3152 				t = (struct btf_type *)btf__type_by_id(btf, i);
3153 			}
3154 
3155 			m = btf_members(t);
3156 			vlen = btf_vlen(t);
3157 			t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3158 			for (j = 0; j < vlen; j++, m++) {
3159 				m->type = enum64_placeholder_id;
3160 				m->offset = 0;
3161 			}
3162 		}
3163 	}
3164 
3165 	return 0;
3166 }
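/* Example of one such rewrite on a kernel without DATASEC support: a BTF
 * entry along the lines of
 *
 *   DATASEC '.data' size=8 vlen=1
 *     type_id=5 offset=0 size=8    (VAR 'my_var')
 *
 * is patched in place into
 *
 *   STRUCT '_data' size=8 vlen=1
 *     'my_var' type_id=5 bits_offset=0
 *
 * while the VAR itself is turned into a 1-byte INT, keeping type IDs
 * stable and the overall BTF acceptable to the older verifier.
 */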
3167 
3168 static bool libbpf_needs_btf(const struct bpf_object *obj)
3169 {
3170 	return obj->efile.btf_maps_shndx >= 0 ||
3171 	       obj->efile.has_st_ops ||
3172 	       obj->nr_extern > 0;
3173 }
3174 
3175 static bool kernel_needs_btf(const struct bpf_object *obj)
3176 {
3177 	return obj->efile.has_st_ops;
3178 }
3179 
3180 static int bpf_object__init_btf(struct bpf_object *obj,
3181 				Elf_Data *btf_data,
3182 				Elf_Data *btf_ext_data)
3183 {
3184 	int err = -ENOENT;
3185 
3186 	if (btf_data) {
3187 		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
3188 		err = libbpf_get_error(obj->btf);
3189 		if (err) {
3190 			obj->btf = NULL;
3191 			pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
3192 			goto out;
3193 		}
3194 		/* enforce 8-byte pointers for BPF-targeted BTFs */
3195 		btf__set_pointer_size(obj->btf, 8);
3196 	}
3197 	if (btf_ext_data) {
3198 		struct btf_ext_info *ext_segs[3];
3199 		int seg_num, sec_num;
3200 
3201 		if (!obj->btf) {
3202 			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
3203 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3204 			goto out;
3205 		}
3206 		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3207 		err = libbpf_get_error(obj->btf_ext);
3208 		if (err) {
3209 			pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
3210 				BTF_EXT_ELF_SEC, err);
3211 			obj->btf_ext = NULL;
3212 			goto out;
3213 		}
3214 
3215 		/* setup .BTF.ext to ELF section mapping */
3216 		ext_segs[0] = &obj->btf_ext->func_info;
3217 		ext_segs[1] = &obj->btf_ext->line_info;
3218 		ext_segs[2] = &obj->btf_ext->core_relo_info;
3219 		for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3220 			struct btf_ext_info *seg = ext_segs[seg_num];
3221 			const struct btf_ext_info_sec *sec;
3222 			const char *sec_name;
3223 			Elf_Scn *scn;
3224 
3225 			if (seg->sec_cnt == 0)
3226 				continue;
3227 
3228 			seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3229 			if (!seg->sec_idxs) {
3230 				err = -ENOMEM;
3231 				goto out;
3232 			}
3233 
3234 			sec_num = 0;
3235 			for_each_btf_ext_sec(seg, sec) {
3236 				/* preventively increment index to avoid doing
3237 				 * this before every continue below
3238 				 */
3239 				sec_num++;
3240 
3241 				sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3242 				if (str_is_empty(sec_name))
3243 					continue;
3244 				scn = elf_sec_by_name(obj, sec_name);
3245 				if (!scn)
3246 					continue;
3247 
3248 				seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3249 			}
3250 		}
3251 	}
3252 out:
3253 	if (err && libbpf_needs_btf(obj)) {
3254 		pr_warn("BTF is required, but is missing or corrupted.\n");
3255 		return err;
3256 	}
3257 	return 0;
3258 }
3259 
3260 static int compare_vsi_off(const void *_a, const void *_b)
3261 {
3262 	const struct btf_var_secinfo *a = _a;
3263 	const struct btf_var_secinfo *b = _b;
3264 
3265 	return a->offset - b->offset;
3266 }
3267 
3268 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3269 			     struct btf_type *t)
3270 {
3271 	__u32 size = 0, i, vars = btf_vlen(t);
3272 	const char *sec_name = btf__name_by_offset(btf, t->name_off);
3273 	struct btf_var_secinfo *vsi;
3274 	bool fixup_offsets = false;
3275 	int err;
3276 
3277 	if (!sec_name) {
3278 		pr_debug("No name found in string section for DATASEC kind.\n");
3279 		return -ENOENT;
3280 	}
3281 
3282 	/* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3283 	 * variable offsets set at the previous step. Further, not every
3284 	 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
3285 	 * all fixups altogether for such sections and go straight to sorting
3286 	 * VARs within their DATASEC.
3287 	 */
3288 	if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
3289 		goto sort_vars;
3290 
3291 	/* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3292 	 * fix this up. But BPF static linker already fixes this up and fills
3293 	 * all the sizes and offsets during static linking. So this step has
3294 	 * to be optional. But the STV_HIDDEN handling is non-optional for any
3295 	 * non-extern DATASEC, so the variable fixup loop below handles both
3296 	 * functions at the same time, paying the cost of BTF VAR <-> ELF
3297 	 * symbol matching just once.
3298 	 */
3299 	if (t->size == 0) {
3300 		err = find_elf_sec_sz(obj, sec_name, &size);
3301 		if (err || !size) {
3302 			pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3303 				 sec_name, size, err);
3304 			return -ENOENT;
3305 		}
3306 
3307 		t->size = size;
3308 		fixup_offsets = true;
3309 	}
3310 
3311 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
3312 		const struct btf_type *t_var;
3313 		struct btf_var *var;
3314 		const char *var_name;
3315 		Elf64_Sym *sym;
3316 
3317 		t_var = btf__type_by_id(btf, vsi->type);
3318 		if (!t_var || !btf_is_var(t_var)) {
3319 			pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3320 			return -EINVAL;
3321 		}
3322 
3323 		var = btf_var(t_var);
3324 		if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3325 			continue;
3326 
3327 		var_name = btf__name_by_offset(btf, t_var->name_off);
3328 		if (!var_name) {
3329 			pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3330 				 sec_name, i);
3331 			return -ENOENT;
3332 		}
3333 
3334 		sym = find_elf_var_sym(obj, var_name);
3335 		if (IS_ERR(sym)) {
3336 			pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3337 				 sec_name, var_name);
3338 			return -ENOENT;
3339 		}
3340 
3341 		if (fixup_offsets)
3342 			vsi->offset = sym->st_value;
3343 
3344 		/* if variable is a global/weak symbol, but has restricted
3345 		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3346 		 * as static. This follows similar logic for functions (BPF
3347 		 * subprogs) and influences libbpf's further decisions about
3348 		 * whether to make global data BPF array maps as
3349 		 * BPF_F_MMAPABLE.
3350 		 */
3351 		if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3352 		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3353 			var->linkage = BTF_VAR_STATIC;
3354 	}
3355 
3356 sort_vars:
3357 	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3358 	return 0;
3359 }
3360 
3361 static int bpf_object_fixup_btf(struct bpf_object *obj)
3362 {
3363 	int i, n, err = 0;
3364 
3365 	if (!obj->btf)
3366 		return 0;
3367 
3368 	n = btf__type_cnt(obj->btf);
3369 	for (i = 1; i < n; i++) {
3370 		struct btf_type *t = btf_type_by_id(obj->btf, i);
3371 
3372 		/* Loader needs to fix up some of the things compiler
3373 		 * couldn't get its hands on while emitting BTF. This
3374 		 * is section size and global variable offset. We use
3375 		 * the info from the ELF itself for this purpose.
3376 		 */
3377 		if (btf_is_datasec(t)) {
3378 			err = btf_fixup_datasec(obj, obj->btf, t);
3379 			if (err)
3380 				return err;
3381 		}
3382 	}
3383 
3384 	return 0;
3385 }
3386 
3387 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
3388 {
3389 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3390 	    prog->type == BPF_PROG_TYPE_LSM)
3391 		return true;
3392 
3393 	/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3394 	 * also need vmlinux BTF
3395 	 */
3396 	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3397 		return true;
3398 
3399 	return false;
3400 }
3401 
3402 static bool map_needs_vmlinux_btf(struct bpf_map *map)
3403 {
3404 	return bpf_map__is_struct_ops(map);
3405 }
3406 
3407 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3408 {
3409 	struct bpf_program *prog;
3410 	struct bpf_map *map;
3411 	int i;
3412 
3413 	/* CO-RE relocations need kernel BTF, only when btf_custom_path
3414 	 * is not specified
3415 	 */
3416 	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3417 		return true;
3418 
3419 	/* Support for typed ksyms needs kernel BTF */
3420 	for (i = 0; i < obj->nr_extern; i++) {
3421 		const struct extern_desc *ext;
3422 
3423 		ext = &obj->externs[i];
3424 		if (ext->type == EXT_KSYM && ext->ksym.type_id)
3425 			return true;
3426 	}
3427 
3428 	bpf_object__for_each_program(prog, obj) {
3429 		if (!prog->autoload)
3430 			continue;
3431 		if (prog_needs_vmlinux_btf(prog))
3432 			return true;
3433 	}
3434 
3435 	bpf_object__for_each_map(map, obj) {
3436 		if (map_needs_vmlinux_btf(map))
3437 			return true;
3438 	}
3439 
3440 	return false;
3441 }
3442 
3443 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3444 {
3445 	int err;
3446 
3447 	/* btf_vmlinux could be loaded earlier */
3448 	if (obj->btf_vmlinux || obj->gen_loader)
3449 		return 0;
3450 
3451 	if (!force && !obj_needs_vmlinux_btf(obj))
3452 		return 0;
3453 
3454 	obj->btf_vmlinux = btf__load_vmlinux_btf();
3455 	err = libbpf_get_error(obj->btf_vmlinux);
3456 	if (err) {
3457 		pr_warn("Error loading vmlinux BTF: %d\n", err);
3458 		obj->btf_vmlinux = NULL;
3459 		return err;
3460 	}
3461 	return 0;
3462 }
3463 
3464 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3465 {
3466 	struct btf *kern_btf = obj->btf;
3467 	bool btf_mandatory, sanitize;
3468 	int i, err = 0;
3469 
3470 	if (!obj->btf)
3471 		return 0;
3472 
3473 	if (!kernel_supports(obj, FEAT_BTF)) {
3474 		if (kernel_needs_btf(obj)) {
3475 			err = -EOPNOTSUPP;
3476 			goto report;
3477 		}
3478 		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
3479 		return 0;
3480 	}
3481 
3482 	/* Even though some subprogs are global/weak, user might prefer more
3483 	 * permissive BPF verification process that BPF verifier performs for
3484 	 * static functions, taking into account more context from the caller
3485 	 * functions. In such case, they need to mark such subprogs with
3486 	 * __attribute__((visibility("hidden"))) and libbpf will adjust
3487 	 * corresponding FUNC BTF type to be marked as static and trigger more
3488 	 * involved BPF verification process.
3489 	 */
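	/* For example (the subprog name is hypothetical), declaring
	 *
	 *	__attribute__((visibility("hidden")))
	 *	int do_check(struct ctx *c);
	 *
	 * (the __hidden macro in bpf_helpers.h expands to this attribute)
	 * keeps the subprog global in ELF, while the loop below downgrades
	 * its FUNC BTF linkage to BTF_FUNC_STATIC, opting into the
	 * caller-aware verification described above.
	 */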
3490 	for (i = 0; i < obj->nr_programs; i++) {
3491 		struct bpf_program *prog = &obj->programs[i];
3492 		struct btf_type *t;
3493 		const char *name;
3494 		int j, n;
3495 
3496 		if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3497 			continue;
3498 
3499 		n = btf__type_cnt(obj->btf);
3500 		for (j = 1; j < n; j++) {
3501 			t = btf_type_by_id(obj->btf, j);
3502 			if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3503 				continue;
3504 
3505 			name = btf__str_by_offset(obj->btf, t->name_off);
3506 			if (strcmp(name, prog->name) != 0)
3507 				continue;
3508 
3509 			t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3510 			break;
3511 		}
3512 	}
3513 
3514 	sanitize = btf_needs_sanitization(obj);
3515 	if (sanitize) {
3516 		const void *raw_data;
3517 		__u32 sz;
3518 
3519 		/* clone BTF to sanitize a copy and leave the original intact */
3520 		raw_data = btf__raw_data(obj->btf, &sz);
3521 		kern_btf = btf__new(raw_data, sz);
3522 		err = libbpf_get_error(kern_btf);
3523 		if (err)
3524 			return err;
3525 
3526 		/* enforce 8-byte pointers for BPF-targeted BTFs */
3527 		btf__set_pointer_size(obj->btf, 8);
3528 		err = bpf_object__sanitize_btf(obj, kern_btf);
3529 		if (err)
3530 			return err;
3531 	}
3532 
3533 	if (obj->gen_loader) {
3534 		__u32 raw_size = 0;
3535 		const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3536 
3537 		if (!raw_data)
3538 			return -ENOMEM;
3539 		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3540 		/* Pretend to have valid FD to pass various fd >= 0 checks.
3541 		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3542 		 */
3543 		btf__set_fd(kern_btf, 0);
3544 	} else {
3545 		/* currently BPF_BTF_LOAD only supports log_level 1 */
3546 		err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3547 					   obj->log_level ? 1 : 0, obj->token_fd);
3548 	}
3549 	if (sanitize) {
3550 		if (!err) {
3551 			/* move fd to libbpf's BTF */
3552 			btf__set_fd(obj->btf, btf__fd(kern_btf));
3553 			btf__set_fd(kern_btf, -1);
3554 		}
3555 		btf__free(kern_btf);
3556 	}
3557 report:
3558 	if (err) {
3559 		btf_mandatory = kernel_needs_btf(obj);
3560 		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3561 			btf_mandatory ? "BTF is mandatory, can't proceed."
3562 				      : "BTF is optional, ignoring.");
3563 		if (!btf_mandatory)
3564 			err = 0;
3565 	}
3566 	return err;
3567 }
3568 
3569 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3570 {
3571 	const char *name;
3572 
3573 	name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3574 	if (!name) {
3575 		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3576 			off, obj->path, elf_errmsg(-1));
3577 		return NULL;
3578 	}
3579 
3580 	return name;
3581 }
3582 
3583 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3584 {
3585 	const char *name;
3586 
3587 	name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3588 	if (!name) {
3589 		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3590 			off, obj->path, elf_errmsg(-1));
3591 		return NULL;
3592 	}
3593 
3594 	return name;
3595 }
3596 
3597 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3598 {
3599 	Elf_Scn *scn;
3600 
3601 	scn = elf_getscn(obj->efile.elf, idx);
3602 	if (!scn) {
3603 		pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3604 			idx, obj->path, elf_errmsg(-1));
3605 		return NULL;
3606 	}
3607 	return scn;
3608 }
3609 
3610 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3611 {
3612 	Elf_Scn *scn = NULL;
3613 	Elf *elf = obj->efile.elf;
3614 	const char *sec_name;
3615 
3616 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3617 		sec_name = elf_sec_name(obj, scn);
3618 		if (!sec_name)
3619 			return NULL;
3620 
3621 		if (strcmp(sec_name, name) != 0)
3622 			continue;
3623 
3624 		return scn;
3625 	}
3626 	return NULL;
3627 }
3628 
3629 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3630 {
3631 	Elf64_Shdr *shdr;
3632 
3633 	if (!scn)
3634 		return NULL;
3635 
3636 	shdr = elf64_getshdr(scn);
3637 	if (!shdr) {
3638 		pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3639 			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3640 		return NULL;
3641 	}
3642 
3643 	return shdr;
3644 }
3645 
3646 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3647 {
3648 	const char *name;
3649 	Elf64_Shdr *sh;
3650 
3651 	if (!scn)
3652 		return NULL;
3653 
3654 	sh = elf_sec_hdr(obj, scn);
3655 	if (!sh)
3656 		return NULL;
3657 
3658 	name = elf_sec_str(obj, sh->sh_name);
3659 	if (!name) {
3660 		pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3661 			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3662 		return NULL;
3663 	}
3664 
3665 	return name;
3666 }
3667 
3668 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3669 {
3670 	Elf_Data *data;
3671 
3672 	if (!scn)
3673 		return NULL;
3674 
3675 	data = elf_getdata(scn, 0);
3676 	if (!data) {
3677 		pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3678 			elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3679 			obj->path, elf_errmsg(-1));
3680 		return NULL;
3681 	}
3682 
3683 	return data;
3684 }
3685 
3686 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3687 {
3688 	if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3689 		return NULL;
3690 
3691 	return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3692 }
3693 
3694 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3695 {
3696 	if (idx >= data->d_size / sizeof(Elf64_Rel))
3697 		return NULL;
3698 
3699 	return (Elf64_Rel *)data->d_buf + idx;
3700 }
3701 
3702 static bool is_sec_name_dwarf(const char *name)
3703 {
3704 	/* approximation, but the actual list is too long */
3705 	return str_has_pfx(name, ".debug_");
3706 }
3707 
3708 static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3709 {
3710 	/* no special handling of .strtab */
3711 	if (hdr->sh_type == SHT_STRTAB)
3712 		return true;
3713 
3714 	/* ignore .llvm_addrsig section as well */
3715 	if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3716 		return true;
3717 
3718 	/* no subprograms will lead to an empty .text section, ignore it */
3719 	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3720 	    strcmp(name, ".text") == 0)
3721 		return true;
3722 
3723 	/* DWARF sections */
3724 	if (is_sec_name_dwarf(name))
3725 		return true;
3726 
3727 	if (str_has_pfx(name, ".rel")) {
3728 		name += sizeof(".rel") - 1;
3729 		/* DWARF section relocations */
3730 		if (is_sec_name_dwarf(name))
3731 			return true;
3732 
3733 		/* .BTF and .BTF.ext don't need relocations */
3734 		if (strcmp(name, BTF_ELF_SEC) == 0 ||
3735 		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
3736 			return true;
3737 	}
3738 
3739 	return false;
3740 }
3741 
3742 static int cmp_progs(const void *_a, const void *_b)
3743 {
3744 	const struct bpf_program *a = _a;
3745 	const struct bpf_program *b = _b;
3746 
3747 	if (a->sec_idx != b->sec_idx)
3748 		return a->sec_idx < b->sec_idx ? -1 : 1;
3749 
3750 	/* sec_insn_off can't be the same within the section */
3751 	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3752 }
3753 
3754 static int bpf_object__elf_collect(struct bpf_object *obj)
3755 {
3756 	struct elf_sec_desc *sec_desc;
3757 	Elf *elf = obj->efile.elf;
3758 	Elf_Data *btf_ext_data = NULL;
3759 	Elf_Data *btf_data = NULL;
3760 	int idx = 0, err = 0;
3761 	const char *name;
3762 	Elf_Data *data;
3763 	Elf_Scn *scn;
3764 	Elf64_Shdr *sh;
3765 
3766 	/* ELF section indices are 0-based, but sec #0 is special "invalid"
3767 	 * section. Since section count retrieved by elf_getshdrnum() does
3768 	 * include sec #0, it is already the necessary size of an array to keep
3769 	 * all the sections.
3770 	 */
3771 	if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3772 		pr_warn("elf: failed to get the number of sections for %s: %s\n",
3773 			obj->path, elf_errmsg(-1));
3774 		return -LIBBPF_ERRNO__FORMAT;
3775 	}
3776 	obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3777 	if (!obj->efile.secs)
3778 		return -ENOMEM;
3779 
3780 	/* a bunch of ELF parsing functionality depends on processing symbols,
3781 	 * so do the first pass and find the symbol table
3782 	 */
3783 	scn = NULL;
3784 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3785 		sh = elf_sec_hdr(obj, scn);
3786 		if (!sh)
3787 			return -LIBBPF_ERRNO__FORMAT;
3788 
3789 		if (sh->sh_type == SHT_SYMTAB) {
3790 			if (obj->efile.symbols) {
3791 				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3792 				return -LIBBPF_ERRNO__FORMAT;
3793 			}
3794 
3795 			data = elf_sec_data(obj, scn);
3796 			if (!data)
3797 				return -LIBBPF_ERRNO__FORMAT;
3798 
3799 			idx = elf_ndxscn(scn);
3800 
3801 			obj->efile.symbols = data;
3802 			obj->efile.symbols_shndx = idx;
3803 			obj->efile.strtabidx = sh->sh_link;
3804 		}
3805 	}
3806 
3807 	if (!obj->efile.symbols) {
3808 		pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3809 			obj->path);
3810 		return -ENOENT;
3811 	}
3812 
3813 	scn = NULL;
3814 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3815 		idx = elf_ndxscn(scn);
3816 		sec_desc = &obj->efile.secs[idx];
3817 
3818 		sh = elf_sec_hdr(obj, scn);
3819 		if (!sh)
3820 			return -LIBBPF_ERRNO__FORMAT;
3821 
3822 		name = elf_sec_str(obj, sh->sh_name);
3823 		if (!name)
3824 			return -LIBBPF_ERRNO__FORMAT;
3825 
3826 		if (ignore_elf_section(sh, name))
3827 			continue;
3828 
3829 		data = elf_sec_data(obj, scn);
3830 		if (!data)
3831 			return -LIBBPF_ERRNO__FORMAT;
3832 
3833 		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3834 			 idx, name, (unsigned long)data->d_size,
3835 			 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3836 			 (int)sh->sh_type);
3837 
3838 		if (strcmp(name, "license") == 0) {
3839 			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3840 			if (err)
3841 				return err;
3842 		} else if (strcmp(name, "version") == 0) {
3843 			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3844 			if (err)
3845 				return err;
3846 		} else if (strcmp(name, "maps") == 0) {
3847 			pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3848 			return -ENOTSUP;
3849 		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3850 			obj->efile.btf_maps_shndx = idx;
3851 		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
3852 			if (sh->sh_type != SHT_PROGBITS)
3853 				return -LIBBPF_ERRNO__FORMAT;
3854 			btf_data = data;
3855 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3856 			if (sh->sh_type != SHT_PROGBITS)
3857 				return -LIBBPF_ERRNO__FORMAT;
3858 			btf_ext_data = data;
3859 		} else if (sh->sh_type == SHT_SYMTAB) {
3860 			/* already processed during the first pass above */
3861 		} else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3862 			if (sh->sh_flags & SHF_EXECINSTR) {
3863 				if (strcmp(name, ".text") == 0)
3864 					obj->efile.text_shndx = idx;
3865 				err = bpf_object__add_programs(obj, data, name, idx);
3866 				if (err)
3867 					return err;
3868 			} else if (strcmp(name, DATA_SEC) == 0 ||
3869 				   str_has_pfx(name, DATA_SEC ".")) {
3870 				sec_desc->sec_type = SEC_DATA;
3871 				sec_desc->shdr = sh;
3872 				sec_desc->data = data;
3873 			} else if (strcmp(name, RODATA_SEC) == 0 ||
3874 				   str_has_pfx(name, RODATA_SEC ".")) {
3875 				sec_desc->sec_type = SEC_RODATA;
3876 				sec_desc->shdr = sh;
3877 				sec_desc->data = data;
3878 			} else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
3879 				   strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3880 				   strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3881 				   strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
3882 				sec_desc->sec_type = SEC_ST_OPS;
3883 				sec_desc->shdr = sh;
3884 				sec_desc->data = data;
3885 				obj->efile.has_st_ops = true;
3886 			} else if (strcmp(name, ARENA_SEC) == 0) {
3887 				obj->efile.arena_data = data;
3888 				obj->efile.arena_data_shndx = idx;
3889 			} else {
3890 				pr_info("elf: skipping unrecognized data section(%d) %s\n",
3891 					idx, name);
3892 			}
3893 		} else if (sh->sh_type == SHT_REL) {
3894 			int targ_sec_idx = sh->sh_info; /* points to other section */
3895 
3896 			if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3897 			    targ_sec_idx >= obj->efile.sec_cnt)
3898 				return -LIBBPF_ERRNO__FORMAT;
3899 
3900 			/* Only do relo for section with exec instructions */
3901 			if (!section_have_execinstr(obj, targ_sec_idx) &&
3902 			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3903 			    strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
3904 			    strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
3905 			    strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
3906 			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
3907 				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3908 					idx, name, targ_sec_idx,
3909 					elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3910 				continue;
3911 			}
3912 
3913 			sec_desc->sec_type = SEC_RELO;
3914 			sec_desc->shdr = sh;
3915 			sec_desc->data = data;
3916 		} else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3917 							 str_has_pfx(name, BSS_SEC "."))) {
3918 			sec_desc->sec_type = SEC_BSS;
3919 			sec_desc->shdr = sh;
3920 			sec_desc->data = data;
3921 		} else {
3922 			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3923 				(size_t)sh->sh_size);
3924 		}
3925 	}
3926 
3927 	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3928 		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3929 		return -LIBBPF_ERRNO__FORMAT;
3930 	}
3931 
3932 	/* sort BPF programs by section name and in-section instruction offset
3933 	 * for faster search
3934 	 */
3935 	if (obj->nr_programs)
3936 		qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3937 
3938 	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3939 }
3940 
3941 static bool sym_is_extern(const Elf64_Sym *sym)
3942 {
3943 	int bind = ELF64_ST_BIND(sym->st_info);
3944 	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3945 	return sym->st_shndx == SHN_UNDEF &&
3946 	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
3947 	       ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3948 }
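/* In BPF C source such symbols typically come from declarations like
 * (illustrative examples only):
 *
 *	extern int CONFIG_HZ __kconfig;
 *	extern void bpf_rcu_read_lock(void) __ksym;
 *
 * i.e. undefined (SHN_UNDEF) global or weak symbols that libbpf resolves at
 * load time instead of the linker.
 */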
3949 
3950 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3951 {
3952 	int bind = ELF64_ST_BIND(sym->st_info);
3953 	int type = ELF64_ST_TYPE(sym->st_info);
3954 
3955 	/* in .text section */
3956 	if (sym->st_shndx != text_shndx)
3957 		return false;
3958 
3959 	/* local function */
3960 	if (bind == STB_LOCAL && type == STT_SECTION)
3961 		return true;
3962 
3963 	/* global function */
3964 	return bind == STB_GLOBAL && type == STT_FUNC;
3965 }
3966 
3967 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3968 {
3969 	const struct btf_type *t;
3970 	const char *tname;
3971 	int i, n;
3972 
3973 	if (!btf)
3974 		return -ESRCH;
3975 
3976 	n = btf__type_cnt(btf);
3977 	for (i = 1; i < n; i++) {
3978 		t = btf__type_by_id(btf, i);
3979 
3980 		if (!btf_is_var(t) && !btf_is_func(t))
3981 			continue;
3982 
3983 		tname = btf__name_by_offset(btf, t->name_off);
3984 		if (strcmp(tname, ext_name))
3985 			continue;
3986 
3987 		if (btf_is_var(t) &&
3988 		    btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3989 			return -EINVAL;
3990 
3991 		if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3992 			return -EINVAL;
3993 
3994 		return i;
3995 	}
3996 
3997 	return -ENOENT;
3998 }
3999 
4000 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
4001 	const struct btf_var_secinfo *vs;
4002 	const struct btf_type *t;
4003 	int i, j, n;
4004 
4005 	if (!btf)
4006 		return -ESRCH;
4007 
4008 	n = btf__type_cnt(btf);
4009 	for (i = 1; i < n; i++) {
4010 		t = btf__type_by_id(btf, i);
4011 
4012 		if (!btf_is_datasec(t))
4013 			continue;
4014 
4015 		vs = btf_var_secinfos(t);
4016 		for (j = 0; j < btf_vlen(t); j++, vs++) {
4017 			if (vs->type == ext_btf_id)
4018 				return i;
4019 		}
4020 	}
4021 
4022 	return -ENOENT;
4023 }
4024 
4025 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
4026 				     bool *is_signed)
4027 {
4028 	const struct btf_type *t;
4029 	const char *name;
4030 
4031 	t = skip_mods_and_typedefs(btf, id, NULL);
4032 	name = btf__name_by_offset(btf, t->name_off);
4033 
4034 	if (is_signed)
4035 		*is_signed = false;
4036 	switch (btf_kind(t)) {
4037 	case BTF_KIND_INT: {
4038 		int enc = btf_int_encoding(t);
4039 
4040 		if (enc & BTF_INT_BOOL)
4041 			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
4042 		if (is_signed)
4043 			*is_signed = enc & BTF_INT_SIGNED;
4044 		if (t->size == 1)
4045 			return KCFG_CHAR;
4046 		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
4047 			return KCFG_UNKNOWN;
4048 		return KCFG_INT;
4049 	}
4050 	case BTF_KIND_ENUM:
4051 		if (t->size != 4)
4052 			return KCFG_UNKNOWN;
4053 		if (strcmp(name, "libbpf_tristate"))
4054 			return KCFG_UNKNOWN;
4055 		return KCFG_TRISTATE;
4056 	case BTF_KIND_ENUM64:
4057 		if (strcmp(name, "libbpf_tristate"))
4058 			return KCFG_UNKNOWN;
4059 		return KCFG_TRISTATE;
4060 	case BTF_KIND_ARRAY:
4061 		if (btf_array(t)->nelems == 0)
4062 			return KCFG_UNKNOWN;
4063 		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4064 			return KCFG_UNKNOWN;
4065 		return KCFG_CHAR_ARR;
4066 	default:
4067 		return KCFG_UNKNOWN;
4068 	}
4069 }
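/* Illustrative mapping of extern shapes to kcfg types (the CONFIG_* names
 * below are placeholders, not anything this code checks for):
 *
 *	extern int CONFIG_FOO __kconfig;			KCFG_INT
 *	extern bool CONFIG_BAR __kconfig __weak;		KCFG_BOOL
 *	extern enum libbpf_tristate CONFIG_BAZ __kconfig;	KCFG_TRISTATE
 *	extern char CONFIG_STR[32] __kconfig __weak;		KCFG_CHAR_ARR
 */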
4070 
4071 static int cmp_externs(const void *_a, const void *_b)
4072 {
4073 	const struct extern_desc *a = _a;
4074 	const struct extern_desc *b = _b;
4075 
4076 	if (a->type != b->type)
4077 		return a->type < b->type ? -1 : 1;
4078 
4079 	if (a->type == EXT_KCFG) {
4080 		/* descending order by alignment requirements */
4081 		if (a->kcfg.align != b->kcfg.align)
4082 			return a->kcfg.align > b->kcfg.align ? -1 : 1;
4083 		/* ascending order by size, within same alignment class */
4084 		if (a->kcfg.sz != b->kcfg.sz)
4085 			return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4086 	}
4087 
4088 	/* resolve ties by name */
4089 	return strcmp(a->name, b->name);
4090 }
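/* Ordering kcfg externs by descending alignment and then ascending size lets
 * the offset assignment in bpf_object__collect_externs() lay out the .kconfig
 * map's value with minimal padding; the final strcmp() only keeps the order
 * deterministic.
 */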
4091 
4092 static int find_int_btf_id(const struct btf *btf)
4093 {
4094 	const struct btf_type *t;
4095 	int i, n;
4096 
4097 	n = btf__type_cnt(btf);
4098 	for (i = 1; i < n; i++) {
4099 		t = btf__type_by_id(btf, i);
4100 
4101 		if (btf_is_int(t) && btf_int_bits(t) == 32)
4102 			return i;
4103 	}
4104 
4105 	return 0;
4106 }
4107 
4108 static int add_dummy_ksym_var(struct btf *btf)
4109 {
4110 	int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4111 	const struct btf_var_secinfo *vs;
4112 	const struct btf_type *sec;
4113 
4114 	if (!btf)
4115 		return 0;
4116 
4117 	sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4118 					    BTF_KIND_DATASEC);
4119 	if (sec_btf_id < 0)
4120 		return 0;
4121 
4122 	sec = btf__type_by_id(btf, sec_btf_id);
4123 	vs = btf_var_secinfos(sec);
4124 	for (i = 0; i < btf_vlen(sec); i++, vs++) {
4125 		const struct btf_type *vt;
4126 
4127 		vt = btf__type_by_id(btf, vs->type);
4128 		if (btf_is_func(vt))
4129 			break;
4130 	}
4131 
4132 	/* No func in ksyms sec.  No need to add dummy var. */
4133 	if (i == btf_vlen(sec))
4134 		return 0;
4135 
4136 	int_btf_id = find_int_btf_id(btf);
4137 	dummy_var_btf_id = btf__add_var(btf,
4138 					"dummy_ksym",
4139 					BTF_VAR_GLOBAL_ALLOCATED,
4140 					int_btf_id);
4141 	if (dummy_var_btf_id < 0)
4142 		pr_warn("cannot create a dummy_ksym var\n");
4143 
4144 	return dummy_var_btf_id;
4145 }
4146 
4147 static int bpf_object__collect_externs(struct bpf_object *obj)
4148 {
4149 	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
4150 	const struct btf_type *t;
4151 	struct extern_desc *ext;
4152 	int i, n, off, dummy_var_btf_id;
4153 	const char *ext_name, *sec_name;
4154 	size_t ext_essent_len;
4155 	Elf_Scn *scn;
4156 	Elf64_Shdr *sh;
4157 
4158 	if (!obj->efile.symbols)
4159 		return 0;
4160 
4161 	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4162 	sh = elf_sec_hdr(obj, scn);
4163 	if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4164 		return -LIBBPF_ERRNO__FORMAT;
4165 
4166 	dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4167 	if (dummy_var_btf_id < 0)
4168 		return dummy_var_btf_id;
4169 
4170 	n = sh->sh_size / sh->sh_entsize;
4171 	pr_debug("looking for externs among %d symbols...\n", n);
4172 
4173 	for (i = 0; i < n; i++) {
4174 		Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4175 
4176 		if (!sym)
4177 			return -LIBBPF_ERRNO__FORMAT;
4178 		if (!sym_is_extern(sym))
4179 			continue;
4180 		ext_name = elf_sym_str(obj, sym->st_name);
4181 		if (!ext_name || !ext_name[0])
4182 			continue;
4183 
4184 		ext = obj->externs;
4185 		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4186 		if (!ext)
4187 			return -ENOMEM;
4188 		obj->externs = ext;
4189 		ext = &ext[obj->nr_extern];
4190 		memset(ext, 0, sizeof(*ext));
4191 		obj->nr_extern++;
4192 
4193 		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4194 		if (ext->btf_id <= 0) {
4195 			pr_warn("failed to find BTF for extern '%s': %d\n",
4196 				ext_name, ext->btf_id);
4197 			return ext->btf_id;
4198 		}
4199 		t = btf__type_by_id(obj->btf, ext->btf_id);
4200 		ext->name = btf__name_by_offset(obj->btf, t->name_off);
4201 		ext->sym_idx = i;
4202 		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4203 
4204 		ext_essent_len = bpf_core_essential_name_len(ext->name);
4205 		ext->essent_name = NULL;
4206 		if (ext_essent_len != strlen(ext->name)) {
4207 			ext->essent_name = strndup(ext->name, ext_essent_len);
4208 			if (!ext->essent_name)
4209 				return -ENOMEM;
4210 		}
4211 
4212 		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4213 		if (ext->sec_btf_id <= 0) {
4214 			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4215 				ext_name, ext->btf_id, ext->sec_btf_id);
4216 			return ext->sec_btf_id;
4217 		}
4218 		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4219 		sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4220 
4221 		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
4222 			if (btf_is_func(t)) {
4223 				pr_warn("extern function %s is unsupported under %s section\n",
4224 					ext->name, KCONFIG_SEC);
4225 				return -ENOTSUP;
4226 			}
4227 			kcfg_sec = sec;
4228 			ext->type = EXT_KCFG;
4229 			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4230 			if (ext->kcfg.sz <= 0) {
4231 				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4232 					ext_name, ext->kcfg.sz);
4233 				return ext->kcfg.sz;
4234 			}
4235 			ext->kcfg.align = btf__align_of(obj->btf, t->type);
4236 			if (ext->kcfg.align <= 0) {
4237 				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4238 					ext_name, ext->kcfg.align);
4239 				return -EINVAL;
4240 			}
4241 			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4242 							&ext->kcfg.is_signed);
4243 			if (ext->kcfg.type == KCFG_UNKNOWN) {
4244 				pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
4245 				return -ENOTSUP;
4246 			}
4247 		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
4248 			ksym_sec = sec;
4249 			ext->type = EXT_KSYM;
4250 			skip_mods_and_typedefs(obj->btf, t->type,
4251 					       &ext->ksym.type_id);
4252 		} else {
4253 			pr_warn("unrecognized extern section '%s'\n", sec_name);
4254 			return -ENOTSUP;
4255 		}
4256 	}
4257 	pr_debug("collected %d externs total\n", obj->nr_extern);
4258 
4259 	if (!obj->nr_extern)
4260 		return 0;
4261 
4262 	/* sort externs by type, for kcfg ones also by (align, size, name) */
4263 	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4264 
4265 	/* for .ksyms section, we need to turn all externs into allocated
4266 	 * variables in BTF to pass kernel verification; we do this by
4267 	 * pretending that each extern is an 8-byte variable
4268 	 */
4269 	if (ksym_sec) {
4270 		/* find existing 4-byte integer type in BTF to use for fake
4271 		 * extern variables in DATASEC
4272 		 */
4273 		int int_btf_id = find_int_btf_id(obj->btf);
4274 		/* For extern function, a dummy_var added earlier
4275 		 * will be used to replace the vs->type and
4276 		 * its name string will be used to refill
4277 		 * the missing param's name.
4278 		 */
4279 		const struct btf_type *dummy_var;
4280 
4281 		dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4282 		for (i = 0; i < obj->nr_extern; i++) {
4283 			ext = &obj->externs[i];
4284 			if (ext->type != EXT_KSYM)
4285 				continue;
4286 			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4287 				 i, ext->sym_idx, ext->name);
4288 		}
4289 
4290 		sec = ksym_sec;
4291 		n = btf_vlen(sec);
4292 		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4293 			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4294 			struct btf_type *vt;
4295 
4296 			vt = (void *)btf__type_by_id(obj->btf, vs->type);
4297 			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4298 			ext = find_extern_by_name(obj, ext_name);
4299 			if (!ext) {
4300 				pr_warn("failed to find extern definition for BTF %s '%s'\n",
4301 					btf_kind_str(vt), ext_name);
4302 				return -ESRCH;
4303 			}
4304 			if (btf_is_func(vt)) {
4305 				const struct btf_type *func_proto;
4306 				struct btf_param *param;
4307 				int j;
4308 
4309 				func_proto = btf__type_by_id(obj->btf,
4310 							     vt->type);
4311 				param = btf_params(func_proto);
4312 				/* Reuse the dummy_var string if the
4313 				 * func proto does not have param name.
4314 				 */
4315 				for (j = 0; j < btf_vlen(func_proto); j++)
4316 					if (param[j].type && !param[j].name_off)
4317 						param[j].name_off =
4318 							dummy_var->name_off;
4319 				vs->type = dummy_var_btf_id;
4320 				vt->info &= ~0xffff;
4321 				vt->info |= BTF_FUNC_GLOBAL;
4322 			} else {
4323 				btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4324 				vt->type = int_btf_id;
4325 			}
4326 			vs->offset = off;
4327 			vs->size = sizeof(int);
4328 		}
4329 		sec->size = off;
4330 	}
4331 
4332 	if (kcfg_sec) {
4333 		sec = kcfg_sec;
4334 		/* for kcfg externs calculate their offsets within a .kconfig map */
4335 		off = 0;
4336 		for (i = 0; i < obj->nr_extern; i++) {
4337 			ext = &obj->externs[i];
4338 			if (ext->type != EXT_KCFG)
4339 				continue;
4340 
4341 			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4342 			off = ext->kcfg.data_off + ext->kcfg.sz;
4343 			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
4344 				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4345 		}
4346 		sec->size = off;
4347 		n = btf_vlen(sec);
4348 		for (i = 0; i < n; i++) {
4349 			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4350 
4351 			t = btf__type_by_id(obj->btf, vs->type);
4352 			ext_name = btf__name_by_offset(obj->btf, t->name_off);
4353 			ext = find_extern_by_name(obj, ext_name);
4354 			if (!ext) {
4355 				pr_warn("failed to find extern definition for BTF var '%s'\n",
4356 					ext_name);
4357 				return -ESRCH;
4358 			}
4359 			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4360 			vs->offset = ext->kcfg.data_off;
4361 		}
4362 	}
4363 	return 0;
4364 }
4365 
4366 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4367 {
4368 	return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
4369 }
4370 
4371 struct bpf_program *
4372 bpf_object__find_program_by_name(const struct bpf_object *obj,
4373 				 const char *name)
4374 {
4375 	struct bpf_program *prog;
4376 
4377 	bpf_object__for_each_program(prog, obj) {
4378 		if (prog_is_subprog(obj, prog))
4379 			continue;
4380 		if (!strcmp(prog->name, name))
4381 			return prog;
4382 	}
4383 	return errno = ENOENT, NULL;
4384 }
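/* Typical usage (illustrative; the program name is made up):
 *
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	if (!prog)
 *		return -errno;
 *
 * errno is set to ENOENT when nothing matches. Subprograms sharing .text are
 * skipped, so only entry-point programs can be looked up by name here.
 */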
4385 
4386 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4387 				      int shndx)
4388 {
4389 	switch (obj->efile.secs[shndx].sec_type) {
4390 	case SEC_BSS:
4391 	case SEC_DATA:
4392 	case SEC_RODATA:
4393 		return true;
4394 	default:
4395 		return false;
4396 	}
4397 }
4398 
4399 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4400 				      int shndx)
4401 {
4402 	return shndx == obj->efile.btf_maps_shndx;
4403 }
4404 
4405 static enum libbpf_map_type
4406 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4407 {
4408 	if (shndx == obj->efile.symbols_shndx)
4409 		return LIBBPF_MAP_KCONFIG;
4410 
4411 	switch (obj->efile.secs[shndx].sec_type) {
4412 	case SEC_BSS:
4413 		return LIBBPF_MAP_BSS;
4414 	case SEC_DATA:
4415 		return LIBBPF_MAP_DATA;
4416 	case SEC_RODATA:
4417 		return LIBBPF_MAP_RODATA;
4418 	default:
4419 		return LIBBPF_MAP_UNSPEC;
4420 	}
4421 }
4422 
4423 static int bpf_program__record_reloc(struct bpf_program *prog,
4424 				     struct reloc_desc *reloc_desc,
4425 				     __u32 insn_idx, const char *sym_name,
4426 				     const Elf64_Sym *sym, const Elf64_Rel *rel)
4427 {
4428 	struct bpf_insn *insn = &prog->insns[insn_idx];
4429 	size_t map_idx, nr_maps = prog->obj->nr_maps;
4430 	struct bpf_object *obj = prog->obj;
4431 	__u32 shdr_idx = sym->st_shndx;
4432 	enum libbpf_map_type type;
4433 	const char *sym_sec_name;
4434 	struct bpf_map *map;
4435 
4436 	if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4437 		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4438 			prog->name, sym_name, insn_idx, insn->code);
4439 		return -LIBBPF_ERRNO__RELOC;
4440 	}
4441 
4442 	if (sym_is_extern(sym)) {
4443 		int sym_idx = ELF64_R_SYM(rel->r_info);
4444 		int i, n = obj->nr_extern;
4445 		struct extern_desc *ext;
4446 
4447 		for (i = 0; i < n; i++) {
4448 			ext = &obj->externs[i];
4449 			if (ext->sym_idx == sym_idx)
4450 				break;
4451 		}
4452 		if (i >= n) {
4453 			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4454 				prog->name, sym_name, sym_idx);
4455 			return -LIBBPF_ERRNO__RELOC;
4456 		}
4457 		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4458 			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4459 		if (insn->code == (BPF_JMP | BPF_CALL))
4460 			reloc_desc->type = RELO_EXTERN_CALL;
4461 		else
4462 			reloc_desc->type = RELO_EXTERN_LD64;
4463 		reloc_desc->insn_idx = insn_idx;
4464 		reloc_desc->ext_idx = i;
4465 		return 0;
4466 	}
4467 
4468 	/* sub-program call relocation */
4469 	if (is_call_insn(insn)) {
4470 		if (insn->src_reg != BPF_PSEUDO_CALL) {
4471 			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4472 			return -LIBBPF_ERRNO__RELOC;
4473 		}
4474 		/* text_shndx can be 0, if no default "main" program exists */
4475 		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4476 			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4477 			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4478 				prog->name, sym_name, sym_sec_name);
4479 			return -LIBBPF_ERRNO__RELOC;
4480 		}
4481 		if (sym->st_value % BPF_INSN_SZ) {
4482 			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4483 				prog->name, sym_name, (size_t)sym->st_value);
4484 			return -LIBBPF_ERRNO__RELOC;
4485 		}
4486 		reloc_desc->type = RELO_CALL;
4487 		reloc_desc->insn_idx = insn_idx;
4488 		reloc_desc->sym_off = sym->st_value;
4489 		return 0;
4490 	}
4491 
4492 	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4493 		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4494 			prog->name, sym_name, shdr_idx);
4495 		return -LIBBPF_ERRNO__RELOC;
4496 	}
4497 
4498 	/* loading subprog addresses */
4499 	if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4500 		/* global_func: sym->st_value = offset in the section, insn->imm = 0.
4501 		 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4502 		 */
4503 		if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4504 			pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4505 				prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4506 			return -LIBBPF_ERRNO__RELOC;
4507 		}
4508 
4509 		reloc_desc->type = RELO_SUBPROG_ADDR;
4510 		reloc_desc->insn_idx = insn_idx;
4511 		reloc_desc->sym_off = sym->st_value;
4512 		return 0;
4513 	}
4514 
4515 	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4516 	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4517 
4518 	/* arena data relocation */
4519 	if (shdr_idx == obj->efile.arena_data_shndx) {
4520 		reloc_desc->type = RELO_DATA;
4521 		reloc_desc->insn_idx = insn_idx;
4522 		reloc_desc->map_idx = obj->arena_map - obj->maps;
4523 		reloc_desc->sym_off = sym->st_value;
4524 		return 0;
4525 	}
4526 
4527 	/* generic map reference relocation */
4528 	if (type == LIBBPF_MAP_UNSPEC) {
4529 		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4530 			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4531 				prog->name, sym_name, sym_sec_name);
4532 			return -LIBBPF_ERRNO__RELOC;
4533 		}
4534 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4535 			map = &obj->maps[map_idx];
4536 			if (map->libbpf_type != type ||
4537 			    map->sec_idx != sym->st_shndx ||
4538 			    map->sec_offset != sym->st_value)
4539 				continue;
4540 			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4541 				 prog->name, map_idx, map->name, map->sec_idx,
4542 				 map->sec_offset, insn_idx);
4543 			break;
4544 		}
4545 		if (map_idx >= nr_maps) {
4546 			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4547 				prog->name, sym_sec_name, (size_t)sym->st_value);
4548 			return -LIBBPF_ERRNO__RELOC;
4549 		}
4550 		reloc_desc->type = RELO_LD64;
4551 		reloc_desc->insn_idx = insn_idx;
4552 		reloc_desc->map_idx = map_idx;
4553 		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4554 		return 0;
4555 	}
4556 
4557 	/* global data map relocation */
4558 	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4559 		pr_warn("prog '%s': bad data relo against section '%s'\n",
4560 			prog->name, sym_sec_name);
4561 		return -LIBBPF_ERRNO__RELOC;
4562 	}
4563 	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4564 		map = &obj->maps[map_idx];
4565 		if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4566 			continue;
4567 		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4568 			 prog->name, map_idx, map->name, map->sec_idx,
4569 			 map->sec_offset, insn_idx);
4570 		break;
4571 	}
4572 	if (map_idx >= nr_maps) {
4573 		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4574 			prog->name, sym_sec_name);
4575 		return -LIBBPF_ERRNO__RELOC;
4576 	}
4577 
4578 	reloc_desc->type = RELO_DATA;
4579 	reloc_desc->insn_idx = insn_idx;
4580 	reloc_desc->map_idx = map_idx;
4581 	reloc_desc->sym_off = sym->st_value;
4582 	return 0;
4583 }
4584 
4585 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4586 {
4587 	return insn_idx >= prog->sec_insn_off &&
4588 	       insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4589 }
4590 
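/* Binary search over obj->programs, which bpf_object__elf_collect() sorted
 * with cmp_progs() (by section index, then by in-section instruction offset).
 * The loop converges on the last program whose sec_insn_off is <= insn_idx,
 * and the final prog_contains_insn() check rejects indices that fall past
 * that program's end.
 */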
4591 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4592 						 size_t sec_idx, size_t insn_idx)
4593 {
4594 	int l = 0, r = obj->nr_programs - 1, m;
4595 	struct bpf_program *prog;
4596 
4597 	if (!obj->nr_programs)
4598 		return NULL;
4599 
4600 	while (l < r) {
4601 		m = l + (r - l + 1) / 2;
4602 		prog = &obj->programs[m];
4603 
4604 		if (prog->sec_idx < sec_idx ||
4605 		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4606 			l = m;
4607 		else
4608 			r = m - 1;
4609 	}
4610 	/* matching program could be at index l, but it still might be the
4611 	 * wrong one, so we need to double check conditions for the last time
4612 	 */
4613 	prog = &obj->programs[l];
4614 	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4615 		return prog;
4616 	return NULL;
4617 }
4618 
4619 static int
4620 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4621 {
4622 	const char *relo_sec_name, *sec_name;
4623 	size_t sec_idx = shdr->sh_info, sym_idx;
4624 	struct bpf_program *prog;
4625 	struct reloc_desc *relos;
4626 	int err, i, nrels;
4627 	const char *sym_name;
4628 	__u32 insn_idx;
4629 	Elf_Scn *scn;
4630 	Elf_Data *scn_data;
4631 	Elf64_Sym *sym;
4632 	Elf64_Rel *rel;
4633 
4634 	if (sec_idx >= obj->efile.sec_cnt)
4635 		return -EINVAL;
4636 
4637 	scn = elf_sec_by_idx(obj, sec_idx);
4638 	scn_data = elf_sec_data(obj, scn);
4639 	if (!scn_data)
4640 		return -LIBBPF_ERRNO__FORMAT;
4641 
4642 	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4643 	sec_name = elf_sec_name(obj, scn);
4644 	if (!relo_sec_name || !sec_name)
4645 		return -EINVAL;
4646 
4647 	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4648 		 relo_sec_name, sec_idx, sec_name);
4649 	nrels = shdr->sh_size / shdr->sh_entsize;
4650 
4651 	for (i = 0; i < nrels; i++) {
4652 		rel = elf_rel_by_idx(data, i);
4653 		if (!rel) {
4654 			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4655 			return -LIBBPF_ERRNO__FORMAT;
4656 		}
4657 
4658 		sym_idx = ELF64_R_SYM(rel->r_info);
4659 		sym = elf_sym_by_idx(obj, sym_idx);
4660 		if (!sym) {
4661 			pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4662 				relo_sec_name, sym_idx, i);
4663 			return -LIBBPF_ERRNO__FORMAT;
4664 		}
4665 
4666 		if (sym->st_shndx >= obj->efile.sec_cnt) {
4667 			pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4668 				relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4669 			return -LIBBPF_ERRNO__FORMAT;
4670 		}
4671 
4672 		if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4673 			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4674 				relo_sec_name, (size_t)rel->r_offset, i);
4675 			return -LIBBPF_ERRNO__FORMAT;
4676 		}
4677 
4678 		insn_idx = rel->r_offset / BPF_INSN_SZ;
4679 		/* relocations against static functions are recorded as
4680 		 * relocations against the section that contains a function;
4681 		 * in such case, symbol will be STT_SECTION and sym.st_name
4682 		 * will point to empty string (0), so fetch section name
4683 		 * instead
4684 		 */
4685 		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4686 			sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4687 		else
4688 			sym_name = elf_sym_str(obj, sym->st_name);
4689 		sym_name = sym_name ?: "<?";
4690 
4691 		pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4692 			 relo_sec_name, i, insn_idx, sym_name);
4693 
4694 		prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4695 		if (!prog) {
4696 			pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4697 				relo_sec_name, i, sec_name, insn_idx);
4698 			continue;
4699 		}
4700 
4701 		relos = libbpf_reallocarray(prog->reloc_desc,
4702 					    prog->nr_reloc + 1, sizeof(*relos));
4703 		if (!relos)
4704 			return -ENOMEM;
4705 		prog->reloc_desc = relos;
4706 
4707 		/* adjust insn_idx to local BPF program frame of reference */
4708 		insn_idx -= prog->sec_insn_off;
4709 		err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4710 						insn_idx, sym_name, sym, rel);
4711 		if (err)
4712 			return err;
4713 
4714 		prog->nr_reloc++;
4715 	}
4716 	return 0;
4717 }
4718 
4719 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4720 {
4721 	int id;
4722 
4723 	if (!obj->btf)
4724 		return -ENOENT;
4725 
4726 	/* if it's BTF-defined map, we don't need to search for type IDs.
4727 	 * For struct_ops map, it does not need btf_key_type_id and
4728 	 * btf_value_type_id.
4729 	 */
4730 	if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4731 		return 0;
4732 
4733 	/*
4734 	 * LLVM annotates global data differently in BTF, that is,
4735 	 * only as '.data', '.bss' or '.rodata'.
4736 	 */
4737 	if (!bpf_map__is_internal(map))
4738 		return -ENOENT;
4739 
4740 	id = btf__find_by_name(obj->btf, map->real_name);
4741 	if (id < 0)
4742 		return id;
4743 
4744 	map->btf_key_type_id = 0;
4745 	map->btf_value_type_id = id;
4746 	return 0;
4747 }
4748 
4749 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4750 {
4751 	char file[PATH_MAX], buff[4096];
4752 	FILE *fp;
4753 	__u32 val;
4754 	int err;
4755 
4756 	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4757 	memset(info, 0, sizeof(*info));
4758 
4759 	fp = fopen(file, "re");
4760 	if (!fp) {
4761 		err = -errno;
4762 		pr_warn("failed to open %s: %d. No procfs support?\n", file,
4763 			err);
4764 		return err;
4765 	}
4766 
4767 	while (fgets(buff, sizeof(buff), fp)) {
4768 		if (sscanf(buff, "map_type:\t%u", &val) == 1)
4769 			info->type = val;
4770 		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4771 			info->key_size = val;
4772 		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4773 			info->value_size = val;
4774 		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4775 			info->max_entries = val;
4776 		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4777 			info->map_flags = val;
4778 	}
4779 
4780 	fclose(fp);
4781 
4782 	return 0;
4783 }
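/* Fallback used when bpf_map_get_info_by_fd() fails with EINVAL (kernels too
 * old to support BPF_OBJ_GET_INFO_BY_FD for maps): the same fields are
 * scraped from procfs. A /proc/<pid>/fdinfo/<fd> entry looks roughly like
 * (the values below are only an example):
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	128
 *	map_flags:	0x0
 */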
4784 
4785 bool bpf_map__autocreate(const struct bpf_map *map)
4786 {
4787 	return map->autocreate;
4788 }
4789 
4790 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4791 {
4792 	if (map->obj->loaded)
4793 		return libbpf_err(-EBUSY);
4794 
4795 	map->autocreate = autocreate;
4796 	return 0;
4797 }
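/* Example (illustrative map name): opt a map out of creation before load:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "optional_map");
 *
 *	if (m)
 *		bpf_map__set_autocreate(m, false);
 *	err = bpf_object__load(obj);
 *
 * The setter must run before bpf_object__load(), otherwise it returns -EBUSY
 * as above.
 */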
4798 
4799 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4800 {
4801 	struct bpf_map_info info;
4802 	__u32 len = sizeof(info), name_len;
4803 	int new_fd, err;
4804 	char *new_name;
4805 
4806 	memset(&info, 0, len);
4807 	err = bpf_map_get_info_by_fd(fd, &info, &len);
4808 	if (err && errno == EINVAL)
4809 		err = bpf_get_map_info_from_fdinfo(fd, &info);
4810 	if (err)
4811 		return libbpf_err(err);
4812 
4813 	name_len = strlen(info.name);
4814 	if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4815 		new_name = strdup(map->name);
4816 	else
4817 		new_name = strdup(info.name);
4818 
4819 	if (!new_name)
4820 		return libbpf_err(-errno);
4821 
4822 	/*
4823 	 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
4824 	 * This is similar to what we do in ensure_good_fd(), but without
4825 	 * closing original FD.
4826 	 */
4827 	new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
4828 	if (new_fd < 0) {
4829 		err = -errno;
4830 		goto err_free_new_name;
4831 	}
4832 
4833 	err = reuse_fd(map->fd, new_fd);
4834 	if (err)
4835 		goto err_free_new_name;
4836 
4837 	free(map->name);
4838 
4839 	map->name = new_name;
4840 	map->def.type = info.type;
4841 	map->def.key_size = info.key_size;
4842 	map->def.value_size = info.value_size;
4843 	map->def.max_entries = info.max_entries;
4844 	map->def.map_flags = info.map_flags;
4845 	map->btf_key_type_id = info.btf_key_type_id;
4846 	map->btf_value_type_id = info.btf_value_type_id;
4847 	map->reused = true;
4848 	map->map_extra = info.map_extra;
4849 
4850 	return 0;
4851 
4852 err_free_new_name:
4853 	free(new_name);
4854 	return libbpf_err(err);
4855 }
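/* Example (illustrative pin path): reuse an already pinned map instead of
 * creating a fresh one at load time:
 *
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (pin_fd >= 0) {
 *		err = bpf_map__reuse_fd(map, pin_fd);
 *		close(pin_fd);
 *	}
 *
 * Closing pin_fd afterwards is fine because the FD was duplicated above.
 * This is essentially what bpf_object__reuse_map() below does for maps that
 * have a pin_path set.
 */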
4856 
4857 __u32 bpf_map__max_entries(const struct bpf_map *map)
4858 {
4859 	return map->def.max_entries;
4860 }
4861 
4862 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4863 {
4864 	if (!bpf_map_type__is_map_in_map(map->def.type))
4865 		return errno = EINVAL, NULL;
4866 
4867 	return map->inner_map;
4868 }
4869 
4870 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4871 {
4872 	if (map->obj->loaded)
4873 		return libbpf_err(-EBUSY);
4874 
4875 	map->def.max_entries = max_entries;
4876 
4877 	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4878 	if (map_is_ringbuf(map))
4879 		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4880 
4881 	return 0;
4882 }
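/* Example: resizing a BPF ringbuf before load. Ringbuf sizes are rounded up
 * here to a page-size multiple (in practice the kernel wants a power-of-two
 * multiple of the page size), so with 4KB pages
 *
 *	bpf_map__set_max_entries(ringbuf_map, 5000);
 *
 * would typically end up as max_entries == 8192. As above, the call must
 * happen before bpf_object__load(), otherwise -EBUSY is returned.
 */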
4883 
4884 static int bpf_object_prepare_token(struct bpf_object *obj)
4885 {
4886 	const char *bpffs_path;
4887 	int bpffs_fd = -1, token_fd, err;
4888 	bool mandatory;
4889 	enum libbpf_print_level level;
4890 
4891 	/* token is explicitly prevented */
4892 	if (obj->token_path && obj->token_path[0] == '\0') {
4893 		pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
4894 		return 0;
4895 	}
4896 
4897 	mandatory = obj->token_path != NULL;
4898 	level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
4899 
4900 	bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
4901 	bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR);
4902 	if (bpffs_fd < 0) {
4903 		err = -errno;
4904 		__pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
4905 		     obj->name, err, bpffs_path,
4906 		     mandatory ? "" : ", skipping optional step...");
4907 		return mandatory ? err : 0;
4908 	}
4909 
4910 	token_fd = bpf_token_create(bpffs_fd, 0);
4911 	close(bpffs_fd);
4912 	if (token_fd < 0) {
4913 		if (!mandatory && token_fd == -ENOENT) {
4914 			pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
4915 				 obj->name, bpffs_path);
4916 			return 0;
4917 		}
4918 		__pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
4919 		     obj->name, token_fd, bpffs_path,
4920 		     mandatory ? "" : ", skipping optional step...");
4921 		return mandatory ? token_fd : 0;
4922 	}
4923 
4924 	obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
4925 	if (!obj->feat_cache) {
4926 		close(token_fd);
4927 		return -ENOMEM;
4928 	}
4929 
4930 	obj->token_fd = token_fd;
4931 	obj->feat_cache->token_fd = token_fd;
4932 
4933 	return 0;
4934 }
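/* Callers request an explicit token path via bpf_object_open_opts, e.g.
 * (assuming the bpf_token_path open option available in this libbpf version):
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.bpf_token_path = "/sys/fs/bpf",
 *	);
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *
 * With an explicit path, token creation failures above are hard errors; with
 * the implicit BPF_FS_DEFAULT_PATH probe they only produce debug messages.
 */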
4935 
4936 static int
4937 bpf_object__probe_loading(struct bpf_object *obj)
4938 {
4939 	char *cp, errmsg[STRERR_BUFSIZE];
4940 	struct bpf_insn insns[] = {
4941 		BPF_MOV64_IMM(BPF_REG_0, 0),
4942 		BPF_EXIT_INSN(),
4943 	};
4944 	int ret, insn_cnt = ARRAY_SIZE(insns);
4945 	LIBBPF_OPTS(bpf_prog_load_opts, opts,
4946 		.token_fd = obj->token_fd,
4947 		.prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
4948 	);
4949 
4950 	if (obj->gen_loader)
4951 		return 0;
4952 
4953 	ret = bump_rlimit_memlock();
4954 	if (ret)
4955 		pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4956 
4957 	/* make sure basic loading works */
4958 	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
4959 	if (ret < 0)
4960 		ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
4961 	if (ret < 0) {
4962 		ret = errno;
4963 		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4964 		pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4965 			"program. Make sure your kernel supports BPF "
4966 			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4967 			"set to big enough value.\n", __func__, cp, ret);
4968 		return -ret;
4969 	}
4970 	close(ret);
4971 
4972 	return 0;
4973 }
4974 
4975 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4976 {
4977 	if (obj->gen_loader)
4978 		/* To generate loader program assume the latest kernel
4979 		 * to avoid doing extra prog_load, map_create syscalls.
4980 		 */
4981 		return true;
4982 
4983 	if (obj->token_fd)
4984 		return feat_supported(obj->feat_cache, feat_id);
4985 
4986 	return feat_supported(NULL, feat_id);
4987 }
4988 
4989 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4990 {
4991 	struct bpf_map_info map_info;
4992 	char msg[STRERR_BUFSIZE];
4993 	__u32 map_info_len = sizeof(map_info);
4994 	int err;
4995 
4996 	memset(&map_info, 0, map_info_len);
4997 	err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
4998 	if (err && errno == EINVAL)
4999 		err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
5000 	if (err) {
5001 		pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
5002 			libbpf_strerror_r(errno, msg, sizeof(msg)));
5003 		return false;
5004 	}
5005 
5006 	return (map_info.type == map->def.type &&
5007 		map_info.key_size == map->def.key_size &&
5008 		map_info.value_size == map->def.value_size &&
5009 		map_info.max_entries == map->def.max_entries &&
5010 		map_info.map_flags == map->def.map_flags &&
5011 		map_info.map_extra == map->map_extra);
5012 }
5013 
5014 static int
5015 bpf_object__reuse_map(struct bpf_map *map)
5016 {
5017 	char *cp, errmsg[STRERR_BUFSIZE];
5018 	int err, pin_fd;
5019 
5020 	pin_fd = bpf_obj_get(map->pin_path);
5021 	if (pin_fd < 0) {
5022 		err = -errno;
5023 		if (err == -ENOENT) {
5024 			pr_debug("found no pinned map to reuse at '%s'\n",
5025 				 map->pin_path);
5026 			return 0;
5027 		}
5028 
5029 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5030 		pr_warn("couldn't retrieve pinned map '%s': %s\n",
5031 			map->pin_path, cp);
5032 		return err;
5033 	}
5034 
5035 	if (!map_is_reuse_compat(map, pin_fd)) {
5036 		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
5037 			map->pin_path);
5038 		close(pin_fd);
5039 		return -EINVAL;
5040 	}
5041 
5042 	err = bpf_map__reuse_fd(map, pin_fd);
5043 	close(pin_fd);
5044 	if (err)
5045 		return err;
5046 
5047 	map->pinned = true;
5048 	pr_debug("reused pinned map at '%s'\n", map->pin_path);
5049 
5050 	return 0;
5051 }
5052 
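/* [Editor's illustrative sketch, not part of libbpf.c] The reuse logic above
 * is driven entirely by map->pin_path. A caller typically opts in before
 * load; the object and map names below are hypothetical.
 */
#if 0
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");

	/* if a compatible map is already pinned at this path it is reused;
	 * otherwise the map is created and auto-pinned there on load
	 */
	bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
	bpf_object__load(obj);
#endif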
5053 static int
5054 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5055 {
5056 	enum libbpf_map_type map_type = map->libbpf_type;
5057 	char *cp, errmsg[STRERR_BUFSIZE];
5058 	int err, zero = 0;
5059 
5060 	if (obj->gen_loader) {
5061 		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5062 					 map->mmaped, map->def.value_size);
5063 		if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
5064 			bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5065 		return 0;
5066 	}
5067 
5068 	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5069 	if (err) {
5070 		err = -errno;
5071 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5072 		pr_warn("Error setting initial map(%s) contents: %s\n",
5073 			map->name, cp);
5074 		return err;
5075 	}
5076 
5077 	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
5078 	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
5079 		err = bpf_map_freeze(map->fd);
5080 		if (err) {
5081 			err = -errno;
5082 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5083 			pr_warn("Error freezing map(%s) as read-only: %s\n",
5084 				map->name, cp);
5085 			return err;
5086 		}
5087 	}
5088 	return 0;
5089 }
5090 
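/* [Editor's illustrative sketch, not part of libbpf.c] Once frozen, a map
 * rejects syscall-side writes, which is what makes .rodata/.kconfig
 * effectively read-only from user space (fd/buf are assumed to exist):
 */
#if 0
	int zero = 0;

	bpf_map_update_elem(fd, &zero, buf, 0);	/* works before freezing */
	bpf_map_freeze(fd);			/* issues BPF_MAP_FREEZE */
	bpf_map_update_elem(fd, &zero, buf, 0);	/* now fails with EPERM */
#endif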
5091 static void bpf_map__destroy(struct bpf_map *map);
5092 
5093 static bool map_is_created(const struct bpf_map *map)
5094 {
5095 	return map->obj->loaded || map->reused;
5096 }
5097 
5098 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
5099 {
5100 	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
5101 	struct bpf_map_def *def = &map->def;
5102 	const char *map_name = NULL;
5103 	int err = 0, map_fd;
5104 
5105 	if (kernel_supports(obj, FEAT_PROG_NAME))
5106 		map_name = map->name;
5107 	create_attr.map_ifindex = map->map_ifindex;
5108 	create_attr.map_flags = def->map_flags;
5109 	create_attr.numa_node = map->numa_node;
5110 	create_attr.map_extra = map->map_extra;
5111 	create_attr.token_fd = obj->token_fd;
5112 	if (obj->token_fd)
5113 		create_attr.map_flags |= BPF_F_TOKEN_FD;
5114 
5115 	if (bpf_map__is_struct_ops(map)) {
5116 		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5117 		if (map->mod_btf_fd >= 0) {
5118 			create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
5119 			create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
5120 		}
5121 	}
5122 
5123 	if (obj->btf && btf__fd(obj->btf) >= 0) {
5124 		create_attr.btf_fd = btf__fd(obj->btf);
5125 		create_attr.btf_key_type_id = map->btf_key_type_id;
5126 		create_attr.btf_value_type_id = map->btf_value_type_id;
5127 	}
5128 
5129 	if (bpf_map_type__is_map_in_map(def->type)) {
5130 		if (map->inner_map) {
5131 			err = map_set_def_max_entries(map->inner_map);
5132 			if (err)
5133 				return err;
5134 			err = bpf_object__create_map(obj, map->inner_map, true);
5135 			if (err) {
5136 				pr_warn("map '%s': failed to create inner map: %d\n",
5137 					map->name, err);
5138 				return err;
5139 			}
5140 			map->inner_map_fd = map->inner_map->fd;
5141 		}
5142 		if (map->inner_map_fd >= 0)
5143 			create_attr.inner_map_fd = map->inner_map_fd;
5144 	}
5145 
5146 	switch (def->type) {
5147 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5148 	case BPF_MAP_TYPE_CGROUP_ARRAY:
5149 	case BPF_MAP_TYPE_STACK_TRACE:
5150 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5151 	case BPF_MAP_TYPE_HASH_OF_MAPS:
5152 	case BPF_MAP_TYPE_DEVMAP:
5153 	case BPF_MAP_TYPE_DEVMAP_HASH:
5154 	case BPF_MAP_TYPE_CPUMAP:
5155 	case BPF_MAP_TYPE_XSKMAP:
5156 	case BPF_MAP_TYPE_SOCKMAP:
5157 	case BPF_MAP_TYPE_SOCKHASH:
5158 	case BPF_MAP_TYPE_QUEUE:
5159 	case BPF_MAP_TYPE_STACK:
5160 	case BPF_MAP_TYPE_ARENA:
5161 		create_attr.btf_fd = 0;
5162 		create_attr.btf_key_type_id = 0;
5163 		create_attr.btf_value_type_id = 0;
5164 		map->btf_key_type_id = 0;
5165 		map->btf_value_type_id = 0;
5166 		break;
5167 	case BPF_MAP_TYPE_STRUCT_OPS:
5168 		create_attr.btf_value_type_id = 0;
5169 		break;
5170 	default:
5171 		break;
5172 	}
5173 
5174 	if (obj->gen_loader) {
5175 		bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5176 				    def->key_size, def->value_size, def->max_entries,
5177 				    &create_attr, is_inner ? -1 : map - obj->maps);
5178 		/* We keep pretending we have a valid FD to pass various fd >= 0
5179 		 * checks by just keeping original placeholder FDs in place.
5180 		 * See bpf_object__add_map() comment.
5181 		 * This placeholder fd will not be used with any syscall and
5182 		 * will be reset to -1 eventually.
5183 		 */
5184 		map_fd = map->fd;
5185 	} else {
5186 		map_fd = bpf_map_create(def->type, map_name,
5187 					def->key_size, def->value_size,
5188 					def->max_entries, &create_attr);
5189 	}
5190 	if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
5191 		char *cp, errmsg[STRERR_BUFSIZE];
5192 
5193 		err = -errno;
5194 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5195 		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
5196 			map->name, cp, err);
5197 		create_attr.btf_fd = 0;
5198 		create_attr.btf_key_type_id = 0;
5199 		create_attr.btf_value_type_id = 0;
5200 		map->btf_key_type_id = 0;
5201 		map->btf_value_type_id = 0;
5202 		map_fd = bpf_map_create(def->type, map_name,
5203 					def->key_size, def->value_size,
5204 					def->max_entries, &create_attr);
5205 	}
5206 
5207 	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5208 		if (obj->gen_loader)
5209 			map->inner_map->fd = -1;
5210 		bpf_map__destroy(map->inner_map);
5211 		zfree(&map->inner_map);
5212 	}
5213 
5214 	if (map_fd < 0)
5215 		return map_fd;
5216 
5217 	/* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5218 	if (map->fd == map_fd)
5219 		return 0;
5220 
5221 	/* Keep placeholder FD value but now point it to the BPF map object.
5222 	 * This way everything that relied on this map's FD (e.g., relocated
5223 	 * ldimm64 instructions) will stay valid and won't need adjustments.
5224 	 * map->fd stays valid but now points to what map_fd points to.
5225 	 */
5226 	return reuse_fd(map->fd, map_fd);
5227 }
5228 
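/* [Editor's illustrative sketch, not part of libbpf.c] The low-level call
 * issued above, including the retry-without-BTF fallback, looks roughly like
 * this; btf_fd, key_type_id and value_type_id are assumed to be valid.
 */
#if 0
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_fd = btf_fd,
		.btf_key_type_id = key_type_id,
		.btf_value_type_id = value_type_id,
	);
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "my_map",
				sizeof(int), sizeof(long), 1024, &opts);
	if (map_fd < 0) {
		/* an older kernel may reject BTF-annotated maps; retry without BTF */
		opts.btf_fd = 0;
		opts.btf_key_type_id = 0;
		opts.btf_value_type_id = 0;
		map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "my_map",
					sizeof(int), sizeof(long), 1024, &opts);
	}
#endif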
5229 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5230 {
5231 	const struct bpf_map *targ_map;
5232 	unsigned int i;
5233 	int fd, err = 0;
5234 
5235 	for (i = 0; i < map->init_slots_sz; i++) {
5236 		if (!map->init_slots[i])
5237 			continue;
5238 
5239 		targ_map = map->init_slots[i];
5240 		fd = targ_map->fd;
5241 
5242 		if (obj->gen_loader) {
5243 			bpf_gen__populate_outer_map(obj->gen_loader,
5244 						    map - obj->maps, i,
5245 						    targ_map - obj->maps);
5246 		} else {
5247 			err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5248 		}
5249 		if (err) {
5250 			err = -errno;
5251 			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5252 				map->name, i, targ_map->name, fd, err);
5253 			return err;
5254 		}
5255 		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5256 			 map->name, i, targ_map->name, fd);
5257 	}
5258 
5259 	zfree(&map->init_slots);
5260 	map->init_slots_sz = 0;
5261 
5262 	return 0;
5263 }
5264 
5265 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5266 {
5267 	const struct bpf_program *targ_prog;
5268 	unsigned int i;
5269 	int fd, err;
5270 
5271 	if (obj->gen_loader)
5272 		return -ENOTSUP;
5273 
5274 	for (i = 0; i < map->init_slots_sz; i++) {
5275 		if (!map->init_slots[i])
5276 			continue;
5277 
5278 		targ_prog = map->init_slots[i];
5279 		fd = bpf_program__fd(targ_prog);
5280 
5281 		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5282 		if (err) {
5283 			err = -errno;
5284 			pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5285 				map->name, i, targ_prog->name, fd, err);
5286 			return err;
5287 		}
5288 		pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5289 			 map->name, i, targ_prog->name, fd);
5290 	}
5291 
5292 	zfree(&map->init_slots);
5293 	map->init_slots_sz = 0;
5294 
5295 	return 0;
5296 }
5297 
5298 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5299 {
5300 	struct bpf_map *map;
5301 	int i, err;
5302 
5303 	for (i = 0; i < obj->nr_maps; i++) {
5304 		map = &obj->maps[i];
5305 
5306 		if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5307 			continue;
5308 
5309 		err = init_prog_array_slots(obj, map);
5310 		if (err < 0)
5311 			return err;
5312 	}
5313 	return 0;
5314 }
5315 
5316 static int map_set_def_max_entries(struct bpf_map *map)
5317 {
5318 	if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5319 		int nr_cpus;
5320 
5321 		nr_cpus = libbpf_num_possible_cpus();
5322 		if (nr_cpus < 0) {
5323 			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5324 				map->name, nr_cpus);
5325 			return nr_cpus;
5326 		}
5327 		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5328 		map->def.max_entries = nr_cpus;
5329 	}
5330 
5331 	return 0;
5332 }
5333 
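/* [Editor's illustrative sketch, not part of libbpf.c] The same sizing rule
 * can be applied by hand when creating a PERF_EVENT_ARRAY directly:
 */
#if 0
	int nr_cpus = libbpf_num_possible_cpus();
	int fd;

	if (nr_cpus < 0)
		return nr_cpus;
	fd = bpf_map_create(BPF_MAP_TYPE_PERF_EVENT_ARRAY, "events",
			    sizeof(int), sizeof(int), nr_cpus, NULL);
#endif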
5334 static int
5335 bpf_object__create_maps(struct bpf_object *obj)
5336 {
5337 	struct bpf_map *map;
5338 	char *cp, errmsg[STRERR_BUFSIZE];
5339 	unsigned int i, j;
5340 	int err;
5341 	bool retried;
5342 
5343 	for (i = 0; i < obj->nr_maps; i++) {
5344 		map = &obj->maps[i];
5345 
5346 		/* To support old kernels, we skip creating global data maps
5347 		 * (.rodata, .data, .kconfig, etc); later on, during program
5348 		 * loading, if we detect that at least one of the to-be-loaded
5349 		 * programs is referencing any global data map, we'll error
5350 		 * out with program name and relocation index logged.
5351 		 * This approach allows us to accommodate Clang emitting
5352 		 * unnecessary .rodata.str1.1 sections for string literals,
5353 		 * and it also allows CO-RE applications to use global
5354 		 * variables in some BPF programs but not others.
5355 		 * If those global variable-using programs are not loaded at
5356 		 * runtime due to bpf_program__set_autoload(prog, false),
5357 		 * bpf_object loading will succeed just fine even on old
5358 		 * kernels.
5359 		 */
5360 		if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5361 			map->autocreate = false;
5362 
5363 		if (!map->autocreate) {
5364 			pr_debug("map '%s': skipped auto-creating...\n", map->name);
5365 			continue;
5366 		}
5367 
5368 		err = map_set_def_max_entries(map);
5369 		if (err)
5370 			goto err_out;
5371 
5372 		retried = false;
5373 retry:
5374 		if (map->pin_path) {
5375 			err = bpf_object__reuse_map(map);
5376 			if (err) {
5377 				pr_warn("map '%s': error reusing pinned map\n",
5378 					map->name);
5379 				goto err_out;
5380 			}
5381 			if (retried && map->fd < 0) {
5382 				pr_warn("map '%s': cannot find pinned map\n",
5383 					map->name);
5384 				err = -ENOENT;
5385 				goto err_out;
5386 			}
5387 		}
5388 
5389 		if (map->reused) {
5390 			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5391 				 map->name, map->fd);
5392 		} else {
5393 			err = bpf_object__create_map(obj, map, false);
5394 			if (err)
5395 				goto err_out;
5396 
5397 			pr_debug("map '%s': created successfully, fd=%d\n",
5398 				 map->name, map->fd);
5399 
5400 			if (bpf_map__is_internal(map)) {
5401 				err = bpf_object__populate_internal_map(obj, map);
5402 				if (err < 0)
5403 					goto err_out;
5404 			}
5405 			if (map->def.type == BPF_MAP_TYPE_ARENA) {
5406 				map->mmaped = mmap((void *)(long)map->map_extra,
5407 						   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
5408 						   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5409 						   map->fd, 0);
5410 				if (map->mmaped == MAP_FAILED) {
5411 					err = -errno;
5412 					map->mmaped = NULL;
5413 					pr_warn("map '%s': failed to mmap arena: %d\n",
5414 						map->name, err);
5415 					return err;
5416 				}
5417 				if (obj->arena_data) {
5418 					memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
5419 					zfree(&obj->arena_data);
5420 				}
5421 			}
5422 			if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5423 				err = init_map_in_map_slots(obj, map);
5424 				if (err < 0)
5425 					goto err_out;
5426 			}
5427 		}
5428 
5429 		if (map->pin_path && !map->pinned) {
5430 			err = bpf_map__pin(map, NULL);
5431 			if (err) {
5432 				if (!retried && err == -EEXIST) {
5433 					retried = true;
5434 					goto retry;
5435 				}
5436 				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5437 					map->name, map->pin_path, err);
5438 				goto err_out;
5439 			}
5440 		}
5441 	}
5442 
5443 	return 0;
5444 
5445 err_out:
5446 	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5447 	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5448 	pr_perm_msg(err);
5449 	for (j = 0; j < i; j++)
5450 		zclose(obj->maps[j].fd);
5451 	return err;
5452 }
5453 
5454 static bool bpf_core_is_flavor_sep(const char *s)
5455 {
5456 	/* check X___Y name pattern, where X and Y are not underscores */
5457 	return s[0] != '_' &&				      /* X */
5458 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
5459 	       s[4] != '_';				      /* Y */
5460 }
5461 
5462 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
5463  * before last triple underscore. Struct name part after last triple
5464  * underscore is ignored by BPF CO-RE relocation during relocation matching.
5465  */
5466 size_t bpf_core_essential_name_len(const char *name)
5467 {
5468 	size_t n = strlen(name);
5469 	int i;
5470 
5471 	for (i = n - 5; i >= 0; i--) {
5472 		if (bpf_core_is_flavor_sep(name + i))
5473 			return i + 1;
5474 	}
5475 	return n;
5476 }
5477 
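/* [Editor's illustrative example, not part of libbpf.c] For a flavored local
 * type such as "struct task_struct___old", only the part before the last
 * triple underscore participates in candidate matching:
 */
#if 0
	/* returns strlen("task_struct") == 11; the "___old" flavor is ignored */
	size_t n = bpf_core_essential_name_len("task_struct___old");
#endif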
5478 void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5479 {
5480 	if (!cands)
5481 		return;
5482 
5483 	free(cands->cands);
5484 	free(cands);
5485 }
5486 
5487 int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5488 		       size_t local_essent_len,
5489 		       const struct btf *targ_btf,
5490 		       const char *targ_btf_name,
5491 		       int targ_start_id,
5492 		       struct bpf_core_cand_list *cands)
5493 {
5494 	struct bpf_core_cand *new_cands, *cand;
5495 	const struct btf_type *t, *local_t;
5496 	const char *targ_name, *local_name;
5497 	size_t targ_essent_len;
5498 	int n, i;
5499 
5500 	local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5501 	local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5502 
5503 	n = btf__type_cnt(targ_btf);
5504 	for (i = targ_start_id; i < n; i++) {
5505 		t = btf__type_by_id(targ_btf, i);
5506 		if (!btf_kind_core_compat(t, local_t))
5507 			continue;
5508 
5509 		targ_name = btf__name_by_offset(targ_btf, t->name_off);
5510 		if (str_is_empty(targ_name))
5511 			continue;
5512 
5513 		targ_essent_len = bpf_core_essential_name_len(targ_name);
5514 		if (targ_essent_len != local_essent_len)
5515 			continue;
5516 
5517 		if (strncmp(local_name, targ_name, local_essent_len) != 0)
5518 			continue;
5519 
5520 		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5521 			 local_cand->id, btf_kind_str(local_t),
5522 			 local_name, i, btf_kind_str(t), targ_name,
5523 			 targ_btf_name);
5524 		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5525 					      sizeof(*cands->cands));
5526 		if (!new_cands)
5527 			return -ENOMEM;
5528 
5529 		cand = &new_cands[cands->len];
5530 		cand->btf = targ_btf;
5531 		cand->id = i;
5532 
5533 		cands->cands = new_cands;
5534 		cands->len++;
5535 	}
5536 	return 0;
5537 }
5538 
5539 static int load_module_btfs(struct bpf_object *obj)
5540 {
5541 	struct bpf_btf_info info;
5542 	struct module_btf *mod_btf;
5543 	struct btf *btf;
5544 	char name[64];
5545 	__u32 id = 0, len;
5546 	int err, fd;
5547 
5548 	if (obj->btf_modules_loaded)
5549 		return 0;
5550 
5551 	if (obj->gen_loader)
5552 		return 0;
5553 
5554 	/* don't do this again, even if we find no module BTFs */
5555 	obj->btf_modules_loaded = true;
5556 
5557 	/* kernel too old to support module BTFs */
5558 	if (!kernel_supports(obj, FEAT_MODULE_BTF))
5559 		return 0;
5560 
5561 	while (true) {
5562 		err = bpf_btf_get_next_id(id, &id);
5563 		if (err && errno == ENOENT)
5564 			return 0;
5565 		if (err && errno == EPERM) {
5566 			pr_debug("skipping module BTFs loading, missing privileges\n");
5567 			return 0;
5568 		}
5569 		if (err) {
5570 			err = -errno;
5571 			pr_warn("failed to iterate BTF objects: %d\n", err);
5572 			return err;
5573 		}
5574 
5575 		fd = bpf_btf_get_fd_by_id(id);
5576 		if (fd < 0) {
5577 			if (errno == ENOENT)
5578 				continue; /* expected race: BTF was unloaded */
5579 			err = -errno;
5580 			pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5581 			return err;
5582 		}
5583 
5584 		len = sizeof(info);
5585 		memset(&info, 0, sizeof(info));
5586 		info.name = ptr_to_u64(name);
5587 		info.name_len = sizeof(name);
5588 
5589 		err = bpf_btf_get_info_by_fd(fd, &info, &len);
5590 		if (err) {
5591 			err = -errno;
5592 			pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5593 			goto err_out;
5594 		}
5595 
5596 		/* ignore non-module BTFs */
5597 		if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5598 			close(fd);
5599 			continue;
5600 		}
5601 
5602 		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5603 		err = libbpf_get_error(btf);
5604 		if (err) {
5605 			pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5606 				name, id, err);
5607 			goto err_out;
5608 		}
5609 
5610 		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5611 					sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5612 		if (err)
5613 			goto err_out;
5614 
5615 		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5616 
5617 		mod_btf->btf = btf;
5618 		mod_btf->id = id;
5619 		mod_btf->fd = fd;
5620 		mod_btf->name = strdup(name);
5621 		if (!mod_btf->name) {
5622 			err = -ENOMEM;
5623 			goto err_out;
5624 		}
5625 		continue;
5626 
5627 err_out:
5628 		close(fd);
5629 		return err;
5630 	}
5631 
5632 	return 0;
5633 }
5634 
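/* [Editor's illustrative sketch, not part of libbpf.c] The same kernel BTF
 * iteration can be done from a regular tool (privileges permitting) with the
 * low-level wrappers used above:
 */
#if 0
	__u32 id = 0;
	int fd;

	while (!bpf_btf_get_next_id(id, &id)) {
		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0)
			continue;	/* BTF object may have been unloaded meanwhile */
		/* ... bpf_btf_get_info_by_fd(fd, &info, &len) to fetch its name ... */
		close(fd);
	}
#endif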
5635 static struct bpf_core_cand_list *
5636 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5637 {
5638 	struct bpf_core_cand local_cand = {};
5639 	struct bpf_core_cand_list *cands;
5640 	const struct btf *main_btf;
5641 	const struct btf_type *local_t;
5642 	const char *local_name;
5643 	size_t local_essent_len;
5644 	int err, i;
5645 
5646 	local_cand.btf = local_btf;
5647 	local_cand.id = local_type_id;
5648 	local_t = btf__type_by_id(local_btf, local_type_id);
5649 	if (!local_t)
5650 		return ERR_PTR(-EINVAL);
5651 
5652 	local_name = btf__name_by_offset(local_btf, local_t->name_off);
5653 	if (str_is_empty(local_name))
5654 		return ERR_PTR(-EINVAL);
5655 	local_essent_len = bpf_core_essential_name_len(local_name);
5656 
5657 	cands = calloc(1, sizeof(*cands));
5658 	if (!cands)
5659 		return ERR_PTR(-ENOMEM);
5660 
5661 	/* Attempt to find target candidates in vmlinux BTF first */
5662 	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5663 	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5664 	if (err)
5665 		goto err_out;
5666 
5667 	/* if vmlinux BTF has any candidate, don't go for module BTFs */
5668 	if (cands->len)
5669 		return cands;
5670 
5671 	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5672 	if (obj->btf_vmlinux_override)
5673 		return cands;
5674 
5675 	/* now look through module BTFs, still trying to find candidates */
5676 	err = load_module_btfs(obj);
5677 	if (err)
5678 		goto err_out;
5679 
5680 	for (i = 0; i < obj->btf_module_cnt; i++) {
5681 		err = bpf_core_add_cands(&local_cand, local_essent_len,
5682 					 obj->btf_modules[i].btf,
5683 					 obj->btf_modules[i].name,
5684 					 btf__type_cnt(obj->btf_vmlinux),
5685 					 cands);
5686 		if (err)
5687 			goto err_out;
5688 	}
5689 
5690 	return cands;
5691 err_out:
5692 	bpf_core_free_cands(cands);
5693 	return ERR_PTR(err);
5694 }
5695 
5696 /* Check local and target types for compatibility. This check is used for
5697  * type-based CO-RE relocations and follows slightly different rules than
5698  * field-based relocations. This function assumes that root types were already
5699  * checked for name match. Beyond that initial root-level name check, names
5700  * are completely ignored. Compatibility rules are as follows:
5701  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5702  *     kind should match for local and target types (i.e., STRUCT is not
5703  *     compatible with UNION);
5704  *   - for ENUMs, the size is ignored;
5705  *   - for INT, size and signedness are ignored;
5706  *   - for ARRAY, dimensionality is ignored, element types are checked for
5707  *     compatibility recursively;
5708  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5709  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5710  *   - FUNC_PROTOs are compatible if they have compatible signature: same
5711  *     number of input args and compatible return and argument types.
5712  * These rules are not set in stone and probably will be adjusted as we get
5713  * more experience with using BPF CO-RE relocations.
5714  */
5715 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5716 			      const struct btf *targ_btf, __u32 targ_id)
5717 {
5718 	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5719 }
5720 
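/* [Editor's illustrative example, not part of libbpf.c] Per the rules above,
 * a local and a target STRUCT are compatible for type-based relocations even
 * when their members differ completely; only the kind has to match:
 */
#if 0
	/* local (BPF object) definition */
	struct request { int tag; };
	/* target (kernel BTF) definition -- also a STRUCT, hence compatible:
	 * struct request { u64 tag; struct bio *bio; ... };
	 */
#endif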
5721 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5722 			 const struct btf *targ_btf, __u32 targ_id)
5723 {
5724 	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5725 }
5726 
5727 static size_t bpf_core_hash_fn(const long key, void *ctx)
5728 {
5729 	return key;
5730 }
5731 
5732 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
5733 {
5734 	return k1 == k2;
5735 }
5736 
5737 static int record_relo_core(struct bpf_program *prog,
5738 			    const struct bpf_core_relo *core_relo, int insn_idx)
5739 {
5740 	struct reloc_desc *relos, *relo;
5741 
5742 	relos = libbpf_reallocarray(prog->reloc_desc,
5743 				    prog->nr_reloc + 1, sizeof(*relos));
5744 	if (!relos)
5745 		return -ENOMEM;
5746 	relo = &relos[prog->nr_reloc];
5747 	relo->type = RELO_CORE;
5748 	relo->insn_idx = insn_idx;
5749 	relo->core_relo = core_relo;
5750 	prog->reloc_desc = relos;
5751 	prog->nr_reloc++;
5752 	return 0;
5753 }
5754 
5755 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5756 {
5757 	struct reloc_desc *relo;
5758 	int i;
5759 
5760 	for (i = 0; i < prog->nr_reloc; i++) {
5761 		relo = &prog->reloc_desc[i];
5762 		if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5763 			continue;
5764 
5765 		return relo->core_relo;
5766 	}
5767 
5768 	return NULL;
5769 }
5770 
5771 static int bpf_core_resolve_relo(struct bpf_program *prog,
5772 				 const struct bpf_core_relo *relo,
5773 				 int relo_idx,
5774 				 const struct btf *local_btf,
5775 				 struct hashmap *cand_cache,
5776 				 struct bpf_core_relo_res *targ_res)
5777 {
5778 	struct bpf_core_spec specs_scratch[3] = {};
5779 	struct bpf_core_cand_list *cands = NULL;
5780 	const char *prog_name = prog->name;
5781 	const struct btf_type *local_type;
5782 	const char *local_name;
5783 	__u32 local_id = relo->type_id;
5784 	int err;
5785 
5786 	local_type = btf__type_by_id(local_btf, local_id);
5787 	if (!local_type)
5788 		return -EINVAL;
5789 
5790 	local_name = btf__name_by_offset(local_btf, local_type->name_off);
5791 	if (!local_name)
5792 		return -EINVAL;
5793 
5794 	if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5795 	    !hashmap__find(cand_cache, local_id, &cands)) {
5796 		cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5797 		if (IS_ERR(cands)) {
5798 			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5799 				prog_name, relo_idx, local_id, btf_kind_str(local_type),
5800 				local_name, PTR_ERR(cands));
5801 			return PTR_ERR(cands);
5802 		}
5803 		err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
5804 		if (err) {
5805 			bpf_core_free_cands(cands);
5806 			return err;
5807 		}
5808 	}
5809 
5810 	return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5811 				       targ_res);
5812 }
5813 
5814 static int
5815 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5816 {
5817 	const struct btf_ext_info_sec *sec;
5818 	struct bpf_core_relo_res targ_res;
5819 	const struct bpf_core_relo *rec;
5820 	const struct btf_ext_info *seg;
5821 	struct hashmap_entry *entry;
5822 	struct hashmap *cand_cache = NULL;
5823 	struct bpf_program *prog;
5824 	struct bpf_insn *insn;
5825 	const char *sec_name;
5826 	int i, err = 0, insn_idx, sec_idx, sec_num;
5827 
5828 	if (obj->btf_ext->core_relo_info.len == 0)
5829 		return 0;
5830 
5831 	if (targ_btf_path) {
5832 		obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5833 		err = libbpf_get_error(obj->btf_vmlinux_override);
5834 		if (err) {
5835 			pr_warn("failed to parse target BTF: %d\n", err);
5836 			return err;
5837 		}
5838 	}
5839 
5840 	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5841 	if (IS_ERR(cand_cache)) {
5842 		err = PTR_ERR(cand_cache);
5843 		goto out;
5844 	}
5845 
5846 	seg = &obj->btf_ext->core_relo_info;
5847 	sec_num = 0;
5848 	for_each_btf_ext_sec(seg, sec) {
5849 		sec_idx = seg->sec_idxs[sec_num];
5850 		sec_num++;
5851 
5852 		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5853 		if (str_is_empty(sec_name)) {
5854 			err = -EINVAL;
5855 			goto out;
5856 		}
5857 
5858 		pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5859 
5860 		for_each_btf_ext_rec(seg, sec, i, rec) {
5861 			if (rec->insn_off % BPF_INSN_SZ)
5862 				return -EINVAL;
5863 			insn_idx = rec->insn_off / BPF_INSN_SZ;
5864 			prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5865 			if (!prog) {
5866 				/* When __weak subprog is "overridden" by another instance
5867 				 * of the subprog from a different object file, linker still
5868 				 * appends all the .BTF.ext info that used to belong to that
5869 				 * eliminated subprogram.
5870 				 * This is similar to what x86-64 linker does for relocations.
5871 				 * So just ignore such relocations just like we ignore
5872 				 * subprog instructions when discovering subprograms.
5873 				 */
5874 				pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5875 					 sec_name, i, insn_idx);
5876 				continue;
5877 			}
5878 			/* no need to apply CO-RE relocation if the program is
5879 			 * not going to be loaded
5880 			 */
5881 			if (!prog->autoload)
5882 				continue;
5883 
5884 			/* adjust insn_idx from section frame of reference to the local
5885 			 * program's frame of reference; (sub-)program code is not yet
5886 			 * relocated, so it's enough to just subtract in-section offset
5887 			 */
5888 			insn_idx = insn_idx - prog->sec_insn_off;
5889 			if (insn_idx >= prog->insns_cnt)
5890 				return -EINVAL;
5891 			insn = &prog->insns[insn_idx];
5892 
5893 			err = record_relo_core(prog, rec, insn_idx);
5894 			if (err) {
5895 				pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5896 					prog->name, i, err);
5897 				goto out;
5898 			}
5899 
5900 			if (prog->obj->gen_loader)
5901 				continue;
5902 
5903 			err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5904 			if (err) {
5905 				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5906 					prog->name, i, err);
5907 				goto out;
5908 			}
5909 
5910 			err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5911 			if (err) {
5912 				pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5913 					prog->name, i, insn_idx, err);
5914 				goto out;
5915 			}
5916 		}
5917 	}
5918 
5919 out:
5920 	/* obj->btf_vmlinux and module BTFs are freed after object load */
5921 	btf__free(obj->btf_vmlinux_override);
5922 	obj->btf_vmlinux_override = NULL;
5923 
5924 	if (!IS_ERR_OR_NULL(cand_cache)) {
5925 		hashmap__for_each_entry(cand_cache, entry, i) {
5926 			bpf_core_free_cands(entry->pvalue);
5927 		}
5928 		hashmap__free(cand_cache);
5929 	}
5930 	return err;
5931 }
5932 
5933 /* base map load ldimm64 special constant, used also for log fixup logic */
5934 #define POISON_LDIMM64_MAP_BASE 2001000000
5935 #define POISON_LDIMM64_MAP_PFX "200100"
5936 
5937 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5938 			       int insn_idx, struct bpf_insn *insn,
5939 			       int map_idx, const struct bpf_map *map)
5940 {
5941 	int i;
5942 
5943 	pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5944 		 prog->name, relo_idx, insn_idx, map_idx, map->name);
5945 
5946 	/* we turn single ldimm64 into two identical invalid calls */
5947 	for (i = 0; i < 2; i++) {
5948 		insn->code = BPF_JMP | BPF_CALL;
5949 		insn->dst_reg = 0;
5950 		insn->src_reg = 0;
5951 		insn->off = 0;
5952 		/* if this instruction is reachable (not dead code),
5953 		 * verifier will complain with something like:
5954 		 * invalid func unknown#2001000123
5955 		 * where lower 123 is map index into obj->maps[] array
5956 		 */
5957 		insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
5958 
5959 		insn++;
5960 	}
5961 }
5962 
5963 /* unresolved kfunc call special constant, used also for log fixup logic */
5964 #define POISON_CALL_KFUNC_BASE 2002000000
5965 #define POISON_CALL_KFUNC_PFX "2002"
5966 
5967 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
5968 			      int insn_idx, struct bpf_insn *insn,
5969 			      int ext_idx, const struct extern_desc *ext)
5970 {
5971 	pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
5972 		 prog->name, relo_idx, insn_idx, ext->name);
5973 
5974 	/* we turn kfunc call into invalid helper call with identifiable constant */
5975 	insn->code = BPF_JMP | BPF_CALL;
5976 	insn->dst_reg = 0;
5977 	insn->src_reg = 0;
5978 	insn->off = 0;
5979 	/* if this instruction is reachable (not dead code), the
5980 	 * verifier will complain with something like:
5981 	 * invalid func unknown#2002000123
5982 	 * where lower 123 is extern index into obj->externs[] array
5983 	 */
5984 	insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
5985 }
5986 
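/* [Editor's illustrative sketch, not part of libbpf.c] The poison constants
 * are chosen so that the original index can be recovered later (e.g. by the
 * verifier log fixup code) with simple arithmetic; insn, obj, map_idx and
 * ext_idx below are hypothetical:
 */
#if 0
	if (insn->imm >= POISON_LDIMM64_MAP_BASE &&
	    insn->imm < POISON_LDIMM64_MAP_BASE + obj->nr_maps)
		map_idx = insn->imm - POISON_LDIMM64_MAP_BASE;
	else if (insn->imm >= POISON_CALL_KFUNC_BASE)
		ext_idx = insn->imm - POISON_CALL_KFUNC_BASE;
#endif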
5987 /* Relocate data references within program code:
5988  *  - map references;
5989  *  - global variable references;
5990  *  - extern references.
5991  */
5992 static int
5993 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5994 {
5995 	int i;
5996 
5997 	for (i = 0; i < prog->nr_reloc; i++) {
5998 		struct reloc_desc *relo = &prog->reloc_desc[i];
5999 		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6000 		const struct bpf_map *map;
6001 		struct extern_desc *ext;
6002 
6003 		switch (relo->type) {
6004 		case RELO_LD64:
6005 			map = &obj->maps[relo->map_idx];
6006 			if (obj->gen_loader) {
6007 				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
6008 				insn[0].imm = relo->map_idx;
6009 			} else if (map->autocreate) {
6010 				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6011 				insn[0].imm = map->fd;
6012 			} else {
6013 				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6014 						   relo->map_idx, map);
6015 			}
6016 			break;
6017 		case RELO_DATA:
6018 			map = &obj->maps[relo->map_idx];
6019 			insn[1].imm = insn[0].imm + relo->sym_off;
6020 			if (obj->gen_loader) {
6021 				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6022 				insn[0].imm = relo->map_idx;
6023 			} else if (map->autocreate) {
6024 				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6025 				insn[0].imm = map->fd;
6026 			} else {
6027 				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6028 						   relo->map_idx, map);
6029 			}
6030 			break;
6031 		case RELO_EXTERN_LD64:
6032 			ext = &obj->externs[relo->ext_idx];
6033 			if (ext->type == EXT_KCFG) {
6034 				if (obj->gen_loader) {
6035 					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6036 					insn[0].imm = obj->kconfig_map_idx;
6037 				} else {
6038 					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6039 					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6040 				}
6041 				insn[1].imm = ext->kcfg.data_off;
6042 			} else /* EXT_KSYM */ {
6043 				if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
6044 					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6045 					insn[0].imm = ext->ksym.kernel_btf_id;
6046 					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6047 				} else { /* typeless ksyms or unresolved typed ksyms */
6048 					insn[0].imm = (__u32)ext->ksym.addr;
6049 					insn[1].imm = ext->ksym.addr >> 32;
6050 				}
6051 			}
6052 			break;
6053 		case RELO_EXTERN_CALL:
6054 			ext = &obj->externs[relo->ext_idx];
6055 			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6056 			if (ext->is_set) {
6057 				insn[0].imm = ext->ksym.kernel_btf_id;
6058 				insn[0].off = ext->ksym.btf_fd_idx;
6059 			} else { /* unresolved weak kfunc call */
6060 				poison_kfunc_call(prog, i, relo->insn_idx, insn,
6061 						  relo->ext_idx, ext);
6062 			}
6063 			break;
6064 		case RELO_SUBPROG_ADDR:
6065 			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6066 				pr_warn("prog '%s': relo #%d: bad insn\n",
6067 					prog->name, i);
6068 				return -EINVAL;
6069 			}
6070 			/* handled already */
6071 			break;
6072 		case RELO_CALL:
6073 			/* handled already */
6074 			break;
6075 		case RELO_CORE:
6076 			/* will be handled by bpf_program_record_relos() */
6077 			break;
6078 		default:
6079 			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6080 				prog->name, i, relo->type);
6081 			return -EINVAL;
6082 		}
6083 	}
6084 
6085 	return 0;
6086 }
6087 
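/* [Editor's illustrative example, not part of libbpf.c] After a RELO_LD64
 * relocation, a map reference is an ordinary two-instruction ldimm64 carrying
 * the map FD, i.e. the same encoding the BPF_LD_MAP_FD() macro produces
 * (map_fd is assumed to be a valid map FD):
 */
#if 0
	struct bpf_insn ld_map_fd[] = {
		/* insn[0].src_reg = BPF_PSEUDO_MAP_FD, insn[0].imm = map_fd */
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
	};
#endif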
6088 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6089 				    const struct bpf_program *prog,
6090 				    const struct btf_ext_info *ext_info,
6091 				    void **prog_info, __u32 *prog_rec_cnt,
6092 				    __u32 *prog_rec_sz)
6093 {
6094 	void *copy_start = NULL, *copy_end = NULL;
6095 	void *rec, *rec_end, *new_prog_info;
6096 	const struct btf_ext_info_sec *sec;
6097 	size_t old_sz, new_sz;
6098 	int i, sec_num, sec_idx, off_adj;
6099 
6100 	sec_num = 0;
6101 	for_each_btf_ext_sec(ext_info, sec) {
6102 		sec_idx = ext_info->sec_idxs[sec_num];
6103 		sec_num++;
6104 		if (prog->sec_idx != sec_idx)
6105 			continue;
6106 
6107 		for_each_btf_ext_rec(ext_info, sec, i, rec) {
6108 			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6109 
6110 			if (insn_off < prog->sec_insn_off)
6111 				continue;
6112 			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6113 				break;
6114 
6115 			if (!copy_start)
6116 				copy_start = rec;
6117 			copy_end = rec + ext_info->rec_size;
6118 		}
6119 
6120 		if (!copy_start)
6121 			return -ENOENT;
6122 
6123 		/* append func/line info of a given (sub-)program to the main
6124 		 * program func/line info
6125 		 */
6126 		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6127 		new_sz = old_sz + (copy_end - copy_start);
6128 		new_prog_info = realloc(*prog_info, new_sz);
6129 		if (!new_prog_info)
6130 			return -ENOMEM;
6131 		*prog_info = new_prog_info;
6132 		*prog_rec_cnt = new_sz / ext_info->rec_size;
6133 		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6134 
6135 		/* Kernel instruction offsets are in units of 8-byte
6136 		 * instructions, while .BTF.ext instruction offsets generated
6137 		 * by Clang are in units of bytes. So convert Clang offsets
6138 		 * into kernel offsets and adjust offset according to program
6139 		 * relocated position.
6140 		 */
6141 		off_adj = prog->sub_insn_off - prog->sec_insn_off;
6142 		rec = new_prog_info + old_sz;
6143 		rec_end = new_prog_info + new_sz;
6144 		for (; rec < rec_end; rec += ext_info->rec_size) {
6145 			__u32 *insn_off = rec;
6146 
6147 			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6148 		}
6149 		*prog_rec_sz = ext_info->rec_size;
6150 		return 0;
6151 	}
6152 
6153 	return -ENOENT;
6154 }
6155 
6156 static int
6157 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6158 			      struct bpf_program *main_prog,
6159 			      const struct bpf_program *prog)
6160 {
6161 	int err;
6162 
6163 	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6164 	 * support func/line info
6165 	 */
6166 	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6167 		return 0;
6168 
6169 	/* only attempt func info relocation if main program's func_info
6170 	 * relocation was successful
6171 	 */
6172 	if (main_prog != prog && !main_prog->func_info)
6173 		goto line_info;
6174 
6175 	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6176 				       &main_prog->func_info,
6177 				       &main_prog->func_info_cnt,
6178 				       &main_prog->func_info_rec_size);
6179 	if (err) {
6180 		if (err != -ENOENT) {
6181 			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6182 				prog->name, err);
6183 			return err;
6184 		}
6185 		if (main_prog->func_info) {
6186 			/*
6187 			 * Some info has already been found, but there is a problem
6188 			 * with the last btf_ext reloc. We must error out.
6189 			 */
6190 			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6191 			return err;
6192 		}
6193 		/* There was a problem loading the very first info. Ignore the rest. */
6194 		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6195 			prog->name);
6196 	}
6197 
6198 line_info:
6199 	/* don't relocate line info if main program's relocation failed */
6200 	if (main_prog != prog && !main_prog->line_info)
6201 		return 0;
6202 
6203 	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6204 				       &main_prog->line_info,
6205 				       &main_prog->line_info_cnt,
6206 				       &main_prog->line_info_rec_size);
6207 	if (err) {
6208 		if (err != -ENOENT) {
6209 			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6210 				prog->name, err);
6211 			return err;
6212 		}
6213 		if (main_prog->line_info) {
6214 			/*
6215 			 * Some info has already been found, but there is a problem
6216 			 * with the last btf_ext reloc. We must error out.
6217 			 */
6218 			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6219 			return err;
6220 		}
6221 		/* There was a problem loading the very first info. Ignore the rest. */
6222 		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6223 			prog->name);
6224 	}
6225 	return 0;
6226 }
6227 
6228 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6229 {
6230 	size_t insn_idx = *(const size_t *)key;
6231 	const struct reloc_desc *relo = elem;
6232 
6233 	if (insn_idx == relo->insn_idx)
6234 		return 0;
6235 	return insn_idx < relo->insn_idx ? -1 : 1;
6236 }
6237 
6238 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6239 {
6240 	if (!prog->nr_reloc)
6241 		return NULL;
6242 	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6243 		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6244 }
6245 
6246 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6247 {
6248 	int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6249 	struct reloc_desc *relos;
6250 	int i;
6251 
6252 	if (main_prog == subprog)
6253 		return 0;
6254 	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6255 	/* if new count is zero, reallocarray can return a valid NULL result;
6256 	 * in this case the previous pointer will be freed, so we *have to*
6257 	 * reassign old pointer to the new value (even if it's NULL)
6258 	 */
6259 	if (!relos && new_cnt)
6260 		return -ENOMEM;
6261 	if (subprog->nr_reloc)
6262 		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6263 		       sizeof(*relos) * subprog->nr_reloc);
6264 
6265 	for (i = main_prog->nr_reloc; i < new_cnt; i++)
6266 		relos[i].insn_idx += subprog->sub_insn_off;
6267 	/* After insn_idx adjustment the 'relos' array is still sorted
6268 	 * by insn_idx and doesn't break bsearch.
6269 	 */
6270 	main_prog->reloc_desc = relos;
6271 	main_prog->nr_reloc = new_cnt;
6272 	return 0;
6273 }
6274 
6275 static int
6276 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6277 				struct bpf_program *subprog)
6278 {
6279        struct bpf_insn *insns;
6280        size_t new_cnt;
6281        int err;
6282 
6283        subprog->sub_insn_off = main_prog->insns_cnt;
6284 
6285        new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6286        insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6287        if (!insns) {
6288                pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6289                return -ENOMEM;
6290        }
6291        main_prog->insns = insns;
6292        main_prog->insns_cnt = new_cnt;
6293 
6294        memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6295               subprog->insns_cnt * sizeof(*insns));
6296 
6297        pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6298                 main_prog->name, subprog->insns_cnt, subprog->name);
6299 
6300        /* The subprog insns are now appended. Append its relos too. */
6301        err = append_subprog_relos(main_prog, subprog);
6302        if (err)
6303                return err;
6304        return 0;
6305 }
6306 
6307 static int
6308 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6309 		       struct bpf_program *prog)
6310 {
6311 	size_t sub_insn_idx, insn_idx;
6312 	struct bpf_program *subprog;
6313 	struct reloc_desc *relo;
6314 	struct bpf_insn *insn;
6315 	int err;
6316 
6317 	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6318 	if (err)
6319 		return err;
6320 
6321 	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6322 		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6323 		if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6324 			continue;
6325 
6326 		relo = find_prog_insn_relo(prog, insn_idx);
6327 		if (relo && relo->type == RELO_EXTERN_CALL)
6328 			/* kfunc relocations will be handled later
6329 			 * in bpf_object__relocate_data()
6330 			 */
6331 			continue;
6332 		if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6333 			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6334 				prog->name, insn_idx, relo->type);
6335 			return -LIBBPF_ERRNO__RELOC;
6336 		}
6337 		if (relo) {
6338 			/* sub-program instruction index is a combination of
6339 			 * an offset of a symbol pointed to by relocation and
6340 			 * call instruction's imm field; for global functions,
6341 			 * call always has imm = -1, but for static functions
6342 			 * relocation is against STT_SECTION and insn->imm
6343 			 * points to a start of a static function
6344 			 *
6345 			 * for subprog addr relocation, the relo->sym_off + insn->imm is
6346 			 * the byte offset in the corresponding section.
6347 			 */
6348 			if (relo->type == RELO_CALL)
6349 				sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6350 			else
6351 				sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6352 		} else if (insn_is_pseudo_func(insn)) {
6353 			/*
6354 			 * RELO_SUBPROG_ADDR relo is always emitted even if both
6355 			 * functions are in the same section, so it shouldn't reach here.
6356 			 */
6357 			pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6358 				prog->name, insn_idx);
6359 			return -LIBBPF_ERRNO__RELOC;
6360 		} else {
6361 			/* if subprogram call is to a static function within
6362 			 * the same ELF section, there won't be any relocation
6363 			 * emitted, but it also means there is no additional
6364 			 * offset necessary, insns->imm is relative to
6365 			 * instruction's original position within the section
6366 			 */
6367 			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6368 		}
6369 
6370 		/* we enforce that sub-programs should be in .text section */
6371 		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6372 		if (!subprog) {
6373 			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6374 				prog->name);
6375 			return -LIBBPF_ERRNO__RELOC;
6376 		}
6377 
6378 		/* if it's the first call instruction calling into this
6379 		 * subprogram (meaning this subprog hasn't been processed
6380 		 * yet) within the context of current main program:
6381 		 *   - append it at the end of the main program's instruction block;
6382 		 *   - process it recursively, while the current program is put on hold;
6383 		 *   - if that subprogram calls some other not-yet-processed
6384 		 *   subprogram, the same thing happens recursively until
6385 		 *   there are no more unprocessed subprograms left to append
6386 		 *   and relocate.
6387 		 */
6388 		if (subprog->sub_insn_off == 0) {
6389 			err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6390 			if (err)
6391 				return err;
6392 			err = bpf_object__reloc_code(obj, main_prog, subprog);
6393 			if (err)
6394 				return err;
6395 		}
6396 
6397 		/* main_prog->insns memory could have been re-allocated, so
6398 		 * calculate pointer again
6399 		 */
6400 		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6401 		/* calculate correct instruction position within current main
6402 		 * prog; each main prog can have a different set of
6403 		 * subprograms appended (potentially in different order as
6404 		 * well), so position of any subprog can be different for
6405 		 * different main programs
6406 		 */
6407 		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6408 
6409 		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6410 			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6411 	}
6412 
6413 	return 0;
6414 }
6415 
6416 /*
6417  * Relocate sub-program calls.
6418  *
6419  * Algorithm operates as follows. Each entry-point BPF program (referred to as
6420  * main prog) is processed separately. Each subprog (a non-entry function
6421  * that can be called from either entry progs or other subprogs) gets its
6422  * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6423  * hasn't yet been appended and relocated within the current main prog. Once it
6424  * is relocated, sub_insn_off will point at the position within the current main
6425  * prog where the given subprog was appended. This will further be used to relocate all
6426  * the call instructions jumping into this subprog.
6427  *
6428  * We start with main program and process all call instructions. If the call
6429  * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6430  * is zero), subprog instructions are appended at the end of main program's
6431  * instruction array. Then main program is "put on hold" while we recursively
6432  * process newly appended subprogram. If that subprogram calls into another
6433  * subprogram that hasn't been appended, new subprogram is appended again to
6434  * the *main* prog's instructions (subprog's instructions are always left
6435  * untouched, as they need to be in unmodified state for subsequent main progs
6436  * and subprog instructions are always sent only as part of a main prog) and
6437  * the process continues recursively. Once all the subprogs called from a main
6438  * prog or any of its subprogs are appended (and relocated), all their
6439  * positions within finalized instructions array are known, so it's easy to
6440  * rewrite call instructions with correct relative offsets, corresponding to
6441  * desired target subprog.
6442  *
6443  * It's important to realize that some subprogs might not be called from some
6444  * main prog and any of its called/used subprogs. Those will keep their
6445  * subprog->sub_insn_off as zero at all times and won't be appended to current
6446  * main prog and won't be relocated within the context of current main prog.
6447  * They might still be used from other main progs later.
6448  *
6449  * Visually this process can be shown as below. Suppose we have two main
6450  * programs mainA and mainB and BPF object contains three subprogs: subA,
6451  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6452  * subC both call subB:
6453  *
6454  *        +--------+ +-------+
6455  *        |        v v       |
6456  *     +--+---+ +--+-+-+ +---+--+
6457  *     | subA | | subB | | subC |
6458  *     +--+---+ +------+ +---+--+
6459  *        ^                  ^
6460  *        |                  |
6461  *    +---+-------+   +------+----+
6462  *    |   mainA   |   |   mainB   |
6463  *    +-----------+   +-----------+
6464  *
6465  * We'll start relocating mainA, will find subA, append it and start
6466  * processing sub A recursively:
6467  *
6468  *    +-----------+------+
6469  *    |   mainA   | subA |
6470  *    +-----------+------+
6471  *
6472  * At this point we notice that subB is used from subA, so we append it and
6473  * relocate (there are no further subcalls from subB):
6474  *
6475  *    +-----------+------+------+
6476  *    |   mainA   | subA | subB |
6477  *    +-----------+------+------+
6478  *
6479  * At this point, we relocate subA calls, then go one level up and finish with
6480  * relocating mainA calls. mainA is done.
6481  *
6482  * For mainB process is similar but results in different order. We start with
6483  * mainB and skip subA and subB, as mainB never calls them (at least
6484  * directly), but we see subC is needed, so we append and start processing it:
6485  *
6486  *    +-----------+------+
6487  *    |   mainB   | subC |
6488  *    +-----------+------+
6489  * Now we see subC needs subB, so we go back to it, append and relocate it:
6490  *
6491  *    +-----------+------+------+
6492  *    |   mainB   | subC | subB |
6493  *    +-----------+------+------+
6494  *
6495  * At this point we unwind recursion, relocate calls in subC, then in mainB.
6496  */
6497 static int
6498 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6499 {
6500 	struct bpf_program *subprog;
6501 	int i, err;
6502 
6503 	/* mark all subprogs as not relocated (yet) within the context of
6504 	 * current main program
6505 	 */
6506 	for (i = 0; i < obj->nr_programs; i++) {
6507 		subprog = &obj->programs[i];
6508 		if (!prog_is_subprog(obj, subprog))
6509 			continue;
6510 
6511 		subprog->sub_insn_off = 0;
6512 	}
6513 
6514 	err = bpf_object__reloc_code(obj, prog, prog);
6515 	if (err)
6516 		return err;
6517 
6518 	return 0;
6519 }
6520 
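/* [Editor's illustrative example, not part of libbpf.c] The rewritten call
 * imm computed in bpf_object__reloc_code() is a relative instruction offset.
 * For example, if a subprog was appended at main-prog offset 100 and the call
 * sits at instruction 5 of the main prog (whose sub_insn_off is 0):
 *
 *	insn->imm = 100 - (0 + 5) - 1 = 94
 *
 * i.e. execution continues 94 instructions past the insn following the call.
 */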
6521 static void
6522 bpf_object__free_relocs(struct bpf_object *obj)
6523 {
6524 	struct bpf_program *prog;
6525 	int i;
6526 
6527 	/* free up relocation descriptors */
6528 	for (i = 0; i < obj->nr_programs; i++) {
6529 		prog = &obj->programs[i];
6530 		zfree(&prog->reloc_desc);
6531 		prog->nr_reloc = 0;
6532 	}
6533 }
6534 
6535 static int cmp_relocs(const void *_a, const void *_b)
6536 {
6537 	const struct reloc_desc *a = _a;
6538 	const struct reloc_desc *b = _b;
6539 
6540 	if (a->insn_idx != b->insn_idx)
6541 		return a->insn_idx < b->insn_idx ? -1 : 1;
6542 
6543 	/* no two relocations should have the same insn_idx, but ... */
6544 	if (a->type != b->type)
6545 		return a->type < b->type ? -1 : 1;
6546 
6547 	return 0;
6548 }
6549 
6550 static void bpf_object__sort_relos(struct bpf_object *obj)
6551 {
6552 	int i;
6553 
6554 	for (i = 0; i < obj->nr_programs; i++) {
6555 		struct bpf_program *p = &obj->programs[i];
6556 
6557 		if (!p->nr_reloc)
6558 			continue;
6559 
6560 		qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6561 	}
6562 }
6563 
6564 static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
6565 {
6566 	const char *str = "exception_callback:";
6567 	size_t pfx_len = strlen(str);
6568 	int i, j, n;
6569 
6570 	if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6571 		return 0;
6572 
6573 	n = btf__type_cnt(obj->btf);
6574 	for (i = 1; i < n; i++) {
6575 		const char *name;
6576 		struct btf_type *t;
6577 
6578 		t = btf_type_by_id(obj->btf, i);
6579 		if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6580 			continue;
6581 
6582 		name = btf__str_by_offset(obj->btf, t->name_off);
6583 		if (strncmp(name, str, pfx_len) != 0)
6584 			continue;
6585 
6586 		t = btf_type_by_id(obj->btf, t->type);
6587 		if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
6588 			pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
6589 				prog->name);
6590 			return -EINVAL;
6591 		}
6592 		if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
6593 			continue;
6594 		/* Multiple callbacks are specified for the same prog,
6595 		 * the verifier will eventually return an error for this
6596 		 * case, hence simply skip appending a subprog.
6597 		 */
6598 		if (prog->exception_cb_idx >= 0) {
6599 			prog->exception_cb_idx = -1;
6600 			break;
6601 		}
6602 
6603 		name += pfx_len;
6604 		if (str_is_empty(name)) {
6605 			pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
6606 				prog->name);
6607 			return -EINVAL;
6608 		}
6609 
6610 		for (j = 0; j < obj->nr_programs; j++) {
6611 			struct bpf_program *subprog = &obj->programs[j];
6612 
6613 			if (!prog_is_subprog(obj, subprog))
6614 				continue;
6615 			if (strcmp(name, subprog->name) != 0)
6616 				continue;
6617 			/* Enforce non-hidden, as from the verifier's point
6618 			 * of view it expects global functions, whereas
6619 			 * mark_btf_static fixes up linkage as static.
6620 			 */
6621 			if (!subprog->sym_global || subprog->mark_btf_static) {
6622 				pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
6623 					prog->name, subprog->name);
6624 				return -EINVAL;
6625 			}
6626 			/* Let's see if we already saw a static exception callback with the same name */
6627 			if (prog->exception_cb_idx >= 0) {
6628 				pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
6629 					prog->name, subprog->name);
6630 				return -EINVAL;
6631 			}
6632 			prog->exception_cb_idx = j;
6633 			break;
6634 		}
6635 
6636 		if (prog->exception_cb_idx >= 0)
6637 			continue;
6638 
6639 		pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
6640 		return -ENOENT;
6641 	}
6642 
6643 	return 0;
6644 }
6645 
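/* BPF-side counterpart of the "exception_callback:<value>" decl tag handled
 * above; an illustrative sketch using the __exception_cb() convenience macro
 * from selftests' bpf_experimental.h (names are made up):
 *
 *	__noinline int my_exc_cb(u64 cookie)
 *	{
 *		return cookie;
 *	}
 *
 *	SEC("tc")
 *	__exception_cb(my_exc_cb)
 *	int main_prog(struct __sk_buff *skb)
 *	{
 *		bpf_throw(0);
 *		return 0;
 *	}
 */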
6646 static struct {
6647 	enum bpf_prog_type prog_type;
6648 	const char *ctx_name;
6649 } global_ctx_map[] = {
6650 	{ BPF_PROG_TYPE_CGROUP_DEVICE,           "bpf_cgroup_dev_ctx" },
6651 	{ BPF_PROG_TYPE_CGROUP_SKB,              "__sk_buff" },
6652 	{ BPF_PROG_TYPE_CGROUP_SOCK,             "bpf_sock" },
6653 	{ BPF_PROG_TYPE_CGROUP_SOCK_ADDR,        "bpf_sock_addr" },
6654 	{ BPF_PROG_TYPE_CGROUP_SOCKOPT,          "bpf_sockopt" },
6655 	{ BPF_PROG_TYPE_CGROUP_SYSCTL,           "bpf_sysctl" },
6656 	{ BPF_PROG_TYPE_FLOW_DISSECTOR,          "__sk_buff" },
6657 	{ BPF_PROG_TYPE_KPROBE,                  "bpf_user_pt_regs_t" },
6658 	{ BPF_PROG_TYPE_LWT_IN,                  "__sk_buff" },
6659 	{ BPF_PROG_TYPE_LWT_OUT,                 "__sk_buff" },
6660 	{ BPF_PROG_TYPE_LWT_SEG6LOCAL,           "__sk_buff" },
6661 	{ BPF_PROG_TYPE_LWT_XMIT,                "__sk_buff" },
6662 	{ BPF_PROG_TYPE_NETFILTER,               "bpf_nf_ctx" },
6663 	{ BPF_PROG_TYPE_PERF_EVENT,              "bpf_perf_event_data" },
6664 	{ BPF_PROG_TYPE_RAW_TRACEPOINT,          "bpf_raw_tracepoint_args" },
6665 	{ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
6666 	{ BPF_PROG_TYPE_SCHED_ACT,               "__sk_buff" },
6667 	{ BPF_PROG_TYPE_SCHED_CLS,               "__sk_buff" },
6668 	{ BPF_PROG_TYPE_SK_LOOKUP,               "bpf_sk_lookup" },
6669 	{ BPF_PROG_TYPE_SK_MSG,                  "sk_msg_md" },
6670 	{ BPF_PROG_TYPE_SK_REUSEPORT,            "sk_reuseport_md" },
6671 	{ BPF_PROG_TYPE_SK_SKB,                  "__sk_buff" },
6672 	{ BPF_PROG_TYPE_SOCK_OPS,                "bpf_sock_ops" },
6673 	{ BPF_PROG_TYPE_SOCKET_FILTER,           "__sk_buff" },
6674 	{ BPF_PROG_TYPE_XDP,                     "xdp_md" },
6675 	/* all other program types don't have "named" context structs */
6676 };
6677 
6678 /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6679  * for below __builtin_types_compatible_p() checks;
6680  * with this approach we don't need any extra arch-specific #ifdef guards
6681  */
6682 struct pt_regs;
6683 struct user_pt_regs;
6684 struct user_regs_struct;
6685 
6686 static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
6687 				     const char *subprog_name, int arg_idx,
6688 				     int arg_type_id, const char *ctx_name)
6689 {
6690 	const struct btf_type *t;
6691 	const char *tname;
6692 
6693 	/* check if existing parameter already matches verifier expectations */
6694 	t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
6695 	if (!btf_is_ptr(t))
6696 		goto out_warn;
6697 
6698 	/* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
6699 	 * and perf_event programs, so check this case early on and forget
6700 	 * about it for subsequent checks
6701 	 */
6702 	while (btf_is_mod(t))
6703 		t = btf__type_by_id(btf, t->type);
6704 	if (btf_is_typedef(t) &&
6705 	    (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6706 		tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6707 		if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
6708 			return false; /* canonical type for kprobe/perf_event */
6709 	}
6710 
6711 	/* now we can ignore typedefs moving forward */
6712 	t = skip_mods_and_typedefs(btf, t->type, NULL);
6713 
6714 	/* if it's `void *`, definitely fix up BTF info */
6715 	if (btf_is_void(t))
6716 		return true;
6717 
6718 	/* if it's already proper canonical type, no need to fix up */
6719 	tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6720 	if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
6721 		return false;
6722 
6723 	/* special cases */
6724 	switch (prog->type) {
6725 	case BPF_PROG_TYPE_KPROBE:
6726 		/* `struct pt_regs *` is expected, but we need to fix up */
6727 		if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6728 			return true;
6729 		break;
6730 	case BPF_PROG_TYPE_PERF_EVENT:
6731 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6732 		    btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6733 			return true;
6734 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6735 		    btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6736 			return true;
6737 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6738 		    btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6739 			return true;
6740 		break;
6741 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
6742 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6743 		/* allow u64* as ctx */
6744 		if (btf_is_int(t) && t->size == 8)
6745 			return true;
6746 		break;
6747 	default:
6748 		break;
6749 	}
6750 
6751 out_warn:
6752 	pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
6753 		prog->name, subprog_name, arg_idx, ctx_name);
6754 	return false;
6755 }
6756 
6757 static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
6758 {
6759 	int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
6760 	int i, err, arg_cnt, fn_name_off, linkage;
6761 	struct btf_type *fn_t, *fn_proto_t, *t;
6762 	struct btf_param *p;
6763 
6764 	/* caller already validated FUNC -> FUNC_PROTO validity */
6765 	fn_t = btf_type_by_id(btf, orig_fn_id);
6766 	fn_proto_t = btf_type_by_id(btf, fn_t->type);
6767 
6768 	/* Note that each btf__add_xxx() operation invalidates
6769 	 * all btf_type and string pointers, so we need to be
6770 	 * very careful when cloning BTF types. BTF type
6771 	 * pointers have to be always refetched. And to avoid
6772 	 * problems with invalidated string pointers, we
6773 	 * add empty strings initially, then just fix up
6774 	 * name_off offsets in place. Offsets are stable for
6775 	 * existing strings, so that works out.
6776 	 */
6777 	fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6778 	linkage = btf_func_linkage(fn_t);
6779 	orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6780 	ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6781 	arg_cnt = btf_vlen(fn_proto_t);
6782 
6783 	/* clone FUNC_PROTO and its params */
6784 	fn_proto_id = btf__add_func_proto(btf, ret_type_id);
6785 	if (fn_proto_id < 0)
6786 		return -EINVAL;
6787 
6788 	for (i = 0; i < arg_cnt; i++) {
6789 		int name_off;
6790 
6791 		/* copy original parameter data */
6792 		t = btf_type_by_id(btf, orig_proto_id);
6793 		p = &btf_params(t)[i];
6794 		name_off = p->name_off;
6795 
6796 		err = btf__add_func_param(btf, "", p->type);
6797 		if (err)
6798 			return err;
6799 
6800 		fn_proto_t = btf_type_by_id(btf, fn_proto_id);
6801 		p = &btf_params(fn_proto_t)[i];
6802 		p->name_off = name_off; /* use remembered str offset */
6803 	}
6804 
6805 	/* clone FUNC now, btf__add_func() enforces non-empty name, so use
6806 	 * entry program's name as a placeholder, which we replace immediately
6807 	 * with original name_off
6808 	 */
6809 	fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6810 	if (fn_id < 0)
6811 		return -EINVAL;
6812 
6813 	fn_t = btf_type_by_id(btf, fn_id);
6814 	fn_t->name_off = fn_name_off; /* reuse original string */
6815 
6816 	return fn_id;
6817 }
6818 
6819 /* Check if main program or global subprog's function prototype has `arg:ctx`
6820  * argument tags, and, if necessary, substitute correct type to match what BPF
6821  * verifier would expect, taking into account the specific program type. This
6822  * allows supporting the __arg_ctx tag transparently on old kernels that don't
6823  * yet have native support for it in the verifier, making the user's life much
6824  * easier.
6825  */
6826 static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
6827 {
6828 	const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
6829 	struct bpf_func_info_min *func_rec;
6830 	struct btf_type *fn_t, *fn_proto_t;
6831 	struct btf *btf = obj->btf;
6832 	const struct btf_type *t;
6833 	struct btf_param *p;
6834 	int ptr_id = 0, struct_id, tag_id, orig_fn_id;
6835 	int i, n, arg_idx, arg_cnt, err, rec_idx;
6836 	int *orig_ids;
6837 
6838 	/* no .BTF.ext, no problem */
6839 	if (!obj->btf_ext || !prog->func_info)
6840 		return 0;
6841 
6842 	/* don't do any fix ups if kernel natively supports __arg_ctx */
6843 	if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
6844 		return 0;
6845 
6846 	/* some BPF program types just don't have named context structs, so
6847 	 * this fallback mechanism doesn't work for them
6848 	 */
6849 	for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
6850 		if (global_ctx_map[i].prog_type != prog->type)
6851 			continue;
6852 		ctx_name = global_ctx_map[i].ctx_name;
6853 		break;
6854 	}
6855 	if (!ctx_name)
6856 		return 0;
6857 
6858 	/* remember original func BTF IDs to detect if we already cloned them */
6859 	orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
6860 	if (!orig_ids)
6861 		return -ENOMEM;
6862 	for (i = 0; i < prog->func_info_cnt; i++) {
6863 		func_rec = prog->func_info + prog->func_info_rec_size * i;
6864 		orig_ids[i] = func_rec->type_id;
6865 	}
6866 
6867 	/* go through each DECL_TAG with "arg:ctx" and see if it points to one
6868 	 * of our subprogs; if yes and subprog is global and needs adjustment,
6869 	 * clone and adjust FUNC -> FUNC_PROTO combo
6870 	 */
6871 	for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
6872 		/* only DECL_TAG with "arg:ctx" value are interesting */
6873 		t = btf__type_by_id(btf, i);
6874 		if (!btf_is_decl_tag(t))
6875 			continue;
6876 		if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
6877 			continue;
6878 
6879 		/* only global funcs need adjustment, if at all */
6880 		orig_fn_id = t->type;
6881 		fn_t = btf_type_by_id(btf, orig_fn_id);
6882 		if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
6883 			continue;
6884 
6885 		/* sanity check FUNC -> FUNC_PROTO chain, just in case */
6886 		fn_proto_t = btf_type_by_id(btf, fn_t->type);
6887 		if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
6888 			continue;
6889 
6890 		/* find corresponding func_info record */
6891 		func_rec = NULL;
6892 		for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
6893 			if (orig_ids[rec_idx] == t->type) {
6894 				func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
6895 				break;
6896 			}
6897 		}
6898 		/* current main program doesn't call into this subprog */
6899 		if (!func_rec)
6900 			continue;
6901 
6902 		/* some more sanity checking of DECL_TAG */
6903 		arg_cnt = btf_vlen(fn_proto_t);
6904 		arg_idx = btf_decl_tag(t)->component_idx;
6905 		if (arg_idx < 0 || arg_idx >= arg_cnt)
6906 			continue;
6907 
6908 		/* check if we should fix up argument type */
6909 		p = &btf_params(fn_proto_t)[arg_idx];
6910 		fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
6911 		if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
6912 			continue;
6913 
6914 		/* clone fn/fn_proto, unless we already did it for another arg */
6915 		if (func_rec->type_id == orig_fn_id) {
6916 			int fn_id;
6917 
6918 			fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
6919 			if (fn_id < 0) {
6920 				err = fn_id;
6921 				goto err_out;
6922 			}
6923 
6924 			/* point func_info record to a cloned FUNC type */
6925 			func_rec->type_id = fn_id;
6926 		}
6927 
6928 		/* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
6929 		 * we do it just once per main BPF program, as all global
6930 		 * funcs share the same program type, so a single PTR ->
6931 		 * STRUCT type chain is enough
6932 		 */
6933 		if (ptr_id == 0) {
6934 			struct_id = btf__add_struct(btf, ctx_name, 0);
6935 			ptr_id = btf__add_ptr(btf, struct_id);
6936 			if (ptr_id < 0 || struct_id < 0) {
6937 				err = -EINVAL;
6938 				goto err_out;
6939 			}
6940 		}
6941 
6942 		/* for completeness, clone DECL_TAG and point it to cloned param */
6943 		tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
6944 		if (tag_id < 0) {
6945 			err = -EINVAL;
6946 			goto err_out;
6947 		}
6948 
6949 		/* all the BTF manipulations invalidated pointers, refetch them */
6950 		fn_t = btf_type_by_id(btf, func_rec->type_id);
6951 		fn_proto_t = btf_type_by_id(btf, fn_t->type);
6952 
6953 		/* fix up type ID pointed to by param */
6954 		p = &btf_params(fn_proto_t)[arg_idx];
6955 		p->type = ptr_id;
6956 	}
6957 
6958 	free(orig_ids);
6959 	return 0;
6960 err_out:
6961 	free(orig_ids);
6962 	return err;
6963 }
6964 
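/* Illustrative BPF-side code that would exercise the fallback above: the
 * __arg_ctx macro from bpf_helpers.h expands to the "arg:ctx" decl tag and
 * marks a global subprog argument as the program context (names made up):
 *
 *	__noinline int handle_event(void *ctx __arg_ctx)
 *	{
 *		return bpf_get_smp_processor_id();
 *	}
 *
 * On kernels without native __arg_ctx support, the code above clones the
 * subprog's FUNC/FUNC_PROTO BTF and retypes the argument to a pointer to the
 * context struct from global_ctx_map, so the verifier sees PTR_TO_CTX.
 */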
6965 static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6966 {
6967 	struct bpf_program *prog;
6968 	size_t i, j;
6969 	int err;
6970 
6971 	if (obj->btf_ext) {
6972 		err = bpf_object__relocate_core(obj, targ_btf_path);
6973 		if (err) {
6974 			pr_warn("failed to perform CO-RE relocations: %d\n",
6975 				err);
6976 			return err;
6977 		}
6978 		bpf_object__sort_relos(obj);
6979 	}
6980 
6981 	/* Before relocating calls, pre-process relocations and mark the
6982 	 * few ld_imm64 instructions that point to subprogs.
6983 	 * Otherwise bpf_object__reloc_code() would later have to consider
6984 	 * all ld_imm64 insns as relocation candidates. That would
6985 	 * slow down relocation, since the number of find_prog_insn_relo()
6986 	 * calls would increase and most of them would fail to find a relo.
6987 	 */
6988 	for (i = 0; i < obj->nr_programs; i++) {
6989 		prog = &obj->programs[i];
6990 		for (j = 0; j < prog->nr_reloc; j++) {
6991 			struct reloc_desc *relo = &prog->reloc_desc[j];
6992 			struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6993 
6994 			/* mark the insn, so it's recognized by insn_is_pseudo_func() */
6995 			if (relo->type == RELO_SUBPROG_ADDR)
6996 				insn[0].src_reg = BPF_PSEUDO_FUNC;
6997 		}
6998 	}
6999 
7000 	/* relocate subprogram calls and append used subprograms to main
7001 	 * programs; each copy of subprogram code needs to be relocated
7002 	 * differently for each main program, because its code location might
7003 	 * have changed.
7004 	 * Append subprog relos to main programs to allow data relos to be
7005 	 * processed after text is completely relocated.
7006 	 */
7007 	for (i = 0; i < obj->nr_programs; i++) {
7008 		prog = &obj->programs[i];
7009 		/* sub-program's sub-calls are relocated within the context of
7010 		 * its main program only
7011 		 */
7012 		if (prog_is_subprog(obj, prog))
7013 			continue;
7014 		if (!prog->autoload)
7015 			continue;
7016 
7017 		err = bpf_object__relocate_calls(obj, prog);
7018 		if (err) {
7019 			pr_warn("prog '%s': failed to relocate calls: %d\n",
7020 				prog->name, err);
7021 			return err;
7022 		}
7023 
7024 		err = bpf_prog_assign_exc_cb(obj, prog);
7025 		if (err)
7026 			return err;
7027 		/* Now, also append exception callback if it has not been done already. */
7028 		if (prog->exception_cb_idx >= 0) {
7029 			struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7030 
7031 			/* Calling exception callback directly is disallowed, which the
7032 			 * verifier will reject later. In case it was processed already,
7033 			 * we can skip this step, otherwise for all other valid cases we
7034 			 * have to append exception callback now.
7035 			 */
7036 			if (subprog->sub_insn_off == 0) {
7037 				err = bpf_object__append_subprog_code(obj, prog, subprog);
7038 				if (err)
7039 					return err;
7040 				err = bpf_object__reloc_code(obj, prog, subprog);
7041 				if (err)
7042 					return err;
7043 			}
7044 		}
7045 	}
7046 	for (i = 0; i < obj->nr_programs; i++) {
7047 		prog = &obj->programs[i];
7048 		if (prog_is_subprog(obj, prog))
7049 			continue;
7050 		if (!prog->autoload)
7051 			continue;
7052 
7053 		/* Process data relos for main programs */
7054 		err = bpf_object__relocate_data(obj, prog);
7055 		if (err) {
7056 			pr_warn("prog '%s': failed to relocate data references: %d\n",
7057 				prog->name, err);
7058 			return err;
7059 		}
7060 
7061 		/* Fix up .BTF.ext information, if necessary */
7062 		err = bpf_program_fixup_func_info(obj, prog);
7063 		if (err) {
7064 			pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
7065 				prog->name, err);
7066 			return err;
7067 		}
7068 	}
7069 
7070 	return 0;
7071 }
7072 
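/* Background for the RELO_SUBPROG_ADDR marking in bpf_object__relocate():
 * an ld_imm64 taking the address of a subprog (e.g. a callback passed to
 * bpf_for_each_map_elem()) is a BPF_LD | BPF_IMM | BPF_DW insn whose src_reg
 * is set to BPF_PSEUDO_FUNC, which is what insn_is_pseudo_func() checks when
 * bpf_object__reloc_code() narrows down relocation candidates.
 */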
7073 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7074 					    Elf64_Shdr *shdr, Elf_Data *data);
7075 
7076 static int bpf_object__collect_map_relos(struct bpf_object *obj,
7077 					 Elf64_Shdr *shdr, Elf_Data *data)
7078 {
7079 	const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7080 	int i, j, nrels, new_sz;
7081 	const struct btf_var_secinfo *vi = NULL;
7082 	const struct btf_type *sec, *var, *def;
7083 	struct bpf_map *map = NULL, *targ_map = NULL;
7084 	struct bpf_program *targ_prog = NULL;
7085 	bool is_prog_array, is_map_in_map;
7086 	const struct btf_member *member;
7087 	const char *name, *mname, *type;
7088 	unsigned int moff;
7089 	Elf64_Sym *sym;
7090 	Elf64_Rel *rel;
7091 	void *tmp;
7092 
7093 	if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7094 		return -EINVAL;
7095 	sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7096 	if (!sec)
7097 		return -EINVAL;
7098 
7099 	nrels = shdr->sh_size / shdr->sh_entsize;
7100 	for (i = 0; i < nrels; i++) {
7101 		rel = elf_rel_by_idx(data, i);
7102 		if (!rel) {
7103 			pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7104 			return -LIBBPF_ERRNO__FORMAT;
7105 		}
7106 
7107 		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7108 		if (!sym) {
7109 			pr_warn(".maps relo #%d: symbol %zx not found\n",
7110 				i, (size_t)ELF64_R_SYM(rel->r_info));
7111 			return -LIBBPF_ERRNO__FORMAT;
7112 		}
7113 		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7114 
7115 		pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7116 			 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7117 			 (size_t)rel->r_offset, sym->st_name, name);
7118 
7119 		for (j = 0; j < obj->nr_maps; j++) {
7120 			map = &obj->maps[j];
7121 			if (map->sec_idx != obj->efile.btf_maps_shndx)
7122 				continue;
7123 
7124 			vi = btf_var_secinfos(sec) + map->btf_var_idx;
7125 			if (vi->offset <= rel->r_offset &&
7126 			    rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7127 				break;
7128 		}
7129 		if (j == obj->nr_maps) {
7130 			pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7131 				i, name, (size_t)rel->r_offset);
7132 			return -EINVAL;
7133 		}
7134 
7135 		is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7136 		is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7137 		type = is_map_in_map ? "map" : "prog";
7138 		if (is_map_in_map) {
7139 			if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7140 				pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7141 					i, name);
7142 				return -LIBBPF_ERRNO__RELOC;
7143 			}
7144 			if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7145 			    map->def.key_size != sizeof(int)) {
7146 				pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7147 					i, map->name, sizeof(int));
7148 				return -EINVAL;
7149 			}
7150 			targ_map = bpf_object__find_map_by_name(obj, name);
7151 			if (!targ_map) {
7152 				pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7153 					i, name);
7154 				return -ESRCH;
7155 			}
7156 		} else if (is_prog_array) {
7157 			targ_prog = bpf_object__find_program_by_name(obj, name);
7158 			if (!targ_prog) {
7159 				pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7160 					i, name);
7161 				return -ESRCH;
7162 			}
7163 			if (targ_prog->sec_idx != sym->st_shndx ||
7164 			    targ_prog->sec_insn_off * 8 != sym->st_value ||
7165 			    prog_is_subprog(obj, targ_prog)) {
7166 				pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7167 					i, name);
7168 				return -LIBBPF_ERRNO__RELOC;
7169 			}
7170 		} else {
7171 			return -EINVAL;
7172 		}
7173 
7174 		var = btf__type_by_id(obj->btf, vi->type);
7175 		def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7176 		if (btf_vlen(def) == 0)
7177 			return -EINVAL;
7178 		member = btf_members(def) + btf_vlen(def) - 1;
7179 		mname = btf__name_by_offset(obj->btf, member->name_off);
7180 		if (strcmp(mname, "values"))
7181 			return -EINVAL;
7182 
7183 		moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7184 		if (rel->r_offset - vi->offset < moff)
7185 			return -EINVAL;
7186 
7187 		moff = rel->r_offset - vi->offset - moff;
7188 		/* here we use BPF pointer size, which is always 64 bit, as we
7189 		 * are parsing ELF that was built for BPF target
7190 		 */
7191 		if (moff % bpf_ptr_sz)
7192 			return -EINVAL;
7193 		moff /= bpf_ptr_sz;
7194 		if (moff >= map->init_slots_sz) {
7195 			new_sz = moff + 1;
7196 			tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7197 			if (!tmp)
7198 				return -ENOMEM;
7199 			map->init_slots = tmp;
7200 			memset(map->init_slots + map->init_slots_sz, 0,
7201 			       (new_sz - map->init_slots_sz) * host_ptr_sz);
7202 			map->init_slots_sz = new_sz;
7203 		}
7204 		map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7205 
7206 		pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7207 			 i, map->name, moff, type, name);
7208 	}
7209 
7210 	return 0;
7211 }
7212 
7213 static int bpf_object__collect_relos(struct bpf_object *obj)
7214 {
7215 	int i, err;
7216 
7217 	for (i = 0; i < obj->efile.sec_cnt; i++) {
7218 		struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7219 		Elf64_Shdr *shdr;
7220 		Elf_Data *data;
7221 		int idx;
7222 
7223 		if (sec_desc->sec_type != SEC_RELO)
7224 			continue;
7225 
7226 		shdr = sec_desc->shdr;
7227 		data = sec_desc->data;
7228 		idx = shdr->sh_info;
7229 
7230 		if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
7231 			pr_warn("internal error at %d\n", __LINE__);
7232 			return -LIBBPF_ERRNO__INTERNAL;
7233 		}
7234 
7235 		if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
7236 			err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7237 		else if (idx == obj->efile.btf_maps_shndx)
7238 			err = bpf_object__collect_map_relos(obj, shdr, data);
7239 		else
7240 			err = bpf_object__collect_prog_relos(obj, shdr, data);
7241 		if (err)
7242 			return err;
7243 	}
7244 
7245 	bpf_object__sort_relos(obj);
7246 	return 0;
7247 }
7248 
7249 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7250 {
7251 	if (BPF_CLASS(insn->code) == BPF_JMP &&
7252 	    BPF_OP(insn->code) == BPF_CALL &&
7253 	    BPF_SRC(insn->code) == BPF_K &&
7254 	    insn->src_reg == 0 &&
7255 	    insn->dst_reg == 0) {
7256 		    *func_id = insn->imm;
7257 		    return true;
7258 	}
7259 	return false;
7260 }
7261 
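/* Encoding note for insn_is_helper_call(): a helper call is BPF_JMP | BPF_CALL
 * with src_reg == 0 and the helper ID in insn->imm; bpf-to-bpf calls use
 * src_reg == BPF_PSEUDO_CALL and kfunc calls use BPF_PSEUDO_KFUNC_CALL, so
 * both are intentionally left alone by the sanitization below.
 */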
7262 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7263 {
7264 	struct bpf_insn *insn = prog->insns;
7265 	enum bpf_func_id func_id;
7266 	int i;
7267 
7268 	if (obj->gen_loader)
7269 		return 0;
7270 
7271 	for (i = 0; i < prog->insns_cnt; i++, insn++) {
7272 		if (!insn_is_helper_call(insn, &func_id))
7273 			continue;
7274 
7275 		/* on kernels that don't yet support
7276 		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7277 		 * to bpf_probe_read() which works well for old kernels
7278 		 */
7279 		switch (func_id) {
7280 		case BPF_FUNC_probe_read_kernel:
7281 		case BPF_FUNC_probe_read_user:
7282 			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7283 				insn->imm = BPF_FUNC_probe_read;
7284 			break;
7285 		case BPF_FUNC_probe_read_kernel_str:
7286 		case BPF_FUNC_probe_read_user_str:
7287 			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7288 				insn->imm = BPF_FUNC_probe_read_str;
7289 			break;
7290 		default:
7291 			break;
7292 		}
7293 	}
7294 	return 0;
7295 }
7296 
7297 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7298 				     int *btf_obj_fd, int *btf_type_id);
7299 
7300 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7301 static int libbpf_prepare_prog_load(struct bpf_program *prog,
7302 				    struct bpf_prog_load_opts *opts, long cookie)
7303 {
7304 	enum sec_def_flags def = cookie;
7305 
7306 	/* old kernels might not support specifying expected_attach_type */
7307 	if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7308 		opts->expected_attach_type = 0;
7309 
7310 	if (def & SEC_SLEEPABLE)
7311 		opts->prog_flags |= BPF_F_SLEEPABLE;
7312 
7313 	if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7314 		opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7315 
7316 	/* special check for usdt to use uprobe_multi link */
7317 	if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7318 		prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7319 
7320 	if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7321 		int btf_obj_fd = 0, btf_type_id = 0, err;
7322 		const char *attach_name;
7323 
7324 		attach_name = strchr(prog->sec_name, '/');
7325 		if (!attach_name) {
7326 			/* if BPF program is annotated with just SEC("fentry")
7327 			 * (or similar) without declaratively specifying
7328 			 * target, then it is expected that target will be
7329 			 * specified with bpf_program__set_attach_target() at
7330 			 * runtime before BPF object load step. If not, then
7331 			 * there is nothing to load into the kernel as BPF
7332 			 * verifier won't be able to validate BPF program
7333 			 * correctness anyway.
7334 			 */
7335 			pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7336 				prog->name);
7337 			return -EINVAL;
7338 		}
7339 		attach_name++; /* skip over / */
7340 
7341 		err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
7342 		if (err)
7343 			return err;
7344 
7345 		/* cache resolved BTF FD and BTF type ID in the prog */
7346 		prog->attach_btf_obj_fd = btf_obj_fd;
7347 		prog->attach_btf_id = btf_type_id;
7348 
7349 		/* but by now libbpf common logic is not utilizing
7350 		 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
7351 		 * this callback is called after opts were populated by
7352 		 * libbpf, so this callback has to update opts explicitly here
7353 		 */
7354 		opts->attach_btf_obj_fd = btf_obj_fd;
7355 		opts->attach_btf_id = btf_type_id;
7356 	}
7357 	return 0;
7358 }
7359 
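/* Usage sketch for the SEC_ATTACH_BTF branch above: the attach target can be
 * named declaratively, e.g. SEC("fentry/vfs_read"), or left as bare
 * SEC("fentry") and supplied at runtime before load, roughly (names made up):
 *
 *	prog = bpf_object__find_program_by_name(obj, "my_fentry");
 *	err = bpf_program__set_attach_target(prog, 0, "vfs_read");
 *	if (!err)
 *		err = bpf_object__load(obj);
 */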
7360 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7361 
7362 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7363 				struct bpf_insn *insns, int insns_cnt,
7364 				const char *license, __u32 kern_version, int *prog_fd)
7365 {
7366 	LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7367 	const char *prog_name = NULL;
7368 	char *cp, errmsg[STRERR_BUFSIZE];
7369 	size_t log_buf_size = 0;
7370 	char *log_buf = NULL, *tmp;
7371 	bool own_log_buf = true;
7372 	__u32 log_level = prog->log_level;
7373 	int ret, err;
7374 
7375 	/* Be more helpful by rejecting programs that can't be validated early
7376 	 * with more meaningful and actionable error message.
7377 	 */
7378 	switch (prog->type) {
7379 	case BPF_PROG_TYPE_UNSPEC:
7380 		/*
7381 		 * The program type must be set.  Most likely we couldn't find a proper
7382 		 * section definition at load time, and thus we didn't infer the type.
7383 		 */
7384 		pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7385 			prog->name, prog->sec_name);
7386 		return -EINVAL;
7387 	case BPF_PROG_TYPE_STRUCT_OPS:
7388 		if (prog->attach_btf_id == 0) {
7389 			pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n",
7390 				prog->name);
7391 			return -EINVAL;
7392 		}
7393 		break;
7394 	default:
7395 		break;
7396 	}
7397 
7398 	if (!insns || !insns_cnt)
7399 		return -EINVAL;
7400 
7401 	if (kernel_supports(obj, FEAT_PROG_NAME))
7402 		prog_name = prog->name;
7403 	load_attr.attach_prog_fd = prog->attach_prog_fd;
7404 	load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7405 	load_attr.attach_btf_id = prog->attach_btf_id;
7406 	load_attr.kern_version = kern_version;
7407 	load_attr.prog_ifindex = prog->prog_ifindex;
7408 
7409 	/* specify func_info/line_info only if kernel supports them */
7410 	if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7411 		load_attr.prog_btf_fd = btf__fd(obj->btf);
7412 		load_attr.func_info = prog->func_info;
7413 		load_attr.func_info_rec_size = prog->func_info_rec_size;
7414 		load_attr.func_info_cnt = prog->func_info_cnt;
7415 		load_attr.line_info = prog->line_info;
7416 		load_attr.line_info_rec_size = prog->line_info_rec_size;
7417 		load_attr.line_info_cnt = prog->line_info_cnt;
7418 	}
7419 	load_attr.log_level = log_level;
7420 	load_attr.prog_flags = prog->prog_flags;
7421 	load_attr.fd_array = obj->fd_array;
7422 
7423 	load_attr.token_fd = obj->token_fd;
7424 	if (obj->token_fd)
7425 		load_attr.prog_flags |= BPF_F_TOKEN_FD;
7426 
7427 	/* adjust load_attr if sec_def provides custom preload callback */
7428 	if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7429 		err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7430 		if (err < 0) {
7431 			pr_warn("prog '%s': failed to prepare load attributes: %d\n",
7432 				prog->name, err);
7433 			return err;
7434 		}
7435 		insns = prog->insns;
7436 		insns_cnt = prog->insns_cnt;
7437 	}
7438 
7439 	/* allow prog_prepare_load_fn to change expected_attach_type */
7440 	load_attr.expected_attach_type = prog->expected_attach_type;
7441 
7442 	if (obj->gen_loader) {
7443 		bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7444 				   license, insns, insns_cnt, &load_attr,
7445 				   prog - obj->programs);
7446 		*prog_fd = -1;
7447 		return 0;
7448 	}
7449 
7450 retry_load:
7451 	/* if log_level is zero, we don't request logs initially even if
7452 	 * custom log_buf is specified; if the program load fails, then we'll
7453 	 * bump log_level to 1 and use either custom log_buf or we'll allocate
7454 	 * our own and retry the load to get details on what failed
7455 	 */
7456 	if (log_level) {
7457 		if (prog->log_buf) {
7458 			log_buf = prog->log_buf;
7459 			log_buf_size = prog->log_size;
7460 			own_log_buf = false;
7461 		} else if (obj->log_buf) {
7462 			log_buf = obj->log_buf;
7463 			log_buf_size = obj->log_size;
7464 			own_log_buf = false;
7465 		} else {
7466 			log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7467 			tmp = realloc(log_buf, log_buf_size);
7468 			if (!tmp) {
7469 				ret = -ENOMEM;
7470 				goto out;
7471 			}
7472 			log_buf = tmp;
7473 			log_buf[0] = '\0';
7474 			own_log_buf = true;
7475 		}
7476 	}
7477 
7478 	load_attr.log_buf = log_buf;
7479 	load_attr.log_size = log_buf_size;
7480 	load_attr.log_level = log_level;
7481 
7482 	ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7483 	if (ret >= 0) {
7484 		if (log_level && own_log_buf) {
7485 			pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7486 				 prog->name, log_buf);
7487 		}
7488 
7489 		if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7490 			struct bpf_map *map;
7491 			int i;
7492 
7493 			for (i = 0; i < obj->nr_maps; i++) {
7494 				map = &prog->obj->maps[i];
7495 				if (map->libbpf_type != LIBBPF_MAP_RODATA)
7496 					continue;
7497 
7498 				if (bpf_prog_bind_map(ret, map->fd, NULL)) {
7499 					cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7500 					pr_warn("prog '%s': failed to bind map '%s': %s\n",
7501 						prog->name, map->real_name, cp);
7502 					/* Don't fail hard if can't bind rodata. */
7503 				}
7504 			}
7505 		}
7506 
7507 		*prog_fd = ret;
7508 		ret = 0;
7509 		goto out;
7510 	}
7511 
7512 	if (log_level == 0) {
7513 		log_level = 1;
7514 		goto retry_load;
7515 	}
7516 	/* On ENOSPC, increase log buffer size and retry, unless custom
7517 	 * log_buf is specified.
7518 	 * Be careful to not overflow u32, though. Kernel's log buf size limit
7519 	 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7520 	 * multiply by 2 unless we are sure we'll fit within 32 bits.
7521 	 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7522 	 */
7523 	if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7524 		goto retry_load;
7525 
7526 	ret = -errno;
7527 
7528 	/* post-process verifier log to improve error descriptions */
7529 	fixup_verifier_log(prog, log_buf, log_buf_size);
7530 
7531 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7532 	pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
7533 	pr_perm_msg(ret);
7534 
7535 	if (own_log_buf && log_buf && log_buf[0] != '\0') {
7536 		pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7537 			prog->name, log_buf);
7538 	}
7539 
7540 out:
7541 	if (own_log_buf)
7542 		free(log_buf);
7543 	return ret;
7544 }
7545 
7546 static char *find_prev_line(char *buf, char *cur)
7547 {
7548 	char *p;
7549 
7550 	if (cur == buf) /* end of a log buf */
7551 		return NULL;
7552 
7553 	p = cur - 1;
7554 	while (p - 1 >= buf && *(p - 1) != '\n')
7555 		p--;
7556 
7557 	return p;
7558 }
7559 
7560 static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7561 		      char *orig, size_t orig_sz, const char *patch)
7562 {
7563 	/* size of the remaining log content to the right from the to-be-replaced part */
7564 	size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7565 	size_t patch_sz = strlen(patch);
7566 
7567 	if (patch_sz != orig_sz) {
7568 		/* If patch line(s) are longer than original piece of verifier log,
7569 		 * shift log contents by (patch_sz - orig_sz) bytes to the right
7570 		 * starting from after to-be-replaced part of the log.
7571 		 *
7572 		 * If patch line(s) are shorter than original piece of verifier log,
7573 		 * shift log contents by (orig_sz - patch_sz) bytes to the left
7574 		 * starting from after to-be-replaced part of the log
7575 		 *
7576 		 * We need to be careful about not overflowing available
7577 		 * buf_sz capacity. If that's the case, we'll truncate the end
7578 		 * of the original log, as necessary.
7579 		 */
7580 		if (patch_sz > orig_sz) {
7581 			if (orig + patch_sz >= buf + buf_sz) {
7582 				/* patch is big enough to cover remaining space completely */
7583 				patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7584 				rem_sz = 0;
7585 			} else if (patch_sz - orig_sz > buf_sz - log_sz) {
7586 				/* patch causes part of remaining log to be truncated */
7587 				rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7588 			}
7589 		}
7590 		/* shift remaining log to the right by calculated amount */
7591 		memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7592 	}
7593 
7594 	memcpy(orig, patch, patch_sz);
7595 }
7596 
7597 static void fixup_log_failed_core_relo(struct bpf_program *prog,
7598 				       char *buf, size_t buf_sz, size_t log_sz,
7599 				       char *line1, char *line2, char *line3)
7600 {
7601 	/* Expected log for failed and not properly guarded CO-RE relocation:
7602 	 * line1 -> 123: (85) call unknown#195896080
7603 	 * line2 -> invalid func unknown#195896080
7604 	 * line3 -> <anything else or end of buffer>
7605 	 *
7606 	 * "123" is the index of the instruction that was poisoned. We extract
7607 	 * instruction index to find corresponding CO-RE relocation and
7608 	 * replace this part of the log with more relevant information about
7609 	 * failed CO-RE relocation.
7610 	 */
7611 	const struct bpf_core_relo *relo;
7612 	struct bpf_core_spec spec;
7613 	char patch[512], spec_buf[256];
7614 	int insn_idx, err, spec_len;
7615 
7616 	if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7617 		return;
7618 
7619 	relo = find_relo_core(prog, insn_idx);
7620 	if (!relo)
7621 		return;
7622 
7623 	err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7624 	if (err)
7625 		return;
7626 
7627 	spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
7628 	snprintf(patch, sizeof(patch),
7629 		 "%d: <invalid CO-RE relocation>\n"
7630 		 "failed to resolve CO-RE relocation %s%s\n",
7631 		 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
7632 
7633 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7634 }
7635 
7636 static void fixup_log_missing_map_load(struct bpf_program *prog,
7637 				       char *buf, size_t buf_sz, size_t log_sz,
7638 				       char *line1, char *line2, char *line3)
7639 {
7640 	/* Expected log for failed and not properly guarded map reference:
7641 	 * line1 -> 123: (85) call unknown#2001000345
7642 	 * line2 -> invalid func unknown#2001000345
7643 	 * line3 -> <anything else or end of buffer>
7644 	 *
7645 	 * "123" is the index of the instruction that was poisoned.
7646 	 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7647 	 */
7648 	struct bpf_object *obj = prog->obj;
7649 	const struct bpf_map *map;
7650 	int insn_idx, map_idx;
7651 	char patch[128];
7652 
7653 	if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7654 		return;
7655 
7656 	map_idx -= POISON_LDIMM64_MAP_BASE;
7657 	if (map_idx < 0 || map_idx >= obj->nr_maps)
7658 		return;
7659 	map = &obj->maps[map_idx];
7660 
7661 	snprintf(patch, sizeof(patch),
7662 		 "%d: <invalid BPF map reference>\n"
7663 		 "BPF map '%s' is referenced but wasn't created\n",
7664 		 insn_idx, map->name);
7665 
7666 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7667 }
7668 
7669 static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7670 					 char *buf, size_t buf_sz, size_t log_sz,
7671 					 char *line1, char *line2, char *line3)
7672 {
7673 	/* Expected log for failed and not properly guarded kfunc call:
7674 	 * line1 -> 123: (85) call unknown#2002000345
7675 	 * line2 -> invalid func unknown#2002000345
7676 	 * line3 -> <anything else or end of buffer>
7677 	 *
7678 	 * "123" is the index of the instruction that was poisoned.
7679 	 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7680 	 */
7681 	struct bpf_object *obj = prog->obj;
7682 	const struct extern_desc *ext;
7683 	int insn_idx, ext_idx;
7684 	char patch[128];
7685 
7686 	if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7687 		return;
7688 
7689 	ext_idx -= POISON_CALL_KFUNC_BASE;
7690 	if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7691 		return;
7692 	ext = &obj->externs[ext_idx];
7693 
7694 	snprintf(patch, sizeof(patch),
7695 		 "%d: <invalid kfunc call>\n"
7696 		 "kfunc '%s' is referenced but wasn't resolved\n",
7697 		 insn_idx, ext->name);
7698 
7699 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7700 }
7701 
7702 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7703 {
7704 	/* look for familiar error patterns in last N lines of the log */
7705 	const size_t max_last_line_cnt = 10;
7706 	char *prev_line, *cur_line, *next_line;
7707 	size_t log_sz;
7708 	int i;
7709 
7710 	if (!buf)
7711 		return;
7712 
7713 	log_sz = strlen(buf) + 1;
7714 	next_line = buf + log_sz - 1;
7715 
7716 	for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7717 		cur_line = find_prev_line(buf, next_line);
7718 		if (!cur_line)
7719 			return;
7720 
7721 		if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7722 			prev_line = find_prev_line(buf, cur_line);
7723 			if (!prev_line)
7724 				continue;
7725 
7726 			/* failed CO-RE relocation case */
7727 			fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7728 						   prev_line, cur_line, next_line);
7729 			return;
7730 		} else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
7731 			prev_line = find_prev_line(buf, cur_line);
7732 			if (!prev_line)
7733 				continue;
7734 
7735 			/* reference to uncreated BPF map */
7736 			fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7737 						   prev_line, cur_line, next_line);
7738 			return;
7739 		} else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7740 			prev_line = find_prev_line(buf, cur_line);
7741 			if (!prev_line)
7742 				continue;
7743 
7744 			/* reference to unresolved kfunc */
7745 			fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7746 						     prev_line, cur_line, next_line);
7747 			return;
7748 		}
7749 	}
7750 }
7751 
7752 static int bpf_program_record_relos(struct bpf_program *prog)
7753 {
7754 	struct bpf_object *obj = prog->obj;
7755 	int i;
7756 
7757 	for (i = 0; i < prog->nr_reloc; i++) {
7758 		struct reloc_desc *relo = &prog->reloc_desc[i];
7759 		struct extern_desc *ext = &obj->externs[relo->ext_idx];
7760 		int kind;
7761 
7762 		switch (relo->type) {
7763 		case RELO_EXTERN_LD64:
7764 			if (ext->type != EXT_KSYM)
7765 				continue;
7766 			kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7767 				BTF_KIND_VAR : BTF_KIND_FUNC;
7768 			bpf_gen__record_extern(obj->gen_loader, ext->name,
7769 					       ext->is_weak, !ext->ksym.type_id,
7770 					       true, kind, relo->insn_idx);
7771 			break;
7772 		case RELO_EXTERN_CALL:
7773 			bpf_gen__record_extern(obj->gen_loader, ext->name,
7774 					       ext->is_weak, false, false, BTF_KIND_FUNC,
7775 					       relo->insn_idx);
7776 			break;
7777 		case RELO_CORE: {
7778 			struct bpf_core_relo cr = {
7779 				.insn_off = relo->insn_idx * 8,
7780 				.type_id = relo->core_relo->type_id,
7781 				.access_str_off = relo->core_relo->access_str_off,
7782 				.kind = relo->core_relo->kind,
7783 			};
7784 
7785 			bpf_gen__record_relo_core(obj->gen_loader, &cr);
7786 			break;
7787 		}
7788 		default:
7789 			continue;
7790 		}
7791 	}
7792 	return 0;
7793 }
7794 
7795 static int
7796 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7797 {
7798 	struct bpf_program *prog;
7799 	size_t i;
7800 	int err;
7801 
7802 	for (i = 0; i < obj->nr_programs; i++) {
7803 		prog = &obj->programs[i];
7804 		err = bpf_object__sanitize_prog(obj, prog);
7805 		if (err)
7806 			return err;
7807 	}
7808 
7809 	for (i = 0; i < obj->nr_programs; i++) {
7810 		prog = &obj->programs[i];
7811 		if (prog_is_subprog(obj, prog))
7812 			continue;
7813 		if (!prog->autoload) {
7814 			pr_debug("prog '%s': skipped loading\n", prog->name);
7815 			continue;
7816 		}
7817 		prog->log_level |= log_level;
7818 
7819 		if (obj->gen_loader)
7820 			bpf_program_record_relos(prog);
7821 
7822 		err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7823 					   obj->license, obj->kern_version, &prog->fd);
7824 		if (err) {
7825 			pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
7826 			return err;
7827 		}
7828 	}
7829 
7830 	bpf_object__free_relocs(obj);
7831 	return 0;
7832 }
7833 
7834 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7835 
7836 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7837 {
7838 	struct bpf_program *prog;
7839 	int err;
7840 
7841 	bpf_object__for_each_program(prog, obj) {
7842 		prog->sec_def = find_sec_def(prog->sec_name);
7843 		if (!prog->sec_def) {
7844 			/* couldn't guess, but user might manually specify */
7845 			pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7846 				prog->name, prog->sec_name);
7847 			continue;
7848 		}
7849 
7850 		prog->type = prog->sec_def->prog_type;
7851 		prog->expected_attach_type = prog->sec_def->expected_attach_type;
7852 
7853 		/* sec_def can have custom callback which should be called
7854 		 * after bpf_program is initialized to adjust its properties
7855 		 */
7856 		if (prog->sec_def->prog_setup_fn) {
7857 			err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7858 			if (err < 0) {
7859 				pr_warn("prog '%s': failed to initialize: %d\n",
7860 					prog->name, err);
7861 				return err;
7862 			}
7863 		}
7864 	}
7865 
7866 	return 0;
7867 }
7868 
7869 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7870 					  const struct bpf_object_open_opts *opts)
7871 {
7872 	const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
7873 	struct bpf_object *obj;
7874 	char tmp_name[64];
7875 	int err;
7876 	char *log_buf;
7877 	size_t log_size;
7878 	__u32 log_level;
7879 
7880 	if (elf_version(EV_CURRENT) == EV_NONE) {
7881 		pr_warn("failed to init libelf for %s\n",
7882 			path ? : "(mem buf)");
7883 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7884 	}
7885 
7886 	if (!OPTS_VALID(opts, bpf_object_open_opts))
7887 		return ERR_PTR(-EINVAL);
7888 
7889 	obj_name = OPTS_GET(opts, object_name, NULL);
7890 	if (obj_buf) {
7891 		if (!obj_name) {
7892 			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7893 				 (unsigned long)obj_buf,
7894 				 (unsigned long)obj_buf_sz);
7895 			obj_name = tmp_name;
7896 		}
7897 		path = obj_name;
7898 		pr_debug("loading object '%s' from buffer\n", obj_name);
7899 	}
7900 
7901 	log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7902 	log_size = OPTS_GET(opts, kernel_log_size, 0);
7903 	log_level = OPTS_GET(opts, kernel_log_level, 0);
7904 	if (log_size > UINT_MAX)
7905 		return ERR_PTR(-EINVAL);
7906 	if (log_size && !log_buf)
7907 		return ERR_PTR(-EINVAL);
7908 
7909 	token_path = OPTS_GET(opts, bpf_token_path, NULL);
7910 	/* if user didn't specify bpf_token_path explicitly, check if
7911 	 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
7912 	 * option
7913 	 */
7914 	if (!token_path)
7915 		token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
7916 	if (token_path && strlen(token_path) >= PATH_MAX)
7917 		return ERR_PTR(-ENAMETOOLONG);
7918 
7919 	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7920 	if (IS_ERR(obj))
7921 		return obj;
7922 
7923 	obj->log_buf = log_buf;
7924 	obj->log_size = log_size;
7925 	obj->log_level = log_level;
7926 
7927 	if (token_path) {
7928 		obj->token_path = strdup(token_path);
7929 		if (!obj->token_path) {
7930 			err = -ENOMEM;
7931 			goto out;
7932 		}
7933 	}
7934 
7935 	btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7936 	if (btf_tmp_path) {
7937 		if (strlen(btf_tmp_path) >= PATH_MAX) {
7938 			err = -ENAMETOOLONG;
7939 			goto out;
7940 		}
7941 		obj->btf_custom_path = strdup(btf_tmp_path);
7942 		if (!obj->btf_custom_path) {
7943 			err = -ENOMEM;
7944 			goto out;
7945 		}
7946 	}
7947 
7948 	kconfig = OPTS_GET(opts, kconfig, NULL);
7949 	if (kconfig) {
7950 		obj->kconfig = strdup(kconfig);
7951 		if (!obj->kconfig) {
7952 			err = -ENOMEM;
7953 			goto out;
7954 		}
7955 	}
7956 
7957 	err = bpf_object__elf_init(obj);
7958 	err = err ? : bpf_object__check_endianness(obj);
7959 	err = err ? : bpf_object__elf_collect(obj);
7960 	err = err ? : bpf_object__collect_externs(obj);
7961 	err = err ? : bpf_object_fixup_btf(obj);
7962 	err = err ? : bpf_object__init_maps(obj, opts);
7963 	err = err ? : bpf_object_init_progs(obj, opts);
7964 	err = err ? : bpf_object__collect_relos(obj);
7965 	if (err)
7966 		goto out;
7967 
7968 	bpf_object__elf_finish(obj);
7969 
7970 	return obj;
7971 out:
7972 	bpf_object__close(obj);
7973 	return ERR_PTR(err);
7974 }
7975 
7976 struct bpf_object *
7977 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7978 {
7979 	if (!path)
7980 		return libbpf_err_ptr(-EINVAL);
7981 
7982 	pr_debug("loading %s\n", path);
7983 
7984 	return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7985 }
7986 
7987 struct bpf_object *bpf_object__open(const char *path)
7988 {
7989 	return bpf_object__open_file(path, NULL);
7990 }
7991 
7992 struct bpf_object *
7993 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7994 		     const struct bpf_object_open_opts *opts)
7995 {
7996 	if (!obj_buf || obj_buf_sz == 0)
7997 		return libbpf_err_ptr(-EINVAL);
7998 
7999 	return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
8000 }
8001 
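/* Usage sketch for the two open flavors above; errors are reported libbpf-1.0
 * style, i.e. NULL return with errno set (object file name is made up):
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_level = 1);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		fprintf(stderr, "failed to open object: %d\n", -errno);
 */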
8002 static int bpf_object_unload(struct bpf_object *obj)
8003 {
8004 	size_t i;
8005 
8006 	if (!obj)
8007 		return libbpf_err(-EINVAL);
8008 
8009 	for (i = 0; i < obj->nr_maps; i++) {
8010 		zclose(obj->maps[i].fd);
8011 		if (obj->maps[i].st_ops)
8012 			zfree(&obj->maps[i].st_ops->kern_vdata);
8013 	}
8014 
8015 	for (i = 0; i < obj->nr_programs; i++)
8016 		bpf_program__unload(&obj->programs[i]);
8017 
8018 	return 0;
8019 }
8020 
8021 static int bpf_object__sanitize_maps(struct bpf_object *obj)
8022 {
8023 	struct bpf_map *m;
8024 
8025 	bpf_object__for_each_map(m, obj) {
8026 		if (!bpf_map__is_internal(m))
8027 			continue;
8028 		if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
8029 			m->def.map_flags &= ~BPF_F_MMAPABLE;
8030 	}
8031 
8032 	return 0;
8033 }
8034 
8035 typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
8036 			     const char *sym_name, void *ctx);
8037 
8038 static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
8039 {
8040 	char sym_type, sym_name[500];
8041 	unsigned long long sym_addr;
8042 	int ret, err = 0;
8043 	FILE *f;
8044 
8045 	f = fopen("/proc/kallsyms", "re");
8046 	if (!f) {
8047 		err = -errno;
8048 		pr_warn("failed to open /proc/kallsyms: %d\n", err);
8049 		return err;
8050 	}
8051 
8052 	while (true) {
8053 		ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
8054 			     &sym_addr, &sym_type, sym_name);
8055 		if (ret == EOF && feof(f))
8056 			break;
8057 		if (ret != 3) {
8058 			pr_warn("failed to read kallsyms entry: %d\n", ret);
8059 			err = -EINVAL;
8060 			break;
8061 		}
8062 
8063 		err = cb(sym_addr, sym_type, sym_name, ctx);
8064 		if (err)
8065 			break;
8066 	}
8067 
8068 	fclose(f);
8069 	return err;
8070 }
8071 
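/* Example of a /proc/kallsyms line as consumed by the fscanf() pattern above
 * (address and names are made up); the trailing "[module]" annotation, if
 * any, is discarded by %*[^\n]:
 *
 *	ffffffffc0123456 t some_module_func	[some_module]
 *
 * which yields sym_addr = 0xffffffffc0123456, sym_type = 't' and
 * sym_name = "some_module_func" for the callback.
 */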
8072 static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8073 		       const char *sym_name, void *ctx)
8074 {
8075 	struct bpf_object *obj = ctx;
8076 	const struct btf_type *t;
8077 	struct extern_desc *ext;
8078 	char *res;
8079 
8080 	res = strstr(sym_name, ".llvm.");
8081 	if (sym_type == 'd' && res)
8082 		ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
8083 	else
8084 		ext = find_extern_by_name(obj, sym_name);
8085 	if (!ext || ext->type != EXT_KSYM)
8086 		return 0;
8087 
8088 	t = btf__type_by_id(obj->btf, ext->btf_id);
8089 	if (!btf_is_var(t))
8090 		return 0;
8091 
8092 	if (ext->is_set && ext->ksym.addr != sym_addr) {
8093 		pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
8094 			sym_name, ext->ksym.addr, sym_addr);
8095 		return -EINVAL;
8096 	}
8097 	if (!ext->is_set) {
8098 		ext->is_set = true;
8099 		ext->ksym.addr = sym_addr;
8100 		pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
8101 	}
8102 	return 0;
8103 }
8104 
8105 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8106 {
8107 	return libbpf_kallsyms_parse(kallsyms_cb, obj);
8108 }
8109 
8110 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8111 			    __u16 kind, struct btf **res_btf,
8112 			    struct module_btf **res_mod_btf)
8113 {
8114 	struct module_btf *mod_btf;
8115 	struct btf *btf;
8116 	int i, id, err;
8117 
8118 	btf = obj->btf_vmlinux;
8119 	mod_btf = NULL;
8120 	id = btf__find_by_name_kind(btf, ksym_name, kind);
8121 
8122 	if (id == -ENOENT) {
8123 		err = load_module_btfs(obj);
8124 		if (err)
8125 			return err;
8126 
8127 		for (i = 0; i < obj->btf_module_cnt; i++) {
8128 			/* we assume module_btf's BTF FD is always >0 */
8129 			mod_btf = &obj->btf_modules[i];
8130 			btf = mod_btf->btf;
8131 			id = btf__find_by_name_kind_own(btf, ksym_name, kind);
8132 			if (id != -ENOENT)
8133 				break;
8134 		}
8135 	}
8136 	if (id <= 0)
8137 		return -ESRCH;
8138 
8139 	*res_btf = btf;
8140 	*res_mod_btf = mod_btf;
8141 	return id;
8142 }
8143 
8144 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8145 					       struct extern_desc *ext)
8146 {
8147 	const struct btf_type *targ_var, *targ_type;
8148 	__u32 targ_type_id, local_type_id;
8149 	struct module_btf *mod_btf = NULL;
8150 	const char *targ_var_name;
8151 	struct btf *btf = NULL;
8152 	int id, err;
8153 
8154 	id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8155 	if (id < 0) {
8156 		if (id == -ESRCH && ext->is_weak)
8157 			return 0;
8158 		pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8159 			ext->name);
8160 		return id;
8161 	}
8162 
8163 	/* find local type_id */
8164 	local_type_id = ext->ksym.type_id;
8165 
8166 	/* find target type_id */
8167 	targ_var = btf__type_by_id(btf, id);
8168 	targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8169 	targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8170 
8171 	err = bpf_core_types_are_compat(obj->btf, local_type_id,
8172 					btf, targ_type_id);
8173 	if (err <= 0) {
8174 		const struct btf_type *local_type;
8175 		const char *targ_name, *local_name;
8176 
8177 		local_type = btf__type_by_id(obj->btf, local_type_id);
8178 		local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8179 		targ_name = btf__name_by_offset(btf, targ_type->name_off);
8180 
8181 		pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8182 			ext->name, local_type_id,
8183 			btf_kind_str(local_type), local_name, targ_type_id,
8184 			btf_kind_str(targ_type), targ_name);
8185 		return -EINVAL;
8186 	}
8187 
8188 	ext->is_set = true;
8189 	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8190 	ext->ksym.kernel_btf_id = id;
8191 	pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8192 		 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8193 
8194 	return 0;
8195 }
8196 
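/* Resolve an extern kfunc against kernel or module BTF: find the FUNC by
 * name, verify its func_proto is compatible with the local declaration, and,
 * for module kfuncs, reserve a slot in obj->fd_array so that instructions
 * can reference the module BTF FD by index.
 */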
8197 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8198 						struct extern_desc *ext)
8199 {
8200 	int local_func_proto_id, kfunc_proto_id, kfunc_id;
8201 	struct module_btf *mod_btf = NULL;
8202 	const struct btf_type *kern_func;
8203 	struct btf *kern_btf = NULL;
8204 	int ret;
8205 
8206 	local_func_proto_id = ext->ksym.type_id;
8207 
8208 	kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8209 				    &mod_btf);
8210 	if (kfunc_id < 0) {
8211 		if (kfunc_id == -ESRCH && ext->is_weak)
8212 			return 0;
8213 		pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
8214 			ext->name);
8215 		return kfunc_id;
8216 	}
8217 
8218 	kern_func = btf__type_by_id(kern_btf, kfunc_id);
8219 	kfunc_proto_id = kern_func->type;
8220 
8221 	ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8222 					kern_btf, kfunc_proto_id);
8223 	if (ret <= 0) {
8224 		if (ext->is_weak)
8225 			return 0;
8226 
8227 		pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8228 			ext->name, local_func_proto_id,
8229 			mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8230 		return -EINVAL;
8231 	}
8232 
8233 	/* set index for module BTF fd in fd_array, if unset */
8234 	if (mod_btf && !mod_btf->fd_array_idx) {
8235 		/* insn->off is s16 */
8236 		if (obj->fd_array_cnt == INT16_MAX) {
8237 			pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8238 				ext->name, mod_btf->fd_array_idx);
8239 			return -E2BIG;
8240 		}
8241 		/* Cannot use index 0 for module BTF fd */
8242 		if (!obj->fd_array_cnt)
8243 			obj->fd_array_cnt = 1;
8244 
8245 		ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8246 					obj->fd_array_cnt + 1);
8247 		if (ret)
8248 			return ret;
8249 		mod_btf->fd_array_idx = obj->fd_array_cnt;
8250 		/* we assume module BTF FD is always >0 */
8251 		obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8252 	}
8253 
8254 	ext->is_set = true;
8255 	ext->ksym.kernel_btf_id = kfunc_id;
8256 	ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8257 	/* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8258 	 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8259 	 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8260 	 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8261 	 */
8262 	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8263 	pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8264 		 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
8265 
8266 	return 0;
8267 }
8268 
8269 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8270 {
8271 	const struct btf_type *t;
8272 	struct extern_desc *ext;
8273 	int i, err;
8274 
8275 	for (i = 0; i < obj->nr_extern; i++) {
8276 		ext = &obj->externs[i];
8277 		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8278 			continue;
8279 
8280 		if (obj->gen_loader) {
8281 			ext->is_set = true;
8282 			ext->ksym.kernel_btf_obj_fd = 0;
8283 			ext->ksym.kernel_btf_id = 0;
8284 			continue;
8285 		}
8286 		t = btf__type_by_id(obj->btf, ext->btf_id);
8287 		if (btf_is_var(t))
8288 			err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8289 		else
8290 			err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8291 		if (err)
8292 			return err;
8293 	}
8294 	return 0;
8295 }
8296 
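/* Resolve all extern declarations of the object: CONFIG_xxx kcfg externs
 * from Kconfig data, LINUX_xxx virtual kcfg externs filled in by libbpf
 * itself, and ksym externs either from /proc/kallsyms (untyped) or from
 * vmlinux/module BTF (typed). Unresolved strong externs fail the load.
 */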
8297 static int bpf_object__resolve_externs(struct bpf_object *obj,
8298 				       const char *extra_kconfig)
8299 {
8300 	bool need_config = false, need_kallsyms = false;
8301 	bool need_vmlinux_btf = false;
8302 	struct extern_desc *ext;
8303 	void *kcfg_data = NULL;
8304 	int err, i;
8305 
8306 	if (obj->nr_extern == 0)
8307 		return 0;
8308 
8309 	if (obj->kconfig_map_idx >= 0)
8310 		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8311 
8312 	for (i = 0; i < obj->nr_extern; i++) {
8313 		ext = &obj->externs[i];
8314 
8315 		if (ext->type == EXT_KSYM) {
8316 			if (ext->ksym.type_id)
8317 				need_vmlinux_btf = true;
8318 			else
8319 				need_kallsyms = true;
8320 			continue;
8321 		} else if (ext->type == EXT_KCFG) {
8322 			void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8323 			__u64 value = 0;
8324 
8325 			/* Kconfig externs need actual /proc/config.gz */
8326 			if (str_has_pfx(ext->name, "CONFIG_")) {
8327 				need_config = true;
8328 				continue;
8329 			}
8330 
8331 			/* Virtual kcfg externs are handled by libbpf itself */
8332 			if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8333 				value = get_kernel_version();
8334 				if (!value) {
8335 					pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8336 					return -EINVAL;
8337 				}
8338 			} else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8339 				value = kernel_supports(obj, FEAT_BPF_COOKIE);
8340 			} else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8341 				value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8342 			} else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8343 				/* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
8344 				 * __kconfig externs, where LINUX_ ones are virtual and filled out
8345 				 * by libbpf itself (their values don't come from Kconfig).
8346 				 * If a LINUX_xxx variable is not recognized by libbpf, but is marked
8347 				 * __weak, it defaults to zero, just like CONFIG_xxx
8348 				 * externs.
8349 				 */
8350 				pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
8351 				return -EINVAL;
8352 			}
8353 
8354 			err = set_kcfg_value_num(ext, ext_ptr, value);
8355 			if (err)
8356 				return err;
8357 			pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8358 				 ext->name, (long long)value);
8359 		} else {
8360 			pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8361 			return -EINVAL;
8362 		}
8363 	}
8364 	if (need_config && extra_kconfig) {
8365 		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8366 		if (err)
8367 			return -EINVAL;
8368 		need_config = false;
8369 		for (i = 0; i < obj->nr_extern; i++) {
8370 			ext = &obj->externs[i];
8371 			if (ext->type == EXT_KCFG && !ext->is_set) {
8372 				need_config = true;
8373 				break;
8374 			}
8375 		}
8376 	}
8377 	if (need_config) {
8378 		err = bpf_object__read_kconfig_file(obj, kcfg_data);
8379 		if (err)
8380 			return -EINVAL;
8381 	}
8382 	if (need_kallsyms) {
8383 		err = bpf_object__read_kallsyms_file(obj);
8384 		if (err)
8385 			return -EINVAL;
8386 	}
8387 	if (need_vmlinux_btf) {
8388 		err = bpf_object__resolve_ksyms_btf_id(obj);
8389 		if (err)
8390 			return -EINVAL;
8391 	}
8392 	for (i = 0; i < obj->nr_extern; i++) {
8393 		ext = &obj->externs[i];
8394 
8395 		if (!ext->is_set && !ext->is_weak) {
8396 			pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8397 			return -ESRCH;
8398 		} else if (!ext->is_set) {
8399 			pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
8400 				 ext->name);
8401 		}
8402 	}
8403 
8404 	return 0;
8405 }
8406 
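/* For a struct_ops map, write the FD of each set BPF program into the
 * corresponding function pointer slot of the kernel-side (kern_vdata) image.
 */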
8407 static void bpf_map_prepare_vdata(const struct bpf_map *map)
8408 {
8409 	struct bpf_struct_ops *st_ops;
8410 	__u32 i;
8411 
8412 	st_ops = map->st_ops;
8413 	for (i = 0; i < btf_vlen(st_ops->type); i++) {
8414 		struct bpf_program *prog = st_ops->progs[i];
8415 		void *kern_data;
8416 		int prog_fd;
8417 
8418 		if (!prog)
8419 			continue;
8420 
8421 		prog_fd = bpf_program__fd(prog);
8422 		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8423 		*(unsigned long *)kern_data = prog_fd;
8424 	}
8425 }
8426 
8427 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8428 {
8429 	struct bpf_map *map;
8430 	int i;
8431 
8432 	for (i = 0; i < obj->nr_maps; i++) {
8433 		map = &obj->maps[i];
8434 
8435 		if (!bpf_map__is_struct_ops(map))
8436 			continue;
8437 
8438 		if (!map->autocreate)
8439 			continue;
8440 
8441 		bpf_map_prepare_vdata(map);
8442 	}
8443 
8444 	return 0;
8445 }
8446 
8447 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8448 {
8449 	int err, i;
8450 
8451 	if (!obj)
8452 		return libbpf_err(-EINVAL);
8453 
8454 	if (obj->loaded) {
8455 		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8456 		return libbpf_err(-EINVAL);
8457 	}
8458 
8459 	if (obj->gen_loader)
8460 		bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8461 
8462 	err = bpf_object_prepare_token(obj);
8463 	err = err ? : bpf_object__probe_loading(obj);
8464 	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8465 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8466 	err = err ? : bpf_object__sanitize_maps(obj);
8467 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8468 	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
8469 	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8470 	err = err ? : bpf_object__sanitize_and_load_btf(obj);
8471 	err = err ? : bpf_object__create_maps(obj);
8472 	err = err ? : bpf_object__load_progs(obj, extra_log_level);
8473 	err = err ? : bpf_object_init_prog_arrays(obj);
8474 	err = err ? : bpf_object_prepare_struct_ops(obj);
8475 
8476 	if (obj->gen_loader) {
8477 		/* reset FDs */
8478 		if (obj->btf)
8479 			btf__set_fd(obj->btf, -1);
8480 		if (!err)
8481 			err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8482 	}
8483 
8484 	/* clean up fd_array */
8485 	zfree(&obj->fd_array);
8486 
8487 	/* clean up module BTFs */
8488 	for (i = 0; i < obj->btf_module_cnt; i++) {
8489 		close(obj->btf_modules[i].fd);
8490 		btf__free(obj->btf_modules[i].btf);
8491 		free(obj->btf_modules[i].name);
8492 	}
8493 	free(obj->btf_modules);
8494 
8495 	/* clean up vmlinux BTF */
8496 	btf__free(obj->btf_vmlinux);
8497 	obj->btf_vmlinux = NULL;
8498 
8499 	obj->loaded = true; /* set regardless of whether load succeeded */
8500 
8501 	if (err)
8502 		goto out;
8503 
8504 	return 0;
8505 out:
8506 	/* unpin any maps that were auto-pinned during load */
8507 	for (i = 0; i < obj->nr_maps; i++)
8508 		if (obj->maps[i].pinned && !obj->maps[i].reused)
8509 			bpf_map__unpin(&obj->maps[i], NULL);
8510 
8511 	bpf_object_unload(obj);
8512 	pr_warn("failed to load object '%s'\n", obj->path);
8513 	return libbpf_err(err);
8514 }
8515 
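/* Typical usage (sketch, error handling elided):
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	int err = bpf_object__load(obj);
 *	...
 *	bpf_object__close(obj);
 */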
8516 int bpf_object__load(struct bpf_object *obj)
8517 {
8518 	return bpf_object_load(obj, 0, NULL);
8519 }
8520 
8521 static int make_parent_dir(const char *path)
8522 {
8523 	char *cp, errmsg[STRERR_BUFSIZE];
8524 	char *dname, *dir;
8525 	int err = 0;
8526 
8527 	dname = strdup(path);
8528 	if (dname == NULL)
8529 		return -ENOMEM;
8530 
8531 	dir = dirname(dname);
8532 	if (mkdir(dir, 0700) && errno != EEXIST)
8533 		err = -errno;
8534 
8535 	free(dname);
8536 	if (err) {
8537 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8538 		pr_warn("failed to mkdir %s: %s\n", path, cp);
8539 	}
8540 	return err;
8541 }
8542 
8543 static int check_path(const char *path)
8544 {
8545 	char *cp, errmsg[STRERR_BUFSIZE];
8546 	struct statfs st_fs;
8547 	char *dname, *dir;
8548 	int err = 0;
8549 
8550 	if (path == NULL)
8551 		return -EINVAL;
8552 
8553 	dname = strdup(path);
8554 	if (dname == NULL)
8555 		return -ENOMEM;
8556 
8557 	dir = dirname(dname);
8558 	if (statfs(dir, &st_fs)) {
8559 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
8560 		pr_warn("failed to statfs %s: %s\n", dir, cp);
8561 		err = -errno;
8562 	}
8563 	free(dname);
8564 
8565 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
8566 		pr_warn("specified path %s is not on BPF FS\n", path);
8567 		err = -EINVAL;
8568 	}
8569 
8570 	return err;
8571 }
8572 
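/* Pin a loaded program at 'path', which must reside on a BPF FS mount
 * (typically under /sys/fs/bpf). A minimal sketch with a hypothetical path:
 *
 *	bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 */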
8573 int bpf_program__pin(struct bpf_program *prog, const char *path)
8574 {
8575 	char *cp, errmsg[STRERR_BUFSIZE];
8576 	int err;
8577 
8578 	if (prog->fd < 0) {
8579 		pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8580 		return libbpf_err(-EINVAL);
8581 	}
8582 
8583 	err = make_parent_dir(path);
8584 	if (err)
8585 		return libbpf_err(err);
8586 
8587 	err = check_path(path);
8588 	if (err)
8589 		return libbpf_err(err);
8590 
8591 	if (bpf_obj_pin(prog->fd, path)) {
8592 		err = -errno;
8593 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8594 		pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
8595 		return libbpf_err(err);
8596 	}
8597 
8598 	pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
8599 	return 0;
8600 }
8601 
8602 int bpf_program__unpin(struct bpf_program *prog, const char *path)
8603 {
8604 	int err;
8605 
8606 	if (prog->fd < 0) {
8607 		pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
8608 		return libbpf_err(-EINVAL);
8609 	}
8610 
8611 	err = check_path(path);
8612 	if (err)
8613 		return libbpf_err(err);
8614 
8615 	err = unlink(path);
8616 	if (err)
8617 		return libbpf_err(-errno);
8618 
8619 	pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
8620 	return 0;
8621 }
8622 
8623 int bpf_map__pin(struct bpf_map *map, const char *path)
8624 {
8625 	char *cp, errmsg[STRERR_BUFSIZE];
8626 	int err;
8627 
8628 	if (map == NULL) {
8629 		pr_warn("invalid map pointer\n");
8630 		return libbpf_err(-EINVAL);
8631 	}
8632 
8633 	if (map->fd < 0) {
8634 		pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
8635 		return libbpf_err(-EINVAL);
8636 	}
8637 
8638 	if (map->pin_path) {
8639 		if (path && strcmp(path, map->pin_path)) {
8640 			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8641 				bpf_map__name(map), map->pin_path, path);
8642 			return libbpf_err(-EINVAL);
8643 		} else if (map->pinned) {
8644 			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8645 				 bpf_map__name(map), map->pin_path);
8646 			return 0;
8647 		}
8648 	} else {
8649 		if (!path) {
8650 			pr_warn("missing a path to pin map '%s' at\n",
8651 				bpf_map__name(map));
8652 			return libbpf_err(-EINVAL);
8653 		} else if (map->pinned) {
8654 			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8655 			return libbpf_err(-EEXIST);
8656 		}
8657 
8658 		map->pin_path = strdup(path);
8659 		if (!map->pin_path) {
8660 			err = -errno;
8661 			goto out_err;
8662 		}
8663 	}
8664 
8665 	err = make_parent_dir(map->pin_path);
8666 	if (err)
8667 		return libbpf_err(err);
8668 
8669 	err = check_path(map->pin_path);
8670 	if (err)
8671 		return libbpf_err(err);
8672 
8673 	if (bpf_obj_pin(map->fd, map->pin_path)) {
8674 		err = -errno;
8675 		goto out_err;
8676 	}
8677 
8678 	map->pinned = true;
8679 	pr_debug("pinned map '%s'\n", map->pin_path);
8680 
8681 	return 0;
8682 
8683 out_err:
8684 	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8685 	pr_warn("failed to pin map: %s\n", cp);
8686 	return libbpf_err(err);
8687 }
8688 
8689 int bpf_map__unpin(struct bpf_map *map, const char *path)
8690 {
8691 	int err;
8692 
8693 	if (map == NULL) {
8694 		pr_warn("invalid map pointer\n");
8695 		return libbpf_err(-EINVAL);
8696 	}
8697 
8698 	if (map->pin_path) {
8699 		if (path && strcmp(path, map->pin_path)) {
8700 			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8701 				bpf_map__name(map), map->pin_path, path);
8702 			return libbpf_err(-EINVAL);
8703 		}
8704 		path = map->pin_path;
8705 	} else if (!path) {
8706 		pr_warn("no path to unpin map '%s' from\n",
8707 			bpf_map__name(map));
8708 		return libbpf_err(-EINVAL);
8709 	}
8710 
8711 	err = check_path(path);
8712 	if (err)
8713 		return libbpf_err(err);
8714 
8715 	err = unlink(path);
8716 	if (err != 0)
8717 		return libbpf_err(-errno);
8718 
8719 	map->pinned = false;
8720 	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8721 
8722 	return 0;
8723 }
8724 
8725 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8726 {
8727 	char *new = NULL;
8728 
8729 	if (path) {
8730 		new = strdup(path);
8731 		if (!new)
8732 			return libbpf_err(-errno);
8733 	}
8734 
8735 	free(map->pin_path);
8736 	map->pin_path = new;
8737 	return 0;
8738 }
8739 
8740 __alias(bpf_map__pin_path)
8741 const char *bpf_map__get_pin_path(const struct bpf_map *map);
8742 
8743 const char *bpf_map__pin_path(const struct bpf_map *map)
8744 {
8745 	return map->pin_path;
8746 }
8747 
8748 bool bpf_map__is_pinned(const struct bpf_map *map)
8749 {
8750 	return map->pinned;
8751 }
8752 
8753 static void sanitize_pin_path(char *s)
8754 {
8755 	/* bpffs disallows periods in path names */
8756 	while (*s) {
8757 		if (*s == '.')
8758 			*s = '_';
8759 		s++;
8760 	}
8761 }
8762 
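/* Pin all auto-created maps of the object. If 'path' is given, each map is
 * pinned at <path>/<map_name> (with '.' replaced by '_'); otherwise only
 * maps with an explicit pin_path are pinned. On failure, maps pinned so far
 * are unpinned again.
 */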
8763 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8764 {
8765 	struct bpf_map *map;
8766 	int err;
8767 
8768 	if (!obj)
8769 		return libbpf_err(-ENOENT);
8770 
8771 	if (!obj->loaded) {
8772 		pr_warn("object not yet loaded; load it first\n");
8773 		return libbpf_err(-ENOENT);
8774 	}
8775 
8776 	bpf_object__for_each_map(map, obj) {
8777 		char *pin_path = NULL;
8778 		char buf[PATH_MAX];
8779 
8780 		if (!map->autocreate)
8781 			continue;
8782 
8783 		if (path) {
8784 			err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8785 			if (err)
8786 				goto err_unpin_maps;
8787 			sanitize_pin_path(buf);
8788 			pin_path = buf;
8789 		} else if (!map->pin_path) {
8790 			continue;
8791 		}
8792 
8793 		err = bpf_map__pin(map, pin_path);
8794 		if (err)
8795 			goto err_unpin_maps;
8796 	}
8797 
8798 	return 0;
8799 
8800 err_unpin_maps:
8801 	while ((map = bpf_object__prev_map(obj, map))) {
8802 		if (!map->pin_path)
8803 			continue;
8804 
8805 		bpf_map__unpin(map, NULL);
8806 	}
8807 
8808 	return libbpf_err(err);
8809 }
8810 
8811 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8812 {
8813 	struct bpf_map *map;
8814 	int err;
8815 
8816 	if (!obj)
8817 		return libbpf_err(-ENOENT);
8818 
8819 	bpf_object__for_each_map(map, obj) {
8820 		char *pin_path = NULL;
8821 		char buf[PATH_MAX];
8822 
8823 		if (path) {
8824 			err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8825 			if (err)
8826 				return libbpf_err(err);
8827 			sanitize_pin_path(buf);
8828 			pin_path = buf;
8829 		} else if (!map->pin_path) {
8830 			continue;
8831 		}
8832 
8833 		err = bpf_map__unpin(map, pin_path);
8834 		if (err)
8835 			return libbpf_err(err);
8836 	}
8837 
8838 	return 0;
8839 }
8840 
8841 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8842 {
8843 	struct bpf_program *prog;
8844 	char buf[PATH_MAX];
8845 	int err;
8846 
8847 	if (!obj)
8848 		return libbpf_err(-ENOENT);
8849 
8850 	if (!obj->loaded) {
8851 		pr_warn("object not yet loaded; load it first\n");
8852 		return libbpf_err(-ENOENT);
8853 	}
8854 
8855 	bpf_object__for_each_program(prog, obj) {
8856 		err = pathname_concat(buf, sizeof(buf), path, prog->name);
8857 		if (err)
8858 			goto err_unpin_programs;
8859 
8860 		err = bpf_program__pin(prog, buf);
8861 		if (err)
8862 			goto err_unpin_programs;
8863 	}
8864 
8865 	return 0;
8866 
8867 err_unpin_programs:
8868 	while ((prog = bpf_object__prev_program(obj, prog))) {
8869 		if (pathname_concat(buf, sizeof(buf), path, prog->name))
8870 			continue;
8871 
8872 		bpf_program__unpin(prog, buf);
8873 	}
8874 
8875 	return libbpf_err(err);
8876 }
8877 
8878 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8879 {
8880 	struct bpf_program *prog;
8881 	int err;
8882 
8883 	if (!obj)
8884 		return libbpf_err(-ENOENT);
8885 
8886 	bpf_object__for_each_program(prog, obj) {
8887 		char buf[PATH_MAX];
8888 
8889 		err = pathname_concat(buf, sizeof(buf), path, prog->name);
8890 		if (err)
8891 			return libbpf_err(err);
8892 
8893 		err = bpf_program__unpin(prog, buf);
8894 		if (err)
8895 			return libbpf_err(err);
8896 	}
8897 
8898 	return 0;
8899 }
8900 
8901 int bpf_object__pin(struct bpf_object *obj, const char *path)
8902 {
8903 	int err;
8904 
8905 	err = bpf_object__pin_maps(obj, path);
8906 	if (err)
8907 		return libbpf_err(err);
8908 
8909 	err = bpf_object__pin_programs(obj, path);
8910 	if (err) {
8911 		bpf_object__unpin_maps(obj, path);
8912 		return libbpf_err(err);
8913 	}
8914 
8915 	return 0;
8916 }
8917 
8918 int bpf_object__unpin(struct bpf_object *obj, const char *path)
8919 {
8920 	int err;
8921 
8922 	err = bpf_object__unpin_programs(obj, path);
8923 	if (err)
8924 		return libbpf_err(err);
8925 
8926 	err = bpf_object__unpin_maps(obj, path);
8927 	if (err)
8928 		return libbpf_err(err);
8929 
8930 	return 0;
8931 }
8932 
8933 static void bpf_map__destroy(struct bpf_map *map)
8934 {
8935 	if (map->inner_map) {
8936 		bpf_map__destroy(map->inner_map);
8937 		zfree(&map->inner_map);
8938 	}
8939 
8940 	zfree(&map->init_slots);
8941 	map->init_slots_sz = 0;
8942 
8943 	if (map->mmaped && map->mmaped != map->obj->arena_data)
8944 		munmap(map->mmaped, bpf_map_mmap_sz(map));
8945 	map->mmaped = NULL;
8946 
8947 	if (map->st_ops) {
8948 		zfree(&map->st_ops->data);
8949 		zfree(&map->st_ops->progs);
8950 		zfree(&map->st_ops->kern_func_off);
8951 		zfree(&map->st_ops);
8952 	}
8953 
8954 	zfree(&map->name);
8955 	zfree(&map->real_name);
8956 	zfree(&map->pin_path);
8957 
8958 	if (map->fd >= 0)
8959 		zclose(map->fd);
8960 }
8961 
8962 void bpf_object__close(struct bpf_object *obj)
8963 {
8964 	size_t i;
8965 
8966 	if (IS_ERR_OR_NULL(obj))
8967 		return;
8968 
8969 	usdt_manager_free(obj->usdt_man);
8970 	obj->usdt_man = NULL;
8971 
8972 	bpf_gen__free(obj->gen_loader);
8973 	bpf_object__elf_finish(obj);
8974 	bpf_object_unload(obj);
8975 	btf__free(obj->btf);
8976 	btf__free(obj->btf_vmlinux);
8977 	btf_ext__free(obj->btf_ext);
8978 
8979 	for (i = 0; i < obj->nr_maps; i++)
8980 		bpf_map__destroy(&obj->maps[i]);
8981 
8982 	zfree(&obj->btf_custom_path);
8983 	zfree(&obj->kconfig);
8984 
8985 	for (i = 0; i < obj->nr_extern; i++)
8986 		zfree(&obj->externs[i].essent_name);
8987 
8988 	zfree(&obj->externs);
8989 	obj->nr_extern = 0;
8990 
8991 	zfree(&obj->maps);
8992 	obj->nr_maps = 0;
8993 
8994 	if (obj->programs && obj->nr_programs) {
8995 		for (i = 0; i < obj->nr_programs; i++)
8996 			bpf_program__exit(&obj->programs[i]);
8997 	}
8998 	zfree(&obj->programs);
8999 
9000 	zfree(&obj->feat_cache);
9001 	zfree(&obj->token_path);
9002 	if (obj->token_fd > 0)
9003 		close(obj->token_fd);
9004 
9005 	zfree(&obj->arena_data);
9006 
9007 	free(obj);
9008 }
9009 
9010 const char *bpf_object__name(const struct bpf_object *obj)
9011 {
9012 	return obj ? obj->name : libbpf_err_ptr(-EINVAL);
9013 }
9014 
9015 unsigned int bpf_object__kversion(const struct bpf_object *obj)
9016 {
9017 	return obj ? obj->kern_version : 0;
9018 }
9019 
9020 struct btf *bpf_object__btf(const struct bpf_object *obj)
9021 {
9022 	return obj ? obj->btf : NULL;
9023 }
9024 
9025 int bpf_object__btf_fd(const struct bpf_object *obj)
9026 {
9027 	return obj->btf ? btf__fd(obj->btf) : -1;
9028 }
9029 
9030 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
9031 {
9032 	if (obj->loaded)
9033 		return libbpf_err(-EINVAL);
9034 
9035 	obj->kern_version = kern_version;
9036 
9037 	return 0;
9038 }
9039 
9040 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
9041 {
9042 	struct bpf_gen *gen;
9043 
9044 	if (!opts)
9045 		return -EFAULT;
9046 	if (!OPTS_VALID(opts, gen_loader_opts))
9047 		return -EINVAL;
9048 	gen = calloc(sizeof(*gen), 1);
9049 	if (!gen)
9050 		return -ENOMEM;
9051 	gen->opts = opts;
9052 	obj->gen_loader = gen;
9053 	return 0;
9054 }
9055 
9056 static struct bpf_program *
9057 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
9058 		    bool forward)
9059 {
9060 	size_t nr_programs = obj->nr_programs;
9061 	ssize_t idx;
9062 
9063 	if (!nr_programs)
9064 		return NULL;
9065 
9066 	if (!p)
9067 		/* Iterate from the beginning */
9068 		return forward ? &obj->programs[0] :
9069 			&obj->programs[nr_programs - 1];
9070 
9071 	if (p->obj != obj) {
9072 		pr_warn("error: program handler doesn't match object\n");
9073 		return errno = EINVAL, NULL;
9074 	}
9075 
9076 	idx = (p - obj->programs) + (forward ? 1 : -1);
9077 	if (idx >= obj->nr_programs || idx < 0)
9078 		return NULL;
9079 	return &obj->programs[idx];
9080 }
9081 
9082 struct bpf_program *
9083 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
9084 {
9085 	struct bpf_program *prog = prev;
9086 
9087 	do {
9088 		prog = __bpf_program__iter(prog, obj, true);
9089 	} while (prog && prog_is_subprog(obj, prog));
9090 
9091 	return prog;
9092 }
9093 
9094 struct bpf_program *
9095 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
9096 {
9097 	struct bpf_program *prog = next;
9098 
9099 	do {
9100 		prog = __bpf_program__iter(prog, obj, false);
9101 	} while (prog && prog_is_subprog(obj, prog));
9102 
9103 	return prog;
9104 }
9105 
9106 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
9107 {
9108 	prog->prog_ifindex = ifindex;
9109 }
9110 
9111 const char *bpf_program__name(const struct bpf_program *prog)
9112 {
9113 	return prog->name;
9114 }
9115 
9116 const char *bpf_program__section_name(const struct bpf_program *prog)
9117 {
9118 	return prog->sec_name;
9119 }
9120 
9121 bool bpf_program__autoload(const struct bpf_program *prog)
9122 {
9123 	return prog->autoload;
9124 }
9125 
9126 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
9127 {
9128 	if (prog->obj->loaded)
9129 		return libbpf_err(-EINVAL);
9130 
9131 	prog->autoload = autoload;
9132 	return 0;
9133 }
9134 
9135 bool bpf_program__autoattach(const struct bpf_program *prog)
9136 {
9137 	return prog->autoattach;
9138 }
9139 
9140 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
9141 {
9142 	prog->autoattach = autoattach;
9143 }
9144 
9145 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
9146 {
9147 	return prog->insns;
9148 }
9149 
9150 size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9151 {
9152 	return prog->insns_cnt;
9153 }
9154 
9155 int bpf_program__set_insns(struct bpf_program *prog,
9156 			   struct bpf_insn *new_insns, size_t new_insn_cnt)
9157 {
9158 	struct bpf_insn *insns;
9159 
9160 	if (prog->obj->loaded)
9161 		return -EBUSY;
9162 
9163 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9164 	/* NULL is a valid return from reallocarray if the new count is zero */
9165 	if (!insns && new_insn_cnt) {
9166 		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9167 		return -ENOMEM;
9168 	}
9169 	memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9170 
9171 	prog->insns = insns;
9172 	prog->insns_cnt = new_insn_cnt;
9173 	return 0;
9174 }
9175 
9176 int bpf_program__fd(const struct bpf_program *prog)
9177 {
9178 	if (!prog)
9179 		return libbpf_err(-EINVAL);
9180 
9181 	if (prog->fd < 0)
9182 		return libbpf_err(-ENOENT);
9183 
9184 	return prog->fd;
9185 }
9186 
9187 __alias(bpf_program__type)
9188 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9189 
9190 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
9191 {
9192 	return prog->type;
9193 }
9194 
9195 static size_t custom_sec_def_cnt;
9196 static struct bpf_sec_def *custom_sec_defs;
9197 static struct bpf_sec_def custom_fallback_def;
9198 static bool has_custom_fallback_def;
9199 static int last_custom_sec_def_handler_id;
9200 
9201 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
9202 {
9203 	if (prog->obj->loaded)
9204 		return libbpf_err(-EBUSY);
9205 
9206 	/* if type is not changed, do nothing */
9207 	if (prog->type == type)
9208 		return 0;
9209 
9210 	prog->type = type;
9211 
9212 	/* If a program type was changed, we need to reset associated SEC()
9213 	 * handler, as it will be invalid now. The only exception is a generic
9214 	 * fallback handler, which by definition is program type-agnostic and
9215 	 * is a catch-all custom handler, optionally set by the application,
9216 	 * so should be able to handle any type of BPF program.
9217 	 */
9218 	if (prog->sec_def != &custom_fallback_def)
9219 		prog->sec_def = NULL;
9220 	return 0;
9221 }
9222 
9223 __alias(bpf_program__expected_attach_type)
9224 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9225 
9226 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
9227 {
9228 	return prog->expected_attach_type;
9229 }
9230 
9231 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
9232 					   enum bpf_attach_type type)
9233 {
9234 	if (prog->obj->loaded)
9235 		return libbpf_err(-EBUSY);
9236 
9237 	prog->expected_attach_type = type;
9238 	return 0;
9239 }
9240 
9241 __u32 bpf_program__flags(const struct bpf_program *prog)
9242 {
9243 	return prog->prog_flags;
9244 }
9245 
9246 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
9247 {
9248 	if (prog->obj->loaded)
9249 		return libbpf_err(-EBUSY);
9250 
9251 	prog->prog_flags = flags;
9252 	return 0;
9253 }
9254 
9255 __u32 bpf_program__log_level(const struct bpf_program *prog)
9256 {
9257 	return prog->log_level;
9258 }
9259 
9260 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9261 {
9262 	if (prog->obj->loaded)
9263 		return libbpf_err(-EBUSY);
9264 
9265 	prog->log_level = log_level;
9266 	return 0;
9267 }
9268 
9269 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9270 {
9271 	*log_size = prog->log_size;
9272 	return prog->log_buf;
9273 }
9274 
9275 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9276 {
9277 	if (log_size && !log_buf)
9278 		return -EINVAL;
9279 	if (prog->log_size > UINT_MAX)
9280 		return -EINVAL;
9281 	if (prog->obj->loaded)
9282 		return -EBUSY;
9283 
9284 	prog->log_buf = log_buf;
9285 	prog->log_size = log_size;
9286 	return 0;
9287 }
9288 
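/* SEC_DEF() describes one supported SEC() name: ELF section prefix, BPF
 * program type, expected attach type, cookie flags and, optionally, an
 * auto-attach callback.
 */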
9289 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
9290 	.sec = (char *)sec_pfx,						    \
9291 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
9292 	.expected_attach_type = atype,					    \
9293 	.cookie = (long)(flags),					    \
9294 	.prog_prepare_load_fn = libbpf_prepare_prog_load,		    \
9295 	__VA_ARGS__							    \
9296 }
9297 
9298 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9299 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9300 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9301 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9302 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9303 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9304 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9305 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9306 static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9307 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9308 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9309 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9310 
9311 static const struct bpf_sec_def section_defs[] = {
9312 	SEC_DEF("socket",		SOCKET_FILTER, 0, SEC_NONE),
9313 	SEC_DEF("sk_reuseport/migrate",	SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9314 	SEC_DEF("sk_reuseport",		SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9315 	SEC_DEF("kprobe+",		KPROBE,	0, SEC_NONE, attach_kprobe),
9316 	SEC_DEF("uprobe+",		KPROBE,	0, SEC_NONE, attach_uprobe),
9317 	SEC_DEF("uprobe.s+",		KPROBE,	0, SEC_SLEEPABLE, attach_uprobe),
9318 	SEC_DEF("kretprobe+",		KPROBE, 0, SEC_NONE, attach_kprobe),
9319 	SEC_DEF("uretprobe+",		KPROBE, 0, SEC_NONE, attach_uprobe),
9320 	SEC_DEF("uretprobe.s+",		KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9321 	SEC_DEF("kprobe.multi+",	KPROBE,	BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9322 	SEC_DEF("kretprobe.multi+",	KPROBE,	BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9323 	SEC_DEF("kprobe.session+",	KPROBE,	BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
9324 	SEC_DEF("uprobe.multi+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9325 	SEC_DEF("uretprobe.multi+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9326 	SEC_DEF("uprobe.multi.s+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9327 	SEC_DEF("uretprobe.multi.s+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9328 	SEC_DEF("ksyscall+",		KPROBE,	0, SEC_NONE, attach_ksyscall),
9329 	SEC_DEF("kretsyscall+",		KPROBE, 0, SEC_NONE, attach_ksyscall),
9330 	SEC_DEF("usdt+",		KPROBE,	0, SEC_USDT, attach_usdt),
9331 	SEC_DEF("usdt.s+",		KPROBE,	0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
9332 	SEC_DEF("tc/ingress",		SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9333 	SEC_DEF("tc/egress",		SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),  /* alias for tcx */
9334 	SEC_DEF("tcx/ingress",		SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9335 	SEC_DEF("tcx/egress",		SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9336 	SEC_DEF("tc",			SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9337 	SEC_DEF("classifier",		SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9338 	SEC_DEF("action",		SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9339 	SEC_DEF("netkit/primary",	SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9340 	SEC_DEF("netkit/peer",		SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9341 	SEC_DEF("tracepoint+",		TRACEPOINT, 0, SEC_NONE, attach_tp),
9342 	SEC_DEF("tp+",			TRACEPOINT, 0, SEC_NONE, attach_tp),
9343 	SEC_DEF("raw_tracepoint+",	RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9344 	SEC_DEF("raw_tp+",		RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9345 	SEC_DEF("raw_tracepoint.w+",	RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9346 	SEC_DEF("raw_tp.w+",		RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9347 	SEC_DEF("tp_btf+",		TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9348 	SEC_DEF("fentry+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9349 	SEC_DEF("fmod_ret+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9350 	SEC_DEF("fexit+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9351 	SEC_DEF("fentry.s+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9352 	SEC_DEF("fmod_ret.s+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9353 	SEC_DEF("fexit.s+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9354 	SEC_DEF("freplace+",		EXT, 0, SEC_ATTACH_BTF, attach_trace),
9355 	SEC_DEF("lsm+",			LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9356 	SEC_DEF("lsm.s+",		LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9357 	SEC_DEF("lsm_cgroup+",		LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
9358 	SEC_DEF("iter+",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9359 	SEC_DEF("iter.s+",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9360 	SEC_DEF("syscall",		SYSCALL, 0, SEC_SLEEPABLE),
9361 	SEC_DEF("xdp.frags/devmap",	XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9362 	SEC_DEF("xdp/devmap",		XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9363 	SEC_DEF("xdp.frags/cpumap",	XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9364 	SEC_DEF("xdp/cpumap",		XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9365 	SEC_DEF("xdp.frags",		XDP, BPF_XDP, SEC_XDP_FRAGS),
9366 	SEC_DEF("xdp",			XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9367 	SEC_DEF("perf_event",		PERF_EVENT, 0, SEC_NONE),
9368 	SEC_DEF("lwt_in",		LWT_IN, 0, SEC_NONE),
9369 	SEC_DEF("lwt_out",		LWT_OUT, 0, SEC_NONE),
9370 	SEC_DEF("lwt_xmit",		LWT_XMIT, 0, SEC_NONE),
9371 	SEC_DEF("lwt_seg6local",	LWT_SEG6LOCAL, 0, SEC_NONE),
9372 	SEC_DEF("sockops",		SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9373 	SEC_DEF("sk_skb/stream_parser",	SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9374 	SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
9375 	SEC_DEF("sk_skb/verdict",	SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT),
9376 	SEC_DEF("sk_skb",		SK_SKB, 0, SEC_NONE),
9377 	SEC_DEF("sk_msg",		SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9378 	SEC_DEF("lirc_mode2",		LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9379 	SEC_DEF("flow_dissector",	FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9380 	SEC_DEF("cgroup_skb/ingress",	CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9381 	SEC_DEF("cgroup_skb/egress",	CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9382 	SEC_DEF("cgroup/skb",		CGROUP_SKB, 0, SEC_NONE),
9383 	SEC_DEF("cgroup/sock_create",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9384 	SEC_DEF("cgroup/sock_release",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9385 	SEC_DEF("cgroup/sock",		CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9386 	SEC_DEF("cgroup/post_bind4",	CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9387 	SEC_DEF("cgroup/post_bind6",	CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9388 	SEC_DEF("cgroup/bind4",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9389 	SEC_DEF("cgroup/bind6",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9390 	SEC_DEF("cgroup/connect4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9391 	SEC_DEF("cgroup/connect6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
9392 	SEC_DEF("cgroup/connect_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
9393 	SEC_DEF("cgroup/sendmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9394 	SEC_DEF("cgroup/sendmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
9395 	SEC_DEF("cgroup/sendmsg_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
9396 	SEC_DEF("cgroup/recvmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9397 	SEC_DEF("cgroup/recvmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
9398 	SEC_DEF("cgroup/recvmsg_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
9399 	SEC_DEF("cgroup/getpeername4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9400 	SEC_DEF("cgroup/getpeername6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
9401 	SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
9402 	SEC_DEF("cgroup/getsockname4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9403 	SEC_DEF("cgroup/getsockname6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
9404 	SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
9405 	SEC_DEF("cgroup/sysctl",	CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9406 	SEC_DEF("cgroup/getsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9407 	SEC_DEF("cgroup/setsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9408 	SEC_DEF("cgroup/dev",		CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
9409 	SEC_DEF("struct_ops+",		STRUCT_OPS, 0, SEC_NONE),
9410 	SEC_DEF("struct_ops.s+",	STRUCT_OPS, 0, SEC_SLEEPABLE),
9411 	SEC_DEF("sk_lookup",		SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
9412 	SEC_DEF("netfilter",		NETFILTER, BPF_NETFILTER, SEC_NONE),
9413 };
9414 
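/* Register a custom SEC() handler. A minimal sketch, assuming a user-defined
 * attach callback my_attach_fn():
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .prog_attach_fn = my_attach_fn);
 *	int id = libbpf_register_prog_handler("mytype", BPF_PROG_TYPE_KPROBE, 0, &opts);
 *	...
 *	libbpf_unregister_prog_handler(id);
 */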
9415 int libbpf_register_prog_handler(const char *sec,
9416 				 enum bpf_prog_type prog_type,
9417 				 enum bpf_attach_type exp_attach_type,
9418 				 const struct libbpf_prog_handler_opts *opts)
9419 {
9420 	struct bpf_sec_def *sec_def;
9421 
9422 	if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9423 		return libbpf_err(-EINVAL);
9424 
9425 	if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9426 		return libbpf_err(-E2BIG);
9427 
9428 	if (sec) {
9429 		sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9430 					      sizeof(*sec_def));
9431 		if (!sec_def)
9432 			return libbpf_err(-ENOMEM);
9433 
9434 		custom_sec_defs = sec_def;
9435 		sec_def = &custom_sec_defs[custom_sec_def_cnt];
9436 	} else {
9437 		if (has_custom_fallback_def)
9438 			return libbpf_err(-EBUSY);
9439 
9440 		sec_def = &custom_fallback_def;
9441 	}
9442 
9443 	sec_def->sec = sec ? strdup(sec) : NULL;
9444 	if (sec && !sec_def->sec)
9445 		return libbpf_err(-ENOMEM);
9446 
9447 	sec_def->prog_type = prog_type;
9448 	sec_def->expected_attach_type = exp_attach_type;
9449 	sec_def->cookie = OPTS_GET(opts, cookie, 0);
9450 
9451 	sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9452 	sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9453 	sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9454 
9455 	sec_def->handler_id = ++last_custom_sec_def_handler_id;
9456 
9457 	if (sec)
9458 		custom_sec_def_cnt++;
9459 	else
9460 		has_custom_fallback_def = true;
9461 
9462 	return sec_def->handler_id;
9463 }
9464 
9465 int libbpf_unregister_prog_handler(int handler_id)
9466 {
9467 	struct bpf_sec_def *sec_defs;
9468 	int i;
9469 
9470 	if (handler_id <= 0)
9471 		return libbpf_err(-EINVAL);
9472 
9473 	if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9474 		memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9475 		has_custom_fallback_def = false;
9476 		return 0;
9477 	}
9478 
9479 	for (i = 0; i < custom_sec_def_cnt; i++) {
9480 		if (custom_sec_defs[i].handler_id == handler_id)
9481 			break;
9482 	}
9483 
9484 	if (i == custom_sec_def_cnt)
9485 		return libbpf_err(-ENOENT);
9486 
9487 	free(custom_sec_defs[i].sec);
9488 	for (i = i + 1; i < custom_sec_def_cnt; i++)
9489 		custom_sec_defs[i - 1] = custom_sec_defs[i];
9490 	custom_sec_def_cnt--;
9491 
9492 	/* try to shrink the array, but it's ok if we couldn't */
9493 	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
9494 	/* if new count is zero, reallocarray can return a valid NULL result;
9495 	 * in this case the previous pointer will be freed, so we *have to*
9496 	 * reassign old pointer to the new value (even if it's NULL)
9497 	 */
9498 	if (sec_defs || custom_sec_def_cnt == 0)
9499 		custom_sec_defs = sec_defs;
9500 
9501 	return 0;
9502 }
9503 
9504 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
9505 {
9506 	size_t len = strlen(sec_def->sec);
9507 
9508 	/* "type/" always has to have proper SEC("type/extras") form */
9509 	if (sec_def->sec[len - 1] == '/') {
9510 		if (str_has_pfx(sec_name, sec_def->sec))
9511 			return true;
9512 		return false;
9513 	}
9514 
9515 	/* "type+" means it can be either exact SEC("type") or
9516 	 * well-formed SEC("type/extras") with proper '/' separator
9517 	 */
9518 	if (sec_def->sec[len - 1] == '+') {
9519 		len--;
9520 		/* not even a prefix */
9521 		if (strncmp(sec_name, sec_def->sec, len) != 0)
9522 			return false;
9523 		/* exact match or has '/' separator */
9524 		if (sec_name[len] == '\0' || sec_name[len] == '/')
9525 			return true;
9526 		return false;
9527 	}
9528 
9529 	return strcmp(sec_name, sec_def->sec) == 0;
9530 }
9531 
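/* Find the SEC() definition matching sec_name: user-registered custom
 * handlers take precedence over built-in section_defs; the custom fallback
 * handler, if registered, is used as a last resort.
 */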
9532 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9533 {
9534 	const struct bpf_sec_def *sec_def;
9535 	int i, n;
9536 
9537 	n = custom_sec_def_cnt;
9538 	for (i = 0; i < n; i++) {
9539 		sec_def = &custom_sec_defs[i];
9540 		if (sec_def_matches(sec_def, sec_name))
9541 			return sec_def;
9542 	}
9543 
9544 	n = ARRAY_SIZE(section_defs);
9545 	for (i = 0; i < n; i++) {
9546 		sec_def = &section_defs[i];
9547 		if (sec_def_matches(sec_def, sec_name))
9548 			return sec_def;
9549 	}
9550 
9551 	if (has_custom_fallback_def)
9552 		return &custom_fallback_def;
9553 
9554 	return NULL;
9555 }
9556 
9557 #define MAX_TYPE_NAME_SIZE 32
9558 
9559 static char *libbpf_get_type_names(bool attach_type)
9560 {
9561 	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9562 	char *buf;
9563 
9564 	buf = malloc(len);
9565 	if (!buf)
9566 		return NULL;
9567 
9568 	buf[0] = '\0';
9569 	/* Fill buf with all available names */
9570 	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9571 		const struct bpf_sec_def *sec_def = &section_defs[i];
9572 
9573 		if (attach_type) {
9574 			if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9575 				continue;
9576 
9577 			if (!(sec_def->cookie & SEC_ATTACHABLE))
9578 				continue;
9579 		}
9580 
9581 		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9582 			free(buf);
9583 			return NULL;
9584 		}
9585 		strcat(buf, " ");
9586 		strcat(buf, section_defs[i].sec);
9587 	}
9588 
9589 	return buf;
9590 }
9591 
9592 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9593 			     enum bpf_attach_type *expected_attach_type)
9594 {
9595 	const struct bpf_sec_def *sec_def;
9596 	char *type_names;
9597 
9598 	if (!name)
9599 		return libbpf_err(-EINVAL);
9600 
9601 	sec_def = find_sec_def(name);
9602 	if (sec_def) {
9603 		*prog_type = sec_def->prog_type;
9604 		*expected_attach_type = sec_def->expected_attach_type;
9605 		return 0;
9606 	}
9607 
9608 	pr_debug("failed to guess program type from ELF section '%s'\n", name);
9609 	type_names = libbpf_get_type_names(false);
9610 	if (type_names != NULL) {
9611 		pr_debug("supported section(type) names are:%s\n", type_names);
9612 		free(type_names);
9613 	}
9614 
9615 	return libbpf_err(-ESRCH);
9616 }
9617 
9618 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
9619 {
9620 	if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
9621 		return NULL;
9622 
9623 	return attach_type_name[t];
9624 }
9625 
9626 const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
9627 {
9628 	if (t < 0 || t >= ARRAY_SIZE(link_type_name))
9629 		return NULL;
9630 
9631 	return link_type_name[t];
9632 }
9633 
9634 const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
9635 {
9636 	if (t < 0 || t >= ARRAY_SIZE(map_type_name))
9637 		return NULL;
9638 
9639 	return map_type_name[t];
9640 }
9641 
9642 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
9643 {
9644 	if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
9645 		return NULL;
9646 
9647 	return prog_type_name[t];
9648 }
9649 
9650 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9651 						     int sec_idx,
9652 						     size_t offset)
9653 {
9654 	struct bpf_map *map;
9655 	size_t i;
9656 
9657 	for (i = 0; i < obj->nr_maps; i++) {
9658 		map = &obj->maps[i];
9659 		if (!bpf_map__is_struct_ops(map))
9660 			continue;
9661 		if (map->sec_idx == sec_idx &&
9662 		    map->sec_offset <= offset &&
9663 		    offset - map->sec_offset < map->def.value_size)
9664 			return map;
9665 	}
9666 
9667 	return NULL;
9668 }
9669 
9670 /* Collect the reloc from ELF, populate the st_ops->progs[], and update
9671  * st_ops->data for shadow type.
9672  */
9673 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9674 					    Elf64_Shdr *shdr, Elf_Data *data)
9675 {
9676 	const struct btf_member *member;
9677 	struct bpf_struct_ops *st_ops;
9678 	struct bpf_program *prog;
9679 	unsigned int shdr_idx;
9680 	const struct btf *btf;
9681 	struct bpf_map *map;
9682 	unsigned int moff, insn_idx;
9683 	const char *name;
9684 	__u32 member_idx;
9685 	Elf64_Sym *sym;
9686 	Elf64_Rel *rel;
9687 	int i, nrels;
9688 
9689 	btf = obj->btf;
9690 	nrels = shdr->sh_size / shdr->sh_entsize;
9691 	for (i = 0; i < nrels; i++) {
9692 		rel = elf_rel_by_idx(data, i);
9693 		if (!rel) {
9694 			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9695 			return -LIBBPF_ERRNO__FORMAT;
9696 		}
9697 
9698 		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9699 		if (!sym) {
9700 			pr_warn("struct_ops reloc: symbol %zx not found\n",
9701 				(size_t)ELF64_R_SYM(rel->r_info));
9702 			return -LIBBPF_ERRNO__FORMAT;
9703 		}
9704 
9705 		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9706 		map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9707 		if (!map) {
9708 			pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9709 				(size_t)rel->r_offset);
9710 			return -EINVAL;
9711 		}
9712 
9713 		moff = rel->r_offset - map->sec_offset;
9714 		shdr_idx = sym->st_shndx;
9715 		st_ops = map->st_ops;
9716 		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9717 			 map->name,
9718 			 (long long)(rel->r_info >> 32),
9719 			 (long long)sym->st_value,
9720 			 shdr_idx, (size_t)rel->r_offset,
9721 			 map->sec_offset, sym->st_name, name);
9722 
9723 		if (shdr_idx >= SHN_LORESERVE) {
9724 			pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9725 				map->name, (size_t)rel->r_offset, shdr_idx);
9726 			return -LIBBPF_ERRNO__RELOC;
9727 		}
9728 		if (sym->st_value % BPF_INSN_SZ) {
9729 			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9730 				map->name, (unsigned long long)sym->st_value);
9731 			return -LIBBPF_ERRNO__FORMAT;
9732 		}
9733 		insn_idx = sym->st_value / BPF_INSN_SZ;
9734 
9735 		member = find_member_by_offset(st_ops->type, moff * 8);
9736 		if (!member) {
9737 			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9738 				map->name, moff);
9739 			return -EINVAL;
9740 		}
9741 		member_idx = member - btf_members(st_ops->type);
9742 		name = btf__name_by_offset(btf, member->name_off);
9743 
9744 		if (!resolve_func_ptr(btf, member->type, NULL)) {
9745 			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9746 				map->name, name);
9747 			return -EINVAL;
9748 		}
9749 
9750 		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9751 		if (!prog) {
9752 			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9753 				map->name, shdr_idx, name);
9754 			return -EINVAL;
9755 		}
9756 
9757 		/* prevent the use of BPF prog with invalid type */
9758 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9759 			pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9760 				map->name, prog->name);
9761 			return -EINVAL;
9762 		}
9763 
9764 		st_ops->progs[member_idx] = prog;
9765 
9766 		/* st_ops->data will be exposed to users, being returned by
9767 		 * bpf_map__initial_value() as a pointer to the shadow
9768 		 * type. All function pointers in the original struct type
9769 		 * should be converted to a pointer to struct bpf_program
9770 		 * in the shadow type.
9771 		 */
9772 		*((struct bpf_program **)(st_ops->data + moff)) = prog;
9773 	}
9774 
9775 	return 0;
9776 }
9777 
9778 #define BTF_TRACE_PREFIX "btf_trace_"
9779 #define BTF_LSM_PREFIX "bpf_lsm_"
9780 #define BTF_ITER_PREFIX "bpf_iter_"
9781 #define BTF_MAX_NAME_SIZE 128
9782 
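/* Map an attach type to the kernel BTF name prefix and BTF kind used when
 * looking up the attach target, e.g. "btf_trace_" typedefs for raw
 * tracepoints and "bpf_lsm_" funcs for LSM hooks.
 */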
9783 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9784 				const char **prefix, int *kind)
9785 {
9786 	switch (attach_type) {
9787 	case BPF_TRACE_RAW_TP:
9788 		*prefix = BTF_TRACE_PREFIX;
9789 		*kind = BTF_KIND_TYPEDEF;
9790 		break;
9791 	case BPF_LSM_MAC:
9792 	case BPF_LSM_CGROUP:
9793 		*prefix = BTF_LSM_PREFIX;
9794 		*kind = BTF_KIND_FUNC;
9795 		break;
9796 	case BPF_TRACE_ITER:
9797 		*prefix = BTF_ITER_PREFIX;
9798 		*kind = BTF_KIND_FUNC;
9799 		break;
9800 	default:
9801 		*prefix = "";
9802 		*kind = BTF_KIND_FUNC;
9803 	}
9804 }
9805 
9806 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9807 				   const char *name, __u32 kind)
9808 {
9809 	char btf_type_name[BTF_MAX_NAME_SIZE];
9810 	int ret;
9811 
9812 	ret = snprintf(btf_type_name, sizeof(btf_type_name),
9813 		       "%s%s", prefix, name);
9814 	/* snprintf returns the number of characters written excluding the
9815 	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
9816 	 * indicates truncation.
9817 	 */
9818 	if (ret < 0 || ret >= sizeof(btf_type_name))
9819 		return -ENAMETOOLONG;
9820 	return btf__find_by_name_kind(btf, btf_type_name, kind);
9821 }
9822 
9823 static inline int find_attach_btf_id(struct btf *btf, const char *name,
9824 				     enum bpf_attach_type attach_type)
9825 {
9826 	const char *prefix;
9827 	int kind;
9828 
9829 	btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9830 	return find_btf_by_prefix_kind(btf, prefix, name, kind);
9831 }
9832 
9833 int libbpf_find_vmlinux_btf_id(const char *name,
9834 			       enum bpf_attach_type attach_type)
9835 {
9836 	struct btf *btf;
9837 	int err;
9838 
9839 	btf = btf__load_vmlinux_btf();
9840 	err = libbpf_get_error(btf);
9841 	if (err) {
9842 		pr_warn("vmlinux BTF is not found\n");
9843 		return libbpf_err(err);
9844 	}
9845 
9846 	err = find_attach_btf_id(btf, name, attach_type);
9847 	if (err <= 0)
9848 		pr_warn("%s is not found in vmlinux BTF\n", name);
9849 
9850 	btf__free(btf);
9851 	return libbpf_err(err);
9852 }
9853 
9854 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9855 {
9856 	struct bpf_prog_info info;
9857 	__u32 info_len = sizeof(info);
9858 	struct btf *btf;
9859 	int err;
9860 
9861 	memset(&info, 0, info_len);
9862 	err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
9863 	if (err) {
9864 		pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
9865 			attach_prog_fd, err);
9866 		return err;
9867 	}
9868 
9869 	err = -EINVAL;
9870 	if (!info.btf_id) {
9871 		pr_warn("The target program doesn't have BTF\n");
9872 		goto out;
9873 	}
9874 	btf = btf__load_from_kernel_by_id(info.btf_id);
9875 	err = libbpf_get_error(btf);
9876 	if (err) {
9877 		pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9878 		goto out;
9879 	}
9880 	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9881 	btf__free(btf);
9882 	if (err <= 0) {
9883 		pr_warn("%s is not found in prog's BTF\n", name);
9884 		goto out;
9885 	}
9886 out:
9887 	return err;
9888 }
9889 
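/* Resolve attach_name, optionally given as "module:function", to a BTF type
 * ID plus the FD of the BTF object containing it (0 means vmlinux BTF).
 * Searches vmlinux BTF first, then BTFs of loaded kernel modules.
 */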
9890 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9891 			      enum bpf_attach_type attach_type,
9892 			      int *btf_obj_fd, int *btf_type_id)
9893 {
9894 	int ret, i, mod_len;
9895 	const char *fn_name, *mod_name = NULL;
9896 
9897 	fn_name = strchr(attach_name, ':');
9898 	if (fn_name) {
9899 		mod_name = attach_name;
9900 		mod_len = fn_name - mod_name;
9901 		fn_name++;
9902 	}
9903 
9904 	if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) {
9905 		ret = find_attach_btf_id(obj->btf_vmlinux,
9906 					 mod_name ? fn_name : attach_name,
9907 					 attach_type);
9908 		if (ret > 0) {
9909 			*btf_obj_fd = 0; /* vmlinux BTF */
9910 			*btf_type_id = ret;
9911 			return 0;
9912 		}
9913 		if (ret != -ENOENT)
9914 			return ret;
9915 	}
9916 
9917 	ret = load_module_btfs(obj);
9918 	if (ret)
9919 		return ret;
9920 
9921 	for (i = 0; i < obj->btf_module_cnt; i++) {
9922 		const struct module_btf *mod = &obj->btf_modules[i];
9923 
9924 		if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0)
9925 			continue;
9926 
9927 		ret = find_attach_btf_id(mod->btf,
9928 					 mod_name ? fn_name : attach_name,
9929 					 attach_type);
9930 		if (ret > 0) {
9931 			*btf_obj_fd = mod->fd;
9932 			*btf_type_id = ret;
9933 			return 0;
9934 		}
9935 		if (ret == -ENOENT)
9936 			continue;
9937 
9938 		return ret;
9939 	}
9940 
9941 	return -ESRCH;
9942 }
9943 
9944 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9945 				     int *btf_obj_fd, int *btf_type_id)
9946 {
9947 	enum bpf_attach_type attach_type = prog->expected_attach_type;
9948 	__u32 attach_prog_fd = prog->attach_prog_fd;
9949 	int err = 0;
9950 
9951 	/* BPF program's BTF ID */
9952 	if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9953 		if (!attach_prog_fd) {
9954 			pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9955 			return -EINVAL;
9956 		}
9957 		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9958 		if (err < 0) {
9959 			pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9960 				 prog->name, attach_prog_fd, attach_name, err);
9961 			return err;
9962 		}
9963 		*btf_obj_fd = 0;
9964 		*btf_type_id = err;
9965 		return 0;
9966 	}
9967 
9968 	/* kernel/module BTF ID */
9969 	if (prog->obj->gen_loader) {
9970 		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9971 		*btf_obj_fd = 0;
9972 		*btf_type_id = 1;
9973 	} else {
9974 		err = find_kernel_btf_id(prog->obj, attach_name,
9975 					 attach_type, btf_obj_fd,
9976 					 btf_type_id);
9977 	}
9978 	if (err) {
9979 		pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9980 			prog->name, attach_name, err);
9981 		return err;
9982 	}
9983 	return 0;
9984 }
9985 
9986 int libbpf_attach_type_by_name(const char *name,
9987 			       enum bpf_attach_type *attach_type)
9988 {
9989 	char *type_names;
9990 	const struct bpf_sec_def *sec_def;
9991 
9992 	if (!name)
9993 		return libbpf_err(-EINVAL);
9994 
9995 	sec_def = find_sec_def(name);
9996 	if (!sec_def) {
9997 		pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9998 		type_names = libbpf_get_type_names(true);
9999 		if (type_names != NULL) {
10000 			pr_debug("attachable section(type) names are:%s\n", type_names);
10001 			free(type_names);
10002 		}
10003 
10004 		return libbpf_err(-EINVAL);
10005 	}
10006 
10007 	if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
10008 		return libbpf_err(-EINVAL);
10009 	if (!(sec_def->cookie & SEC_ATTACHABLE))
10010 		return libbpf_err(-EINVAL);
10011 
10012 	*attach_type = sec_def->expected_attach_type;
10013 	return 0;
10014 }
10015 
10016 int bpf_map__fd(const struct bpf_map *map)
10017 {
10018 	if (!map)
10019 		return libbpf_err(-EINVAL);
10020 	if (!map_is_created(map))
10021 		return -1;
10022 	return map->fd;
10023 }
10024 
10025 static bool map_uses_real_name(const struct bpf_map *map)
10026 {
10027 	/* Since libbpf started to support custom .data.* and .rodata.* maps,
10028 	 * their user-visible name differs from kernel-visible name. Users see
10029 	 * such map's corresponding ELF section name as a map name.
10030 	 * This check distinguishes .data/.rodata from .data.* and .rodata.*
10031 	 * maps to know which name has to be returned to the user.
10032 	 */
10033 	if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
10034 		return true;
10035 	if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
10036 		return true;
10037 	return false;
10038 }
10039 
10040 const char *bpf_map__name(const struct bpf_map *map)
10041 {
10042 	if (!map)
10043 		return NULL;
10044 
10045 	if (map_uses_real_name(map))
10046 		return map->real_name;
10047 
10048 	return map->name;
10049 }
10050 
10051 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
10052 {
10053 	return map->def.type;
10054 }
10055 
10056 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
10057 {
10058 	if (map_is_created(map))
10059 		return libbpf_err(-EBUSY);
10060 	map->def.type = type;
10061 	return 0;
10062 }
10063 
10064 __u32 bpf_map__map_flags(const struct bpf_map *map)
10065 {
10066 	return map->def.map_flags;
10067 }
10068 
10069 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
10070 {
10071 	if (map_is_created(map))
10072 		return libbpf_err(-EBUSY);
10073 	map->def.map_flags = flags;
10074 	return 0;
10075 }
10076 
10077 __u64 bpf_map__map_extra(const struct bpf_map *map)
10078 {
10079 	return map->map_extra;
10080 }
10081 
10082 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
10083 {
10084 	if (map_is_created(map))
10085 		return libbpf_err(-EBUSY);
10086 	map->map_extra = map_extra;
10087 	return 0;
10088 }
10089 
10090 __u32 bpf_map__numa_node(const struct bpf_map *map)
10091 {
10092 	return map->numa_node;
10093 }
10094 
10095 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10096 {
10097 	if (map_is_created(map))
10098 		return libbpf_err(-EBUSY);
10099 	map->numa_node = numa_node;
10100 	return 0;
10101 }
10102 
10103 __u32 bpf_map__key_size(const struct bpf_map *map)
10104 {
10105 	return map->def.key_size;
10106 }
10107 
10108 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10109 {
10110 	if (map_is_created(map))
10111 		return libbpf_err(-EBUSY);
10112 	map->def.key_size = size;
10113 	return 0;
10114 }
10115 
10116 __u32 bpf_map__value_size(const struct bpf_map *map)
10117 {
10118 	return map->def.value_size;
10119 }
10120 
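/* Adjust the BTF datasec describing a global-data map's value so that its
 * last variable (which must be an array) covers the new value size.
 */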
10121 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10122 {
10123 	struct btf *btf;
10124 	struct btf_type *datasec_type, *var_type;
10125 	struct btf_var_secinfo *var;
10126 	const struct btf_type *array_type;
10127 	const struct btf_array *array;
10128 	int vlen, element_sz, new_array_id;
10129 	__u32 nr_elements;
10130 
10131 	/* check btf existence */
10132 	btf = bpf_object__btf(map->obj);
10133 	if (!btf)
10134 		return -ENOENT;
10135 
10136 	/* verify map is datasec */
10137 	datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10138 	if (!btf_is_datasec(datasec_type)) {
10139 		pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10140 			bpf_map__name(map));
10141 		return -EINVAL;
10142 	}
10143 
10144 	/* verify datasec has at least one var */
10145 	vlen = btf_vlen(datasec_type);
10146 	if (vlen == 0) {
10147 		pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10148 			bpf_map__name(map));
10149 		return -EINVAL;
10150 	}
10151 
10152 	/* verify last var in the datasec is an array */
10153 	var = &btf_var_secinfos(datasec_type)[vlen - 1];
10154 	var_type = btf_type_by_id(btf, var->type);
10155 	array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10156 	if (!btf_is_array(array_type)) {
10157 		pr_warn("map '%s': cannot be resized, last var must be an array\n",
10158 			bpf_map__name(map));
10159 		return -EINVAL;
10160 	}
10161 
10162 	/* verify request size aligns with array */
10163 	array = btf_array(array_type);
10164 	element_sz = btf__resolve_size(btf, array->type);
10165 	if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10166 		pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10167 			bpf_map__name(map), element_sz, size);
10168 		return -EINVAL;
10169 	}
10170 
10171 	/* create a new array based on the existing array, but with new length */
10172 	nr_elements = (size - var->offset) / element_sz;
10173 	new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10174 	if (new_array_id < 0)
10175 		return new_array_id;
10176 
10177 	/* adding a new btf type invalidates existing pointers to btf objects,
10178 	 * so refresh pointers before proceeding
10179 	 */
10180 	datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10181 	var = &btf_var_secinfos(datasec_type)[vlen - 1];
10182 	var_type = btf_type_by_id(btf, var->type);
10183 
10184 	/* finally update btf info */
10185 	datasec_type->size = size;
10186 	var->size = size - var->offset;
10187 	var_type->type = new_array_id;
10188 
10189 	return 0;
10190 }
10191 
10192 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10193 {
10194 	if (map->obj->loaded || map->reused)
10195 		return libbpf_err(-EBUSY);
10196 
10197 	if (map->mmaped) {
10198 		size_t mmap_old_sz, mmap_new_sz;
10199 		int err;
10200 
10201 		if (map->def.type != BPF_MAP_TYPE_ARRAY)
10202 			return -EOPNOTSUPP;
10203 
10204 		mmap_old_sz = bpf_map_mmap_sz(map);
10205 		mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
10206 		err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10207 		if (err) {
10208 			pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10209 				bpf_map__name(map), err);
10210 			return err;
10211 		}
10212 		err = map_btf_datasec_resize(map, size);
10213 		if (err && err != -ENOENT) {
10214 			pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10215 				bpf_map__name(map), err);
10216 			map->btf_value_type_id = 0;
10217 			map->btf_key_type_id = 0;
10218 		}
10219 	}
10220 
10221 	map->def.value_size = size;
10222 	return 0;
10223 }
10224 
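/* Usage sketch (illustrative only): resizing a global-data map whose last
 * variable is an array, before bpf_object__load(). The map name below is
 * hypothetical:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, ".data.my_buf");
 *	err = bpf_map__set_value_size(m, new_size); // also fixes up BTF datasec
 */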
10225 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
10226 {
10227 	return map ? map->btf_key_type_id : 0;
10228 }
10229 
10230 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
10231 {
10232 	return map ? map->btf_value_type_id : 0;
10233 }
10234 
10235 int bpf_map__set_initial_value(struct bpf_map *map,
10236 			       const void *data, size_t size)
10237 {
10238 	size_t actual_sz;
10239 
10240 	if (map->obj->loaded || map->reused)
10241 		return libbpf_err(-EBUSY);
10242 
10243 	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10244 		return libbpf_err(-EINVAL);
10245 
10246 	if (map->def.type == BPF_MAP_TYPE_ARENA)
10247 		actual_sz = map->obj->arena_data_sz;
10248 	else
10249 		actual_sz = map->def.value_size;
10250 	if (size != actual_sz)
10251 		return libbpf_err(-EINVAL);
10252 
10253 	memcpy(map->mmaped, data, size);
10254 	return 0;
10255 }
10256 
10257 void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
10258 {
10259 	if (bpf_map__is_struct_ops(map)) {
10260 		if (psize)
10261 			*psize = map->def.value_size;
10262 		return map->st_ops->data;
10263 	}
10264 
10265 	if (!map->mmaped)
10266 		return NULL;
10267 
10268 	if (map->def.type == BPF_MAP_TYPE_ARENA)
10269 		*psize = map->obj->arena_data_sz;
10270 	else
10271 		*psize = map->def.value_size;
10272 
10273 	return map->mmaped;
10274 }
10275 
10276 bool bpf_map__is_internal(const struct bpf_map *map)
10277 {
10278 	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10279 }
10280 
10281 __u32 bpf_map__ifindex(const struct bpf_map *map)
10282 {
10283 	return map->map_ifindex;
10284 }
10285 
10286 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
10287 {
10288 	if (map_is_created(map))
10289 		return libbpf_err(-EBUSY);
10290 	map->map_ifindex = ifindex;
10291 	return 0;
10292 }
10293 
10294 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10295 {
10296 	if (!bpf_map_type__is_map_in_map(map->def.type)) {
10297 		pr_warn("error: unsupported map type\n");
10298 		return libbpf_err(-EINVAL);
10299 	}
10300 	if (map->inner_map_fd != -1) {
10301 		pr_warn("error: inner_map_fd already specified\n");
10302 		return libbpf_err(-EINVAL);
10303 	}
10304 	if (map->inner_map) {
10305 		bpf_map__destroy(map->inner_map);
10306 		zfree(&map->inner_map);
10307 	}
10308 	map->inner_map_fd = fd;
10309 	return 0;
10310 }
10311 
10312 static struct bpf_map *
10313 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10314 {
10315 	ssize_t idx;
10316 	struct bpf_map *s, *e;
10317 
10318 	if (!obj || !obj->maps)
10319 		return errno = EINVAL, NULL;
10320 
10321 	s = obj->maps;
10322 	e = obj->maps + obj->nr_maps;
10323 
10324 	if ((m < s) || (m >= e)) {
10325 		pr_warn("error in %s: map handler doesn't belong to object\n",
10326 			 __func__);
10327 		return errno = EINVAL, NULL;
10328 	}
10329 
10330 	idx = (m - obj->maps) + i;
10331 	if (idx >= obj->nr_maps || idx < 0)
10332 		return NULL;
10333 	return &obj->maps[idx];
10334 }
10335 
10336 struct bpf_map *
10337 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10338 {
10339 	if (prev == NULL)
10340 		return obj->maps;
10341 
10342 	return __bpf_map__iter(prev, obj, 1);
10343 }
10344 
10345 struct bpf_map *
10346 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10347 {
10348 	if (next == NULL) {
10349 		if (!obj->nr_maps)
10350 			return NULL;
10351 		return obj->maps + obj->nr_maps - 1;
10352 	}
10353 
10354 	return __bpf_map__iter(next, obj, -1);
10355 }
10356 
10357 struct bpf_map *
10358 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10359 {
10360 	struct bpf_map *pos;
10361 
10362 	bpf_object__for_each_map(pos, obj) {
10363 		/* if it's a special internal map name (which always starts
10364 		 * with dot) then check if that special name matches the
10365 		 * real map name (ELF section name)
10366 		 */
10367 		if (name[0] == '.') {
10368 			if (pos->real_name && strcmp(pos->real_name, name) == 0)
10369 				return pos;
10370 			continue;
10371 		}
10372 		/* otherwise map name has to be an exact match */
10373 		if (map_uses_real_name(pos)) {
10374 			if (strcmp(pos->real_name, name) == 0)
10375 				return pos;
10376 			continue;
10377 		}
10378 		if (strcmp(pos->name, name) == 0)
10379 			return pos;
10380 	}
10381 	return errno = ENOENT, NULL;
10382 }
10383 
10384 int
10385 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10386 {
10387 	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10388 }
10389 
10390 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10391 			   size_t value_sz, bool check_value_sz)
10392 {
10393 	if (!map_is_created(map)) /* map is not yet created */
10394 		return -ENOENT;
10395 
10396 	if (map->def.key_size != key_sz) {
10397 		pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10398 			map->name, key_sz, map->def.key_size);
10399 		return -EINVAL;
10400 	}
10401 
10402 	if (map->fd < 0) {
10403 		pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
10404 		return -EINVAL;
10405 	}
10406 
10407 	if (!check_value_sz)
10408 		return 0;
10409 
10410 	switch (map->def.type) {
10411 	case BPF_MAP_TYPE_PERCPU_ARRAY:
10412 	case BPF_MAP_TYPE_PERCPU_HASH:
10413 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10414 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10415 		int num_cpu = libbpf_num_possible_cpus();
10416 		size_t elem_sz = roundup(map->def.value_size, 8);
10417 
10418 		if (value_sz != num_cpu * elem_sz) {
10419 			pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10420 				map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10421 			return -EINVAL;
10422 		}
10423 		break;
10424 	}
10425 	default:
10426 		if (map->def.value_size != value_sz) {
10427 			pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10428 				map->name, value_sz, map->def.value_size);
10429 			return -EINVAL;
10430 		}
10431 		break;
10432 	}
10433 	return 0;
10434 }
10435 
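/* Usage sketch (illustrative only) for per-CPU maps: the value buffer must
 * cover all possible CPUs, with each element padded up to 8 bytes. Variable
 * names below are hypothetical:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	size_t elem_sz = roundup(bpf_map__value_size(map), 8);
 *	void *vals = calloc(ncpus, elem_sz);
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key), vals, ncpus * elem_sz, 0);
 */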
10436 int bpf_map__lookup_elem(const struct bpf_map *map,
10437 			 const void *key, size_t key_sz,
10438 			 void *value, size_t value_sz, __u64 flags)
10439 {
10440 	int err;
10441 
10442 	err = validate_map_op(map, key_sz, value_sz, true);
10443 	if (err)
10444 		return libbpf_err(err);
10445 
10446 	return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10447 }
10448 
10449 int bpf_map__update_elem(const struct bpf_map *map,
10450 			 const void *key, size_t key_sz,
10451 			 const void *value, size_t value_sz, __u64 flags)
10452 {
10453 	int err;
10454 
10455 	err = validate_map_op(map, key_sz, value_sz, true);
10456 	if (err)
10457 		return libbpf_err(err);
10458 
10459 	return bpf_map_update_elem(map->fd, key, value, flags);
10460 }
10461 
10462 int bpf_map__delete_elem(const struct bpf_map *map,
10463 			 const void *key, size_t key_sz, __u64 flags)
10464 {
10465 	int err;
10466 
10467 	err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10468 	if (err)
10469 		return libbpf_err(err);
10470 
10471 	return bpf_map_delete_elem_flags(map->fd, key, flags);
10472 }
10473 
10474 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10475 				    const void *key, size_t key_sz,
10476 				    void *value, size_t value_sz, __u64 flags)
10477 {
10478 	int err;
10479 
10480 	err = validate_map_op(map, key_sz, value_sz, true);
10481 	if (err)
10482 		return libbpf_err(err);
10483 
10484 	return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10485 }
10486 
10487 int bpf_map__get_next_key(const struct bpf_map *map,
10488 			  const void *cur_key, void *next_key, size_t key_sz)
10489 {
10490 	int err;
10491 
10492 	err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10493 	if (err)
10494 		return libbpf_err(err);
10495 
10496 	return bpf_map_get_next_key(map->fd, cur_key, next_key);
10497 }
10498 
10499 long libbpf_get_error(const void *ptr)
10500 {
10501 	if (!IS_ERR_OR_NULL(ptr))
10502 		return 0;
10503 
10504 	if (IS_ERR(ptr))
10505 		errno = -PTR_ERR(ptr);
10506 
10507 	/* If ptr == NULL, then errno should be already set by the failing
10508 	 * API, because libbpf never returns NULL on success and it now always
10509 	 * sets errno on error. So no extra errno handling for ptr == NULL
10510 	 * case.
10511 	 */
10512 	return -errno;
10513 }
10514 
10515 /* Replace link's underlying BPF program with the new one */
10516 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10517 {
10518 	int ret;
10519 	int prog_fd = bpf_program__fd(prog);
10520 
10521 	if (prog_fd < 0) {
10522 		pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n",
10523 			prog->name);
10524 		return libbpf_err(-EINVAL);
10525 	}
10526 
10527 	ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL);
10528 	return libbpf_err_errno(ret);
10529 }
10530 
10531 /* Release "ownership" of underlying BPF resource (typically, BPF program
10532  * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
10533  * link, when destroyed through a bpf_link__destroy() call, won't attempt to
10534  * detach/unregister that BPF resource. This is useful in situations where,
10535  * say, the attached BPF program has to outlive the userspace program that
10536  * attached it. Depending on the type of BPF program, though, there might be
10537  * additional steps (like pinning BPF program in BPF FS) necessary to ensure
10538  * exit of userspace program doesn't trigger automatic detachment and clean up
10539  * inside the kernel.
10540  */
10541 void bpf_link__disconnect(struct bpf_link *link)
10542 {
10543 	link->disconnected = true;
10544 }
10545 
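/* Usage sketch (illustrative only): keep the underlying attachment alive past
 * process exit by pinning the link and disconnecting it before destroying.
 * The pin path is hypothetical:
 *
 *	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link); // frees memory, does not detach the pinned link
 */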
10546 int bpf_link__destroy(struct bpf_link *link)
10547 {
10548 	int err = 0;
10549 
10550 	if (IS_ERR_OR_NULL(link))
10551 		return 0;
10552 
10553 	if (!link->disconnected && link->detach)
10554 		err = link->detach(link);
10555 	if (link->pin_path)
10556 		free(link->pin_path);
10557 	if (link->dealloc)
10558 		link->dealloc(link);
10559 	else
10560 		free(link);
10561 
10562 	return libbpf_err(err);
10563 }
10564 
10565 int bpf_link__fd(const struct bpf_link *link)
10566 {
10567 	return link->fd;
10568 }
10569 
10570 const char *bpf_link__pin_path(const struct bpf_link *link)
10571 {
10572 	return link->pin_path;
10573 }
10574 
10575 static int bpf_link__detach_fd(struct bpf_link *link)
10576 {
10577 	return libbpf_err_errno(close(link->fd));
10578 }
10579 
10580 struct bpf_link *bpf_link__open(const char *path)
10581 {
10582 	struct bpf_link *link;
10583 	int fd;
10584 
10585 	fd = bpf_obj_get(path);
10586 	if (fd < 0) {
10587 		fd = -errno;
10588 		pr_warn("failed to open link at %s: %d\n", path, fd);
10589 		return libbpf_err_ptr(fd);
10590 	}
10591 
10592 	link = calloc(1, sizeof(*link));
10593 	if (!link) {
10594 		close(fd);
10595 		return libbpf_err_ptr(-ENOMEM);
10596 	}
10597 	link->detach = &bpf_link__detach_fd;
10598 	link->fd = fd;
10599 
10600 	link->pin_path = strdup(path);
10601 	if (!link->pin_path) {
10602 		bpf_link__destroy(link);
10603 		return libbpf_err_ptr(-ENOMEM);
10604 	}
10605 
10606 	return link;
10607 }
10608 
10609 int bpf_link__detach(struct bpf_link *link)
10610 {
10611 	return bpf_link_detach(link->fd) ? -errno : 0;
10612 }
10613 
10614 int bpf_link__pin(struct bpf_link *link, const char *path)
10615 {
10616 	int err;
10617 
10618 	if (link->pin_path)
10619 		return libbpf_err(-EBUSY);
10620 	err = make_parent_dir(path);
10621 	if (err)
10622 		return libbpf_err(err);
10623 	err = check_path(path);
10624 	if (err)
10625 		return libbpf_err(err);
10626 
10627 	link->pin_path = strdup(path);
10628 	if (!link->pin_path)
10629 		return libbpf_err(-ENOMEM);
10630 
10631 	if (bpf_obj_pin(link->fd, link->pin_path)) {
10632 		err = -errno;
10633 		zfree(&link->pin_path);
10634 		return libbpf_err(err);
10635 	}
10636 
10637 	pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10638 	return 0;
10639 }
10640 
10641 int bpf_link__unpin(struct bpf_link *link)
10642 {
10643 	int err;
10644 
10645 	if (!link->pin_path)
10646 		return libbpf_err(-EINVAL);
10647 
10648 	err = unlink(link->pin_path);
10649 	if (err != 0)
10650 		return -errno;
10651 
10652 	pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10653 	zfree(&link->pin_path);
10654 	return 0;
10655 }
10656 
10657 struct bpf_link_perf {
10658 	struct bpf_link link;
10659 	int perf_event_fd;
10660 	/* legacy kprobe support: keep track of probe identifier and type */
10661 	char *legacy_probe_name;
10662 	bool legacy_is_kprobe;
10663 	bool legacy_is_retprobe;
10664 };
10665 
10666 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
10667 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
10668 
10669 static int bpf_link_perf_detach(struct bpf_link *link)
10670 {
10671 	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10672 	int err = 0;
10673 
10674 	if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10675 		err = -errno;
10676 
10677 	if (perf_link->perf_event_fd != link->fd)
10678 		close(perf_link->perf_event_fd);
10679 	close(link->fd);
10680 
10681 	/* legacy uprobe/kprobe needs to be removed after perf event fd closure */
10682 	if (perf_link->legacy_probe_name) {
10683 		if (perf_link->legacy_is_kprobe) {
10684 			err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10685 							 perf_link->legacy_is_retprobe);
10686 		} else {
10687 			err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10688 							 perf_link->legacy_is_retprobe);
10689 		}
10690 	}
10691 
10692 	return err;
10693 }
10694 
10695 static void bpf_link_perf_dealloc(struct bpf_link *link)
10696 {
10697 	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10698 
10699 	free(perf_link->legacy_probe_name);
10700 	free(perf_link);
10701 }
10702 
10703 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
10704 						     const struct bpf_perf_event_opts *opts)
10705 {
10706 	char errmsg[STRERR_BUFSIZE];
10707 	struct bpf_link_perf *link;
10708 	int prog_fd, link_fd = -1, err;
10709 	bool force_ioctl_attach;
10710 
10711 	if (!OPTS_VALID(opts, bpf_perf_event_opts))
10712 		return libbpf_err_ptr(-EINVAL);
10713 
10714 	if (pfd < 0) {
10715 		pr_warn("prog '%s': invalid perf event FD %d\n",
10716 			prog->name, pfd);
10717 		return libbpf_err_ptr(-EINVAL);
10718 	}
10719 	prog_fd = bpf_program__fd(prog);
10720 	if (prog_fd < 0) {
10721 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
10722 			prog->name);
10723 		return libbpf_err_ptr(-EINVAL);
10724 	}
10725 
10726 	link = calloc(1, sizeof(*link));
10727 	if (!link)
10728 		return libbpf_err_ptr(-ENOMEM);
10729 	link->link.detach = &bpf_link_perf_detach;
10730 	link->link.dealloc = &bpf_link_perf_dealloc;
10731 	link->perf_event_fd = pfd;
10732 
10733 	force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10734 	if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
10735 		DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10736 			.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10737 
10738 		link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
10739 		if (link_fd < 0) {
10740 			err = -errno;
10741 			pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10742 				prog->name, pfd,
10743 				err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10744 			goto err_out;
10745 		}
10746 		link->link.fd = link_fd;
10747 	} else {
10748 		if (OPTS_GET(opts, bpf_cookie, 0)) {
10749 			pr_warn("prog '%s': user context value is not supported\n", prog->name);
10750 			err = -EOPNOTSUPP;
10751 			goto err_out;
10752 		}
10753 
10754 		if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10755 			err = -errno;
10756 			pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10757 				prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10758 			if (err == -EPROTO)
10759 				pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10760 					prog->name, pfd);
10761 			goto err_out;
10762 		}
10763 		link->link.fd = pfd;
10764 	}
10765 	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10766 		err = -errno;
10767 		pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
10768 			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10769 		goto err_out;
10770 	}
10771 
10772 	return &link->link;
10773 err_out:
10774 	if (link_fd >= 0)
10775 		close(link_fd);
10776 	free(link);
10777 	return libbpf_err_ptr(err);
10778 }
10779 
10780 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
10781 {
10782 	return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10783 }
10784 
10785 /*
10786  * This function is expected to parse an integer in the range of [0, 2^31-1]
10787  * from the given file using scanf format string fmt. If the actual parsed
10788  * value is negative, the result might be indistinguishable from an error.
10789  */
10790 static int parse_uint_from_file(const char *file, const char *fmt)
10791 {
10792 	char buf[STRERR_BUFSIZE];
10793 	int err, ret;
10794 	FILE *f;
10795 
10796 	f = fopen(file, "re");
10797 	if (!f) {
10798 		err = -errno;
10799 		pr_debug("failed to open '%s': %s\n", file,
10800 			 libbpf_strerror_r(err, buf, sizeof(buf)));
10801 		return err;
10802 	}
10803 	err = fscanf(f, fmt, &ret);
10804 	if (err != 1) {
10805 		err = err == EOF ? -EIO : -errno;
10806 		pr_debug("failed to parse '%s': %s\n", file,
10807 			libbpf_strerror_r(err, buf, sizeof(buf)));
10808 		fclose(f);
10809 		return err;
10810 	}
10811 	fclose(f);
10812 	return ret;
10813 }
10814 
10815 static int determine_kprobe_perf_type(void)
10816 {
10817 	const char *file = "/sys/bus/event_source/devices/kprobe/type";
10818 
10819 	return parse_uint_from_file(file, "%d\n");
10820 }
10821 
10822 static int determine_uprobe_perf_type(void)
10823 {
10824 	const char *file = "/sys/bus/event_source/devices/uprobe/type";
10825 
10826 	return parse_uint_from_file(file, "%d\n");
10827 }
10828 
10829 static int determine_kprobe_retprobe_bit(void)
10830 {
10831 	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10832 
10833 	return parse_uint_from_file(file, "config:%d\n");
10834 }
10835 
10836 static int determine_uprobe_retprobe_bit(void)
10837 {
10838 	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10839 
10840 	return parse_uint_from_file(file, "config:%d\n");
10841 }
10842 
10843 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10844 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
10845 
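/* Create a kprobe/uprobe perf event through the dynamic "kprobe"/"uprobe"
 * PMUs exposed under /sys/bus/event_source/devices/; returns the perf event
 * FD on success or -errno on failure.
 */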
10846 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10847 				 uint64_t offset, int pid, size_t ref_ctr_off)
10848 {
10849 	const size_t attr_sz = sizeof(struct perf_event_attr);
10850 	struct perf_event_attr attr;
10851 	char errmsg[STRERR_BUFSIZE];
10852 	int type, pfd;
10853 
10854 	if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
10855 		return -EINVAL;
10856 
10857 	memset(&attr, 0, attr_sz);
10858 
10859 	type = uprobe ? determine_uprobe_perf_type()
10860 		      : determine_kprobe_perf_type();
10861 	if (type < 0) {
10862 		pr_warn("failed to determine %s perf type: %s\n",
10863 			uprobe ? "uprobe" : "kprobe",
10864 			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10865 		return type;
10866 	}
10867 	if (retprobe) {
10868 		int bit = uprobe ? determine_uprobe_retprobe_bit()
10869 				 : determine_kprobe_retprobe_bit();
10870 
10871 		if (bit < 0) {
10872 			pr_warn("failed to determine %s retprobe bit: %s\n",
10873 				uprobe ? "uprobe" : "kprobe",
10874 				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10875 			return bit;
10876 		}
10877 		attr.config |= 1 << bit;
10878 	}
10879 	attr.size = attr_sz;
10880 	attr.type = type;
10881 	attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10882 	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10883 	attr.config2 = offset;		 /* kprobe_addr or probe_offset */
10884 
10885 	/* pid filter is meaningful only for uprobes */
10886 	pfd = syscall(__NR_perf_event_open, &attr,
10887 		      pid < 0 ? -1 : pid /* pid */,
10888 		      pid == -1 ? 0 : -1 /* cpu */,
10889 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10890 	return pfd >= 0 ? pfd : -errno;
10891 }
10892 
10893 static int append_to_file(const char *file, const char *fmt, ...)
10894 {
10895 	int fd, n, err = 0;
10896 	va_list ap;
10897 	char buf[1024];
10898 
10899 	va_start(ap, fmt);
10900 	n = vsnprintf(buf, sizeof(buf), fmt, ap);
10901 	va_end(ap);
10902 
10903 	if (n < 0 || n >= sizeof(buf))
10904 		return -EINVAL;
10905 
10906 	fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
10907 	if (fd < 0)
10908 		return -errno;
10909 
10910 	if (write(fd, buf, n) < 0)
10911 		err = -errno;
10912 
10913 	close(fd);
10914 	return err;
10915 }
10916 
10917 #define DEBUGFS "/sys/kernel/debug/tracing"
10918 #define TRACEFS "/sys/kernel/tracing"
10919 
10920 static bool use_debugfs(void)
10921 {
10922 	static int has_debugfs = -1;
10923 
10924 	if (has_debugfs < 0)
10925 		has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
10926 
10927 	return has_debugfs == 1;
10928 }
10929 
10930 static const char *tracefs_path(void)
10931 {
10932 	return use_debugfs() ? DEBUGFS : TRACEFS;
10933 }
10934 
10935 static const char *tracefs_kprobe_events(void)
10936 {
10937 	return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10938 }
10939 
10940 static const char *tracefs_uprobe_events(void)
10941 {
10942 	return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10943 }
10944 
10945 static const char *tracefs_available_filter_functions(void)
10946 {
10947 	return use_debugfs() ? DEBUGFS"/available_filter_functions"
10948 			     : TRACEFS"/available_filter_functions";
10949 }
10950 
10951 static const char *tracefs_available_filter_functions_addrs(void)
10952 {
10953 	return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10954 			     : TRACEFS"/available_filter_functions_addrs";
10955 }
10956 
10957 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10958 					 const char *kfunc_name, size_t offset)
10959 {
10960 	static int index = 0;
10961 	int i;
10962 
10963 	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10964 		 __sync_fetch_and_add(&index, 1));
10965 
10966 	/* sanitize the generated probe name (non-alphanumeric chars become '_') */
10967 	for (i = 0; buf[i]; i++) {
10968 		if (!isalnum(buf[i]))
10969 			buf[i] = '_';
10970 	}
10971 }
10972 
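/* Register a legacy [k,kret]probe by appending a definition line of the form
 * "p:kprobes/<probe_name> <kfunc>+0x<offset>" (or "r:kretprobes/..." for
 * return probes) to tracefs kprobe_events.
 */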
10973 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10974 				   const char *kfunc_name, size_t offset)
10975 {
10976 	return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
10977 			      retprobe ? 'r' : 'p',
10978 			      retprobe ? "kretprobes" : "kprobes",
10979 			      probe_name, kfunc_name, offset);
10980 }
10981 
10982 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10983 {
10984 	return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10985 			      retprobe ? "kretprobes" : "kprobes", probe_name);
10986 }
10987 
10988 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10989 {
10990 	char file[256];
10991 
10992 	snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10993 		 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
10994 
10995 	return parse_uint_from_file(file, "%d\n");
10996 }
10997 
10998 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10999 					 const char *kfunc_name, size_t offset, int pid)
11000 {
11001 	const size_t attr_sz = sizeof(struct perf_event_attr);
11002 	struct perf_event_attr attr;
11003 	char errmsg[STRERR_BUFSIZE];
11004 	int type, pfd, err;
11005 
11006 	err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
11007 	if (err < 0) {
11008 		pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
11009 			kfunc_name, offset,
11010 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11011 		return err;
11012 	}
11013 	type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
11014 	if (type < 0) {
11015 		err = type;
11016 		pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
11017 			kfunc_name, offset,
11018 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11019 		goto err_clean_legacy;
11020 	}
11021 
11022 	memset(&attr, 0, attr_sz);
11023 	attr.size = attr_sz;
11024 	attr.config = type;
11025 	attr.type = PERF_TYPE_TRACEPOINT;
11026 
11027 	pfd = syscall(__NR_perf_event_open, &attr,
11028 		      pid < 0 ? -1 : pid, /* pid */
11029 		      pid == -1 ? 0 : -1, /* cpu */
11030 		      -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
11031 	if (pfd < 0) {
11032 		err = -errno;
11033 		pr_warn("legacy kprobe perf_event_open() failed: %s\n",
11034 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11035 		goto err_clean_legacy;
11036 	}
11037 	return pfd;
11038 
11039 err_clean_legacy:
11040 	/* Clear the newly added legacy kprobe_event */
11041 	remove_kprobe_event_legacy(probe_name, retprobe);
11042 	return err;
11043 }
11044 
11045 static const char *arch_specific_syscall_pfx(void)
11046 {
11047 #if defined(__x86_64__)
11048 	return "x64";
11049 #elif defined(__i386__)
11050 	return "ia32";
11051 #elif defined(__s390x__)
11052 	return "s390x";
11053 #elif defined(__s390__)
11054 	return "s390";
11055 #elif defined(__arm__)
11056 	return "arm";
11057 #elif defined(__aarch64__)
11058 	return "arm64";
11059 #elif defined(__mips__)
11060 	return "mips";
11061 #elif defined(__riscv)
11062 	return "riscv";
11063 #elif defined(__powerpc__)
11064 	return "powerpc";
11065 #elif defined(__powerpc64__)
11066 	return "powerpc64";
11067 #else
11068 	return NULL;
11069 #endif
11070 }
11071 
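/* Feature probe: detect whether the kernel uses arch-specific syscall
 * wrappers (e.g. __x64_sys_bpf) by attempting to create a kprobe on one;
 * returns 1 if such a wrapper symbol is probeable, 0 otherwise.
 */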
11072 int probe_kern_syscall_wrapper(int token_fd)
11073 {
11074 	char syscall_name[64];
11075 	const char *ksys_pfx;
11076 
11077 	ksys_pfx = arch_specific_syscall_pfx();
11078 	if (!ksys_pfx)
11079 		return 0;
11080 
11081 	snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
11082 
11083 	if (determine_kprobe_perf_type() >= 0) {
11084 		int pfd;
11085 
11086 		pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
11087 		if (pfd >= 0)
11088 			close(pfd);
11089 
11090 		return pfd >= 0 ? 1 : 0;
11091 	} else { /* legacy mode */
11092 		char probe_name[128];
11093 
11094 		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
11095 		if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
11096 			return 0;
11097 
11098 		(void)remove_kprobe_event_legacy(probe_name, false);
11099 		return 1;
11100 	}
11101 }
11102 
11103 struct bpf_link *
11104 bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
11105 				const char *func_name,
11106 				const struct bpf_kprobe_opts *opts)
11107 {
11108 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11109 	enum probe_attach_mode attach_mode;
11110 	char errmsg[STRERR_BUFSIZE];
11111 	char *legacy_probe = NULL;
11112 	struct bpf_link *link;
11113 	size_t offset;
11114 	bool retprobe, legacy;
11115 	int pfd, err;
11116 
11117 	if (!OPTS_VALID(opts, bpf_kprobe_opts))
11118 		return libbpf_err_ptr(-EINVAL);
11119 
11120 	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11121 	retprobe = OPTS_GET(opts, retprobe, false);
11122 	offset = OPTS_GET(opts, offset, 0);
11123 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11124 
11125 	legacy = determine_kprobe_perf_type() < 0;
11126 	switch (attach_mode) {
11127 	case PROBE_ATTACH_MODE_LEGACY:
11128 		legacy = true;
11129 		pe_opts.force_ioctl_attach = true;
11130 		break;
11131 	case PROBE_ATTACH_MODE_PERF:
11132 		if (legacy)
11133 			return libbpf_err_ptr(-ENOTSUP);
11134 		pe_opts.force_ioctl_attach = true;
11135 		break;
11136 	case PROBE_ATTACH_MODE_LINK:
11137 		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11138 			return libbpf_err_ptr(-ENOTSUP);
11139 		break;
11140 	case PROBE_ATTACH_MODE_DEFAULT:
11141 		break;
11142 	default:
11143 		return libbpf_err_ptr(-EINVAL);
11144 	}
11145 
11146 	if (!legacy) {
11147 		pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11148 					    func_name, offset,
11149 					    -1 /* pid */, 0 /* ref_ctr_off */);
11150 	} else {
11151 		char probe_name[256];
11152 
11153 		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
11154 					     func_name, offset);
11155 
11156 		legacy_probe = strdup(probe_name);
11157 		if (!legacy_probe)
11158 			return libbpf_err_ptr(-ENOMEM);
11159 
11160 		pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
11161 						    offset, -1 /* pid */);
11162 	}
11163 	if (pfd < 0) {
11164 		err = -errno;
11165 		pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11166 			prog->name, retprobe ? "kretprobe" : "kprobe",
11167 			func_name, offset,
11168 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11169 		goto err_out;
11170 	}
11171 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11172 	err = libbpf_get_error(link);
11173 	if (err) {
11174 		close(pfd);
11175 		pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11176 			prog->name, retprobe ? "kretprobe" : "kprobe",
11177 			func_name, offset,
11178 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11179 		goto err_clean_legacy;
11180 	}
11181 	if (legacy) {
11182 		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11183 
11184 		perf_link->legacy_probe_name = legacy_probe;
11185 		perf_link->legacy_is_kprobe = true;
11186 		perf_link->legacy_is_retprobe = retprobe;
11187 	}
11188 
11189 	return link;
11190 
11191 err_clean_legacy:
11192 	if (legacy)
11193 		remove_kprobe_event_legacy(legacy_probe, retprobe);
11194 err_out:
11195 	free(legacy_probe);
11196 	return libbpf_err_ptr(err);
11197 }
11198 
11199 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
11200 					    bool retprobe,
11201 					    const char *func_name)
11202 {
11203 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
11204 		.retprobe = retprobe,
11205 	);
11206 
11207 	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11208 }
11209 
11210 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11211 					      const char *syscall_name,
11212 					      const struct bpf_ksyscall_opts *opts)
11213 {
11214 	LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11215 	char func_name[128];
11216 
11217 	if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11218 		return libbpf_err_ptr(-EINVAL);
11219 
11220 	if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11221 		/* arch_specific_syscall_pfx() should never return NULL here
11222 		 * because it is guarded by kernel_supports(). However, the
11223 		 * compiler does not know that, so keep an explicit fallback
11224 		 * as well.
11225 		 */
11226 		snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
11227 			 arch_specific_syscall_pfx() ? : "", syscall_name);
11228 	} else {
11229 		snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11230 	}
11231 
11232 	kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11233 	kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11234 
11235 	return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11236 }
11237 
11238 /* Adapted from perf/util/string.c */
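/* Supports '*' (any sequence, including empty) and '?' (any single char),
 * e.g. pattern "tcp_*" matches "tcp_sendmsg".
 */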
11239 bool glob_match(const char *str, const char *pat)
11240 {
11241 	while (*str && *pat && *pat != '*') {
11242 		if (*pat == '?') {      /* Matches any single character */
11243 			str++;
11244 			pat++;
11245 			continue;
11246 		}
11247 		if (*str != *pat)
11248 			return false;
11249 		str++;
11250 		pat++;
11251 	}
11252 	/* Check wild card */
11253 	if (*pat == '*') {
11254 		while (*pat == '*')
11255 			pat++;
11256 		if (!*pat) /* Tail wild card matches all */
11257 			return true;
11258 		while (*str)
11259 			if (glob_match(str++, pat))
11260 				return true;
11261 	}
11262 	return !*str && !*pat;
11263 }
11264 
11265 struct kprobe_multi_resolve {
11266 	const char *pattern;
11267 	unsigned long *addrs;
11268 	size_t cap;
11269 	size_t cnt;
11270 };
11271 
11272 struct avail_kallsyms_data {
11273 	char **syms;
11274 	size_t cnt;
11275 	struct kprobe_multi_resolve *res;
11276 };
11277 
11278 static int avail_func_cmp(const void *a, const void *b)
11279 {
11280 	return strcmp(*(const char **)a, *(const char **)b);
11281 }
11282 
11283 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11284 			     const char *sym_name, void *ctx)
11285 {
11286 	struct avail_kallsyms_data *data = ctx;
11287 	struct kprobe_multi_resolve *res = data->res;
11288 	int err;
11289 
11290 	if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
11291 		return 0;
11292 
11293 	err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
11294 	if (err)
11295 		return err;
11296 
11297 	res->addrs[res->cnt++] = (unsigned long)sym_addr;
11298 	return 0;
11299 }
11300 
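/* Resolve res->pattern by collecting matching names from tracefs
 * available_filter_functions, then walking kallsyms to translate those
 * names into addresses stored in res->addrs.
 */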
11301 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11302 {
11303 	const char *available_functions_file = tracefs_available_filter_functions();
11304 	struct avail_kallsyms_data data;
11305 	char sym_name[500];
11306 	FILE *f;
11307 	int err = 0, ret, i;
11308 	char **syms = NULL;
11309 	size_t cap = 0, cnt = 0;
11310 
11311 	f = fopen(available_functions_file, "re");
11312 	if (!f) {
11313 		err = -errno;
11314 		pr_warn("failed to open %s: %d\n", available_functions_file, err);
11315 		return err;
11316 	}
11317 
11318 	while (true) {
11319 		char *name;
11320 
11321 		ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11322 		if (ret == EOF && feof(f))
11323 			break;
11324 
11325 		if (ret != 1) {
11326 			pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11327 			err = -EINVAL;
11328 			goto cleanup;
11329 		}
11330 
11331 		if (!glob_match(sym_name, res->pattern))
11332 			continue;
11333 
11334 		err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11335 		if (err)
11336 			goto cleanup;
11337 
11338 		name = strdup(sym_name);
11339 		if (!name) {
11340 			err = -errno;
11341 			goto cleanup;
11342 		}
11343 
11344 		syms[cnt++] = name;
11345 	}
11346 
11347 	/* no entries found, bail out */
11348 	if (cnt == 0) {
11349 		err = -ENOENT;
11350 		goto cleanup;
11351 	}
11352 
11353 	/* sort available functions */
11354 	qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11355 
11356 	data.syms = syms;
11357 	data.res = res;
11358 	data.cnt = cnt;
11359 	libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11360 
11361 	if (res->cnt == 0)
11362 		err = -ENOENT;
11363 
11364 cleanup:
11365 	for (i = 0; i < cnt; i++)
11366 		free((char *)syms[i]);
11367 	free(syms);
11368 
11369 	fclose(f);
11370 	return err;
11371 }
11372 
11373 static bool has_available_filter_functions_addrs(void)
11374 {
11375 	return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11376 }
11377 
11378 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11379 {
11380 	const char *available_path = tracefs_available_filter_functions_addrs();
11381 	char sym_name[500];
11382 	FILE *f;
11383 	int ret, err = 0;
11384 	unsigned long long sym_addr;
11385 
11386 	f = fopen(available_path, "re");
11387 	if (!f) {
11388 		err = -errno;
11389 		pr_warn("failed to open %s: %d\n", available_path, err);
11390 		return err;
11391 	}
11392 
11393 	while (true) {
11394 		ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11395 		if (ret == EOF && feof(f))
11396 			break;
11397 
11398 		if (ret != 2) {
11399 			pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11400 				ret);
11401 			err = -EINVAL;
11402 			goto cleanup;
11403 		}
11404 
11405 		if (!glob_match(sym_name, res->pattern))
11406 			continue;
11407 
11408 		err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11409 					sizeof(*res->addrs), res->cnt + 1);
11410 		if (err)
11411 			goto cleanup;
11412 
11413 		res->addrs[res->cnt++] = (unsigned long)sym_addr;
11414 	}
11415 
11416 	if (res->cnt == 0)
11417 		err = -ENOENT;
11418 
11419 cleanup:
11420 	fclose(f);
11421 	return err;
11422 }
11423 
11424 struct bpf_link *
11425 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
11426 				      const char *pattern,
11427 				      const struct bpf_kprobe_multi_opts *opts)
11428 {
11429 	LIBBPF_OPTS(bpf_link_create_opts, lopts);
11430 	struct kprobe_multi_resolve res = {
11431 		.pattern = pattern,
11432 	};
11433 	enum bpf_attach_type attach_type;
11434 	struct bpf_link *link = NULL;
11435 	char errmsg[STRERR_BUFSIZE];
11436 	const unsigned long *addrs;
11437 	int err, link_fd, prog_fd;
11438 	bool retprobe, session;
11439 	const __u64 *cookies;
11440 	const char **syms;
11441 	size_t cnt;
11442 
11443 	if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
11444 		return libbpf_err_ptr(-EINVAL);
11445 
11446 	prog_fd = bpf_program__fd(prog);
11447 	if (prog_fd < 0) {
11448 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11449 			prog->name);
11450 		return libbpf_err_ptr(-EINVAL);
11451 	}
11452 
11453 	syms    = OPTS_GET(opts, syms, false);
11454 	addrs   = OPTS_GET(opts, addrs, false);
11455 	cnt     = OPTS_GET(opts, cnt, false);
11456 	cookies = OPTS_GET(opts, cookies, false);
11457 
11458 	if (!pattern && !addrs && !syms)
11459 		return libbpf_err_ptr(-EINVAL);
11460 	if (pattern && (addrs || syms || cookies || cnt))
11461 		return libbpf_err_ptr(-EINVAL);
11462 	if (!pattern && !cnt)
11463 		return libbpf_err_ptr(-EINVAL);
11464 	if (addrs && syms)
11465 		return libbpf_err_ptr(-EINVAL);
11466 
11467 	if (pattern) {
11468 		if (has_available_filter_functions_addrs())
11469 			err = libbpf_available_kprobes_parse(&res);
11470 		else
11471 			err = libbpf_available_kallsyms_parse(&res);
11472 		if (err)
11473 			goto error;
11474 		addrs = res.addrs;
11475 		cnt = res.cnt;
11476 	}
11477 
11478 	retprobe = OPTS_GET(opts, retprobe, false);
11479 	session  = OPTS_GET(opts, session, false);
11480 
11481 	if (retprobe && session)
11482 		return libbpf_err_ptr(-EINVAL);
11483 
11484 	attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI;
11485 
11486 	lopts.kprobe_multi.syms = syms;
11487 	lopts.kprobe_multi.addrs = addrs;
11488 	lopts.kprobe_multi.cookies = cookies;
11489 	lopts.kprobe_multi.cnt = cnt;
11490 	lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
11491 
11492 	link = calloc(1, sizeof(*link));
11493 	if (!link) {
11494 		err = -ENOMEM;
11495 		goto error;
11496 	}
11497 	link->detach = &bpf_link__detach_fd;
11498 
11499 	link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
11500 	if (link_fd < 0) {
11501 		err = -errno;
11502 		pr_warn("prog '%s': failed to attach: %s\n",
11503 			prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11504 		goto error;
11505 	}
11506 	link->fd = link_fd;
11507 	free(res.addrs);
11508 	return link;
11509 
11510 error:
11511 	free(link);
11512 	free(res.addrs);
11513 	return libbpf_err_ptr(err);
11514 }
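/* Usage sketch (illustrative only, not compiled as part of libbpf) of the two
 * mutually exclusive input modes accepted above, assuming a loaded
 * "prog"; symbol names and the pattern are hypothetical:
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
 *	struct bpf_link *link;
 *
 *	// mode 1: glob pattern, resolved via available_filter_functions_addrs
 *	// or kallsyms (see the parsing helpers above)
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
 *
 *	// mode 2: explicit symbol list; pattern must be NULL and cnt non-zero
 *	const char *syms[] = { "tcp_sendmsg", "tcp_recvmsg" };
 *	opts.syms = syms;
 *	opts.cnt = 2;
 *	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 */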
11515 
11516 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11517 {
11518 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
11519 	unsigned long offset = 0;
11520 	const char *func_name;
11521 	char *func;
11522 	int n;
11523 
11524 	*link = NULL;
11525 
11526 	/* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11527 	if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11528 		return 0;
11529 
11530 	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11531 	if (opts.retprobe)
11532 		func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11533 	else
11534 		func_name = prog->sec_name + sizeof("kprobe/") - 1;
11535 
11536 	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
11537 	if (n < 1) {
11538 		pr_warn("kprobe name is invalid: %s\n", func_name);
11539 		return -EINVAL;
11540 	}
11541 	if (opts.retprobe && offset != 0) {
11542 		free(func);
11543 		pr_warn("kretprobes do not support offset specification\n");
11544 		return -EINVAL;
11545 	}
11546 
11547 	opts.offset = offset;
11548 	*link = bpf_program__attach_kprobe_opts(prog, func, &opts);
11549 	free(func);
11550 	return libbpf_get_error(*link);
11551 }
11552 
11553 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11554 {
11555 	LIBBPF_OPTS(bpf_ksyscall_opts, opts);
11556 	const char *syscall_name;
11557 
11558 	*link = NULL;
11559 
11560 	/* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11561 	if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11562 		return 0;
11563 
11564 	opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11565 	if (opts.retprobe)
11566 		syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11567 	else
11568 		syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11569 
11570 	*link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
11571 	return *link ? 0 : -errno;
11572 }
11573 
11574 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11575 {
11576 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
11577 	const char *spec;
11578 	char *pattern;
11579 	int n;
11580 
11581 	*link = NULL;
11582 
11583 	/* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11584 	if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11585 	    strcmp(prog->sec_name, "kretprobe.multi") == 0)
11586 		return 0;
11587 
11588 	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11589 	if (opts.retprobe)
11590 		spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11591 	else
11592 		spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11593 
11594 	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11595 	if (n < 1) {
11596 		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
11597 		return -EINVAL;
11598 	}
11599 
11600 	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11601 	free(pattern);
11602 	return libbpf_get_error(*link);
11603 }
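/* Illustrative BPF-program-side sketch of the auto-attach section format
 * parsed above; the "tcp_*" pattern is hypothetical:
 *
 *	SEC("kprobe.multi/tcp_*")
 *	int handle_tcp(struct pt_regs *ctx) { return 0; }
 *
 * A bare SEC("kprobe.multi") is accepted at load time but is not
 * auto-attached.
 */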
11604 
11605 static int attach_kprobe_session(const struct bpf_program *prog, long cookie,
11606 				 struct bpf_link **link)
11607 {
11608 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true);
11609 	const char *spec;
11610 	char *pattern;
11611 	int n;
11612 
11613 	*link = NULL;
11614 
11615 	/* no auto-attach for SEC("kprobe.session") */
11616 	if (strcmp(prog->sec_name, "kprobe.session") == 0)
11617 		return 0;
11618 
11619 	spec = prog->sec_name + sizeof("kprobe.session/") - 1;
11620 	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11621 	if (n < 1) {
11622 		pr_warn("kprobe session pattern is invalid: %s\n", spec);
11623 		return -EINVAL;
11624 	}
11625 
11626 	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11627 	free(pattern);
11628 	return *link ? 0 : -errno;
11629 }
11630 
11631 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11632 {
11633 	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11634 	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
11635 	int n, ret = -EINVAL;
11636 
11637 	*link = NULL;
11638 
11639 	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
11640 		   &probe_type, &binary_path, &func_name);
11641 	switch (n) {
11642 	case 1:
11643 		/* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11644 		ret = 0;
11645 		break;
11646 	case 3:
11647 		opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
11648 		*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11649 		ret = libbpf_get_error(*link);
11650 		break;
11651 	default:
11652 		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11653 			prog->sec_name);
11654 		break;
11655 	}
11656 	free(probe_type);
11657 	free(binary_path);
11658 	free(func_name);
11659 	return ret;
11660 }
11661 
11662 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
11663 					 const char *binary_path, uint64_t offset)
11664 {
11665 	int i;
11666 
11667 	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
11668 
11669 	/* sanitize binary_path in the probe name */
11670 	for (i = 0; buf[i]; i++) {
11671 		if (!isalnum(buf[i]))
11672 			buf[i] = '_';
11673 	}
11674 }
11675 
11676 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
11677 					  const char *binary_path, size_t offset)
11678 {
11679 	return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
11680 			      retprobe ? 'r' : 'p',
11681 			      retprobe ? "uretprobes" : "uprobes",
11682 			      probe_name, binary_path, offset);
11683 }
11684 
11685 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
11686 {
11687 	return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
11688 			      retprobe ? "uretprobes" : "uprobes", probe_name);
11689 }
11690 
11691 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11692 {
11693 	char file[512];
11694 
11695 	snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11696 		 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
11697 
11698 	return parse_uint_from_file(file, "%d\n");
11699 }
11700 
11701 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
11702 					 const char *binary_path, size_t offset, int pid)
11703 {
11704 	const size_t attr_sz = sizeof(struct perf_event_attr);
11705 	struct perf_event_attr attr;
11706 	int type, pfd, err;
11707 
11708 	err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
11709 	if (err < 0) {
11710 		pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
11711 			binary_path, (size_t)offset, err);
11712 		return err;
11713 	}
11714 	type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
11715 	if (type < 0) {
11716 		err = type;
11717 		pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
11718 			binary_path, offset, err);
11719 		goto err_clean_legacy;
11720 	}
11721 
11722 	memset(&attr, 0, attr_sz);
11723 	attr.size = attr_sz;
11724 	attr.config = type;
11725 	attr.type = PERF_TYPE_TRACEPOINT;
11726 
11727 	pfd = syscall(__NR_perf_event_open, &attr,
11728 		      pid < 0 ? -1 : pid, /* pid */
11729 		      pid == -1 ? 0 : -1, /* cpu */
11730 		      -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
11731 	if (pfd < 0) {
11732 		err = -errno;
11733 		pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
11734 		goto err_clean_legacy;
11735 	}
11736 	return pfd;
11737 
11738 err_clean_legacy:
11739 	/* Clear the newly added legacy uprobe_event */
11740 	remove_uprobe_event_legacy(probe_name, retprobe);
11741 	return err;
11742 }
11743 
11744 /* Find offset of function name in archive specified by path. Currently
11745  * supported are .zip files that do not compress their contents, as used on
11746  * Android in the form of APKs, for example. "file_name" is the name of the ELF
11747  * file inside the archive. "func_name" matches symbol name or name@@LIB for
11748  * library functions.
11749  *
11750  * An overview of the APK format in particular is available here:
11751  * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
11752  */
11753 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
11754 					      const char *func_name)
11755 {
11756 	struct zip_archive *archive;
11757 	struct zip_entry entry;
11758 	long ret;
11759 	Elf *elf;
11760 
11761 	archive = zip_archive_open(archive_path);
11762 	if (IS_ERR(archive)) {
11763 		ret = PTR_ERR(archive);
11764 		pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
11765 		return ret;
11766 	}
11767 
11768 	ret = zip_archive_find_entry(archive, file_name, &entry);
11769 	if (ret) {
11770 		pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
11771 			archive_path, ret);
11772 		goto out;
11773 	}
11774 	pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
11775 		 (unsigned long)entry.data_offset);
11776 
11777 	if (entry.compression) {
11778 		pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
11779 			archive_path);
11780 		ret = -LIBBPF_ERRNO__FORMAT;
11781 		goto out;
11782 	}
11783 
11784 	elf = elf_memory((void *)entry.data, entry.data_length);
11785 	if (!elf) {
11786 		pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
11787 			elf_errmsg(-1));
11788 		ret = -LIBBPF_ERRNO__LIBELF;
11789 		goto out;
11790 	}
11791 
11792 	ret = elf_find_func_offset(elf, file_name, func_name);
11793 	if (ret > 0) {
11794 		pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
11795 			 func_name, file_name, archive_path, entry.data_offset, ret,
11796 			 ret + entry.data_offset);
11797 		ret += entry.data_offset;
11798 	}
11799 	elf_end(elf);
11800 
11801 out:
11802 	zip_archive_close(archive);
11803 	return ret;
11804 }
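/* Illustrative sketch (not part of libbpf itself): an uprobe target inside an
 * uncompressed zip/APK can be addressed with the "archive!/member" notation
 * handled by bpf_program__attach_uprobe_opts() below. The paths and function
 * name here are hypothetical:
 *
 *	LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "Java_com_example_foo");
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_opts(prog, -1,
 *			"/data/app/base.apk!/lib/arm64-v8a/libfoo.so", 0, &opts);
 */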
11805 
11806 static const char *arch_specific_lib_paths(void)
11807 {
11808 	/*
11809 	 * Based on https://packages.debian.org/sid/libc6.
11810 	 *
11811 	 * Assume that the traced program is built for the same architecture
11812 	 * as libbpf, which should cover the vast majority of cases.
11813 	 */
11814 #if defined(__x86_64__)
11815 	return "/lib/x86_64-linux-gnu";
11816 #elif defined(__i386__)
11817 	return "/lib/i386-linux-gnu";
11818 #elif defined(__s390x__)
11819 	return "/lib/s390x-linux-gnu";
11820 #elif defined(__s390__)
11821 	return "/lib/s390-linux-gnu";
11822 #elif defined(__arm__) && defined(__SOFTFP__)
11823 	return "/lib/arm-linux-gnueabi";
11824 #elif defined(__arm__) && !defined(__SOFTFP__)
11825 	return "/lib/arm-linux-gnueabihf";
11826 #elif defined(__aarch64__)
11827 	return "/lib/aarch64-linux-gnu";
11828 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11829 	return "/lib/mips64el-linux-gnuabi64";
11830 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11831 	return "/lib/mipsel-linux-gnu";
11832 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11833 	return "/lib/powerpc64le-linux-gnu";
11834 #elif defined(__sparc__) && defined(__arch64__)
11835 	return "/lib/sparc64-linux-gnu";
11836 #elif defined(__riscv) && __riscv_xlen == 64
11837 	return "/lib/riscv64-linux-gnu";
11838 #else
11839 	return NULL;
11840 #endif
11841 }
11842 
11843 /* Get full path to program/shared library. */
11844 static int resolve_full_path(const char *file, char *result, size_t result_sz)
11845 {
11846 	const char *search_paths[3] = {};
11847 	int i, perm;
11848 
11849 	if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
11850 		search_paths[0] = getenv("LD_LIBRARY_PATH");
11851 		search_paths[1] = "/usr/lib64:/usr/lib";
11852 		search_paths[2] = arch_specific_lib_paths();
11853 		perm = R_OK;
11854 	} else {
11855 		search_paths[0] = getenv("PATH");
11856 		search_paths[1] = "/usr/bin:/usr/sbin";
11857 		perm = R_OK | X_OK;
11858 	}
11859 
11860 	for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11861 		const char *s;
11862 
11863 		if (!search_paths[i])
11864 			continue;
11865 		for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11866 			char *next_path;
11867 			int seg_len;
11868 
11869 			if (s[0] == ':')
11870 				s++;
11871 			next_path = strchr(s, ':');
11872 			seg_len = next_path ? next_path - s : strlen(s);
11873 			if (!seg_len)
11874 				continue;
11875 			snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
11876 			/* ensure it has required permissions */
11877 			if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
11878 				continue;
11879 			pr_debug("resolved '%s' to '%s'\n", file, result);
11880 			return 0;
11881 		}
11882 	}
11883 	return -ENOENT;
11884 }
11885 
11886 struct bpf_link *
11887 bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
11888 				 pid_t pid,
11889 				 const char *path,
11890 				 const char *func_pattern,
11891 				 const struct bpf_uprobe_multi_opts *opts)
11892 {
11893 	const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
11894 	LIBBPF_OPTS(bpf_link_create_opts, lopts);
11895 	unsigned long *resolved_offsets = NULL;
11896 	int err = 0, link_fd, prog_fd;
11897 	struct bpf_link *link = NULL;
11898 	char errmsg[STRERR_BUFSIZE];
11899 	char full_path[PATH_MAX];
11900 	const __u64 *cookies;
11901 	const char **syms;
11902 	size_t cnt;
11903 
11904 	if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
11905 		return libbpf_err_ptr(-EINVAL);
11906 
11907 	prog_fd = bpf_program__fd(prog);
11908 	if (prog_fd < 0) {
11909 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11910 			prog->name);
11911 		return libbpf_err_ptr(-EINVAL);
11912 	}
11913 
11914 	syms = OPTS_GET(opts, syms, NULL);
11915 	offsets = OPTS_GET(opts, offsets, NULL);
11916 	ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
11917 	cookies = OPTS_GET(opts, cookies, NULL);
11918 	cnt = OPTS_GET(opts, cnt, 0);
11919 
11920 	/*
11921 	 * The user can specify two mutually exclusive sets of inputs:
11922 	 *
11923 	 * 1) use only path/func_pattern/pid arguments
11924 	 *
11925 	 * 2) use path/pid with allowed combinations of:
11926 	 *    syms/offsets/ref_ctr_offsets/cookies/cnt
11927 	 *
11928 	 *    - syms and offsets are mutually exclusive
11929 	 *    - ref_ctr_offsets and cookies are optional
11930 	 *
11931 	 * Any other usage results in error.
11932 	 */
11933 
11934 	if (!path)
11935 		return libbpf_err_ptr(-EINVAL);
11936 	if (!func_pattern && cnt == 0)
11937 		return libbpf_err_ptr(-EINVAL);
11938 
11939 	if (func_pattern) {
11940 		if (syms || offsets || ref_ctr_offsets || cookies || cnt)
11941 			return libbpf_err_ptr(-EINVAL);
11942 	} else {
11943 		if (!!syms == !!offsets)
11944 			return libbpf_err_ptr(-EINVAL);
11945 	}
11946 
11947 	if (func_pattern) {
11948 		if (!strchr(path, '/')) {
11949 			err = resolve_full_path(path, full_path, sizeof(full_path));
11950 			if (err) {
11951 				pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11952 					prog->name, path, err);
11953 				return libbpf_err_ptr(err);
11954 			}
11955 			path = full_path;
11956 		}
11957 
11958 		err = elf_resolve_pattern_offsets(path, func_pattern,
11959 						  &resolved_offsets, &cnt);
11960 		if (err < 0)
11961 			return libbpf_err_ptr(err);
11962 		offsets = resolved_offsets;
11963 	} else if (syms) {
11964 		err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
11965 		if (err < 0)
11966 			return libbpf_err_ptr(err);
11967 		offsets = resolved_offsets;
11968 	}
11969 
11970 	lopts.uprobe_multi.path = path;
11971 	lopts.uprobe_multi.offsets = offsets;
11972 	lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
11973 	lopts.uprobe_multi.cookies = cookies;
11974 	lopts.uprobe_multi.cnt = cnt;
11975 	lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
11976 
11977 	if (pid == 0)
11978 		pid = getpid();
11979 	if (pid > 0)
11980 		lopts.uprobe_multi.pid = pid;
11981 
11982 	link = calloc(1, sizeof(*link));
11983 	if (!link) {
11984 		err = -ENOMEM;
11985 		goto error;
11986 	}
11987 	link->detach = &bpf_link__detach_fd;
11988 
11989 	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
11990 	if (link_fd < 0) {
11991 		err = -errno;
11992 		pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
11993 			prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11994 		goto error;
11995 	}
11996 	link->fd = link_fd;
11997 	free(resolved_offsets);
11998 	return link;
11999 
12000 error:
12001 	free(resolved_offsets);
12002 	free(link);
12003 	return libbpf_err_ptr(err);
12004 }
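/* Usage sketch (illustrative only) of the two input modes described in the
 * comment inside the function above; the binary path and symbol names are
 * hypothetical:
 *
 *	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
 *	struct bpf_link *link;
 *
 *	// mode 1: glob pattern resolved against the ELF symbol table
 *	link = bpf_program__attach_uprobe_multi(prog, -1, "/usr/lib64/libc.so.6",
 *						"pthread_*", &opts);
 *
 *	// mode 2: explicit symbol list (syms and offsets are mutually exclusive)
 *	const char *syms[] = { "malloc", "free" };
 *	opts.syms = syms;
 *	opts.cnt = 2;
 *	link = bpf_program__attach_uprobe_multi(prog, -1, "/usr/lib64/libc.so.6",
 *						NULL, &opts);
 */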
12005 
12006 LIBBPF_API struct bpf_link *
12007 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
12008 				const char *binary_path, size_t func_offset,
12009 				const struct bpf_uprobe_opts *opts)
12010 {
12011 	const char *archive_path = NULL, *archive_sep = NULL;
12012 	char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
12013 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12014 	enum probe_attach_mode attach_mode;
12015 	char full_path[PATH_MAX];
12016 	struct bpf_link *link;
12017 	size_t ref_ctr_off;
12018 	int pfd, err;
12019 	bool retprobe, legacy;
12020 	const char *func_name;
12021 
12022 	if (!OPTS_VALID(opts, bpf_uprobe_opts))
12023 		return libbpf_err_ptr(-EINVAL);
12024 
12025 	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
12026 	retprobe = OPTS_GET(opts, retprobe, false);
12027 	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
12028 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12029 
12030 	if (!binary_path)
12031 		return libbpf_err_ptr(-EINVAL);
12032 
12033 	/* Check if "binary_path" refers to an archive. */
12034 	archive_sep = strstr(binary_path, "!/");
12035 	if (archive_sep) {
12036 		full_path[0] = '\0';
12037 		libbpf_strlcpy(full_path, binary_path,
12038 			       min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
12039 		archive_path = full_path;
12040 		binary_path = archive_sep + 2;
12041 	} else if (!strchr(binary_path, '/')) {
12042 		err = resolve_full_path(binary_path, full_path, sizeof(full_path));
12043 		if (err) {
12044 			pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12045 				prog->name, binary_path, err);
12046 			return libbpf_err_ptr(err);
12047 		}
12048 		binary_path = full_path;
12049 	}
12050 	func_name = OPTS_GET(opts, func_name, NULL);
12051 	if (func_name) {
12052 		long sym_off;
12053 
12054 		if (archive_path) {
12055 			sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
12056 								    func_name);
12057 			binary_path = archive_path;
12058 		} else {
12059 			sym_off = elf_find_func_offset_from_file(binary_path, func_name);
12060 		}
12061 		if (sym_off < 0)
12062 			return libbpf_err_ptr(sym_off);
12063 		func_offset += sym_off;
12064 	}
12065 
12066 	legacy = determine_uprobe_perf_type() < 0;
12067 	switch (attach_mode) {
12068 	case PROBE_ATTACH_MODE_LEGACY:
12069 		legacy = true;
12070 		pe_opts.force_ioctl_attach = true;
12071 		break;
12072 	case PROBE_ATTACH_MODE_PERF:
12073 		if (legacy)
12074 			return libbpf_err_ptr(-ENOTSUP);
12075 		pe_opts.force_ioctl_attach = true;
12076 		break;
12077 	case PROBE_ATTACH_MODE_LINK:
12078 		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
12079 			return libbpf_err_ptr(-ENOTSUP);
12080 		break;
12081 	case PROBE_ATTACH_MODE_DEFAULT:
12082 		break;
12083 	default:
12084 		return libbpf_err_ptr(-EINVAL);
12085 	}
12086 
12087 	if (!legacy) {
12088 		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
12089 					    func_offset, pid, ref_ctr_off);
12090 	} else {
12091 		char probe_name[PATH_MAX + 64];
12092 
12093 		if (ref_ctr_off)
12094 			return libbpf_err_ptr(-EINVAL);
12095 
12096 		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
12097 					     binary_path, func_offset);
12098 
12099 		legacy_probe = strdup(probe_name);
12100 		if (!legacy_probe)
12101 			return libbpf_err_ptr(-ENOMEM);
12102 
12103 		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
12104 						    binary_path, func_offset, pid);
12105 	}
12106 	if (pfd < 0) {
12107 		err = -errno;
12108 		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
12109 			prog->name, retprobe ? "uretprobe" : "uprobe",
12110 			binary_path, func_offset,
12111 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12112 		goto err_out;
12113 	}
12114 
12115 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12116 	err = libbpf_get_error(link);
12117 	if (err) {
12118 		close(pfd);
12119 		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
12120 			prog->name, retprobe ? "uretprobe" : "uprobe",
12121 			binary_path, func_offset,
12122 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12123 		goto err_clean_legacy;
12124 	}
12125 	if (legacy) {
12126 		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
12127 
12128 		perf_link->legacy_probe_name = legacy_probe;
12129 		perf_link->legacy_is_kprobe = false;
12130 		perf_link->legacy_is_retprobe = retprobe;
12131 	}
12132 	return link;
12133 
12134 err_clean_legacy:
12135 	if (legacy)
12136 		remove_uprobe_event_legacy(legacy_probe, retprobe);
12137 err_out:
12138 	free(legacy_probe);
12139 	return libbpf_err_ptr(err);
12140 }
12141 
12142 /* Format of u[ret]probe section definition supporting auto-attach:
12143  * u[ret]probe/binary:function[+offset]
12144  *
12145  * binary can be an absolute/relative path or a filename; the latter is resolved to a
12146  * full binary path via bpf_program__attach_uprobe_opts.
12147  *
12148  * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
12149  * specified (and auto-attach is not possible) or the above format is specified for
12150  * auto-attach.
12151  */
12152 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12153 {
12154 	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
12155 	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
12156 	int n, c, ret = -EINVAL;
12157 	long offset = 0;
12158 
12159 	*link = NULL;
12160 
12161 	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12162 		   &probe_type, &binary_path, &func_name);
12163 	switch (n) {
12164 	case 1:
12165 		/* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12166 		ret = 0;
12167 		break;
12168 	case 2:
12169 		pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
12170 			prog->name, prog->sec_name);
12171 		break;
12172 	case 3:
12173 		/* check if user specifies `+offset`, if yes, this should be
12174 		 * the last part of the string, make sure sscanf read to EOL
12175 		 */
12176 		func_off = strrchr(func_name, '+');
12177 		if (func_off) {
12178 			n = sscanf(func_off, "+%li%n", &offset, &c);
12179 			if (n == 1 && *(func_off + c) == '\0')
12180 				func_off[0] = '\0';
12181 			else
12182 				offset = 0;
12183 		}
12184 		opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
12185 				strcmp(probe_type, "uretprobe.s") == 0;
12186 		if (opts.retprobe && offset != 0) {
12187 			pr_warn("prog '%s': uretprobes do not support offset specification\n",
12188 				prog->name);
12189 			break;
12190 		}
12191 		opts.func_name = func_name;
12192 		*link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12193 		ret = libbpf_get_error(*link);
12194 		break;
12195 	default:
12196 		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
12197 			prog->sec_name);
12198 		break;
12199 	}
12200 	free(probe_type);
12201 	free(binary_path);
12202 	free(func_name);
12203 
12204 	return ret;
12205 }
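/* Illustrative BPF-program-side example of the auto-attach section format
 * documented above attach_uprobe(); the binary path, function and offset are
 * hypothetical:
 *
 *	SEC("uprobe//usr/lib64/libc.so.6:malloc+0x10")
 *	int handle_malloc(struct pt_regs *ctx) { return 0; }
 *
 * A SEC("uretprobe/...") variant attaches a return probe and, as enforced
 * above, must not carry a "+offset" suffix.
 */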
12206 
12207 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
12208 					    bool retprobe, pid_t pid,
12209 					    const char *binary_path,
12210 					    size_t func_offset)
12211 {
12212 	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
12213 
12214 	return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
12215 }
12216 
12217 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
12218 					  pid_t pid, const char *binary_path,
12219 					  const char *usdt_provider, const char *usdt_name,
12220 					  const struct bpf_usdt_opts *opts)
12221 {
12222 	char resolved_path[512];
12223 	struct bpf_object *obj = prog->obj;
12224 	struct bpf_link *link;
12225 	__u64 usdt_cookie;
12226 	int err;
12227 
12228 	if (!OPTS_VALID(opts, bpf_uprobe_opts))
12229 		return libbpf_err_ptr(-EINVAL);
12230 
12231 	if (bpf_program__fd(prog) < 0) {
12232 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12233 			prog->name);
12234 		return libbpf_err_ptr(-EINVAL);
12235 	}
12236 
12237 	if (!binary_path)
12238 		return libbpf_err_ptr(-EINVAL);
12239 
12240 	if (!strchr(binary_path, '/')) {
12241 		err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12242 		if (err) {
12243 			pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12244 				prog->name, binary_path, err);
12245 			return libbpf_err_ptr(err);
12246 		}
12247 		binary_path = resolved_path;
12248 	}
12249 
12250 	/* USDT manager is instantiated lazily on first USDT attach. It will
12251 	 * be destroyed together with BPF object in bpf_object__close().
12252 	 */
12253 	if (IS_ERR(obj->usdt_man))
12254 		return libbpf_ptr(obj->usdt_man);
12255 	if (!obj->usdt_man) {
12256 		obj->usdt_man = usdt_manager_new(obj);
12257 		if (IS_ERR(obj->usdt_man))
12258 			return libbpf_ptr(obj->usdt_man);
12259 	}
12260 
12261 	usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12262 	link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
12263 					usdt_provider, usdt_name, usdt_cookie);
12264 	err = libbpf_get_error(link);
12265 	if (err)
12266 		return libbpf_err_ptr(err);
12267 	return link;
12268 }
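/* Usage sketch (illustrative only); the binary, provider and probe names are
 * hypothetical:
 *
 *	link = bpf_program__attach_usdt(prog, -1, "/usr/sbin/mysqld",
 *					"mysql", "query__start", NULL);
 *
 * The equivalent auto-attach section form is
 * SEC("usdt//usr/sbin/mysqld:mysql:query__start"), parsed by attach_usdt()
 * below.
 */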
12269 
12270 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12271 {
12272 	char *path = NULL, *provider = NULL, *name = NULL;
12273 	const char *sec_name;
12274 	int n, err;
12275 
12276 	sec_name = bpf_program__section_name(prog);
12277 	if (strcmp(sec_name, "usdt") == 0) {
12278 		/* no auto-attach for just SEC("usdt") */
12279 		*link = NULL;
12280 		return 0;
12281 	}
12282 
12283 	n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12284 	if (n != 3) {
12285 		pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12286 			sec_name);
12287 		err = -EINVAL;
12288 	} else {
12289 		*link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12290 						 provider, name, NULL);
12291 		err = libbpf_get_error(*link);
12292 	}
12293 	free(path);
12294 	free(provider);
12295 	free(name);
12296 	return err;
12297 }
12298 
12299 static int determine_tracepoint_id(const char *tp_category,
12300 				   const char *tp_name)
12301 {
12302 	char file[PATH_MAX];
12303 	int ret;
12304 
12305 	ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12306 		       tracefs_path(), tp_category, tp_name);
12307 	if (ret < 0)
12308 		return -errno;
12309 	if (ret >= sizeof(file)) {
12310 		pr_debug("tracepoint %s/%s path is too long\n",
12311 			 tp_category, tp_name);
12312 		return -E2BIG;
12313 	}
12314 	return parse_uint_from_file(file, "%d\n");
12315 }
12316 
12317 static int perf_event_open_tracepoint(const char *tp_category,
12318 				      const char *tp_name)
12319 {
12320 	const size_t attr_sz = sizeof(struct perf_event_attr);
12321 	struct perf_event_attr attr;
12322 	char errmsg[STRERR_BUFSIZE];
12323 	int tp_id, pfd, err;
12324 
12325 	tp_id = determine_tracepoint_id(tp_category, tp_name);
12326 	if (tp_id < 0) {
12327 		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12328 			tp_category, tp_name,
12329 			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
12330 		return tp_id;
12331 	}
12332 
12333 	memset(&attr, 0, attr_sz);
12334 	attr.type = PERF_TYPE_TRACEPOINT;
12335 	attr.size = attr_sz;
12336 	attr.config = tp_id;
12337 
12338 	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12339 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12340 	if (pfd < 0) {
12341 		err = -errno;
12342 		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12343 			tp_category, tp_name,
12344 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12345 		return err;
12346 	}
12347 	return pfd;
12348 }
12349 
12350 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
12351 						     const char *tp_category,
12352 						     const char *tp_name,
12353 						     const struct bpf_tracepoint_opts *opts)
12354 {
12355 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12356 	char errmsg[STRERR_BUFSIZE];
12357 	struct bpf_link *link;
12358 	int pfd, err;
12359 
12360 	if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12361 		return libbpf_err_ptr(-EINVAL);
12362 
12363 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12364 
12365 	pfd = perf_event_open_tracepoint(tp_category, tp_name);
12366 	if (pfd < 0) {
12367 		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12368 			prog->name, tp_category, tp_name,
12369 			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12370 		return libbpf_err_ptr(pfd);
12371 	}
12372 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12373 	err = libbpf_get_error(link);
12374 	if (err) {
12375 		close(pfd);
12376 		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12377 			prog->name, tp_category, tp_name,
12378 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12379 		return libbpf_err_ptr(err);
12380 	}
12381 	return link;
12382 }
12383 
12384 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
12385 						const char *tp_category,
12386 						const char *tp_name)
12387 {
12388 	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
12389 }
12390 
12391 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12392 {
12393 	char *sec_name, *tp_cat, *tp_name;
12394 
12395 	*link = NULL;
12396 
12397 	/* no auto-attach for SEC("tp") or SEC("tracepoint") */
12398 	if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12399 		return 0;
12400 
12401 	sec_name = strdup(prog->sec_name);
12402 	if (!sec_name)
12403 		return -ENOMEM;
12404 
12405 	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12406 	if (str_has_pfx(prog->sec_name, "tp/"))
12407 		tp_cat = sec_name + sizeof("tp/") - 1;
12408 	else
12409 		tp_cat = sec_name + sizeof("tracepoint/") - 1;
12410 	tp_name = strchr(tp_cat, '/');
12411 	if (!tp_name) {
12412 		free(sec_name);
12413 		return -EINVAL;
12414 	}
12415 	*tp_name = '\0';
12416 	tp_name++;
12417 
12418 	*link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
12419 	free(sec_name);
12420 	return libbpf_get_error(*link);
12421 }
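/* Illustrative auto-attach section for the parsing above:
 *
 *	SEC("tp/syscalls/sys_enter_openat")
 *	int handle_openat(void *ctx) { return 0; }
 *
 * which is equivalent to calling
 * bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_openat").
 */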
12422 
12423 struct bpf_link *
12424 bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
12425 					const char *tp_name,
12426 					struct bpf_raw_tracepoint_opts *opts)
12427 {
12428 	LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
12429 	char errmsg[STRERR_BUFSIZE];
12430 	struct bpf_link *link;
12431 	int prog_fd, pfd;
12432 
12433 	if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts))
12434 		return libbpf_err_ptr(-EINVAL);
12435 
12436 	prog_fd = bpf_program__fd(prog);
12437 	if (prog_fd < 0) {
12438 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12439 		return libbpf_err_ptr(-EINVAL);
12440 	}
12441 
12442 	link = calloc(1, sizeof(*link));
12443 	if (!link)
12444 		return libbpf_err_ptr(-ENOMEM);
12445 	link->detach = &bpf_link__detach_fd;
12446 
12447 	raw_opts.tp_name = tp_name;
12448 	raw_opts.cookie = OPTS_GET(opts, cookie, 0);
12449 	pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts);
12450 	if (pfd < 0) {
12451 		pfd = -errno;
12452 		free(link);
12453 		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
12454 			prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12455 		return libbpf_err_ptr(pfd);
12456 	}
12457 	link->fd = pfd;
12458 	return link;
12459 }
12460 
12461 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
12462 						    const char *tp_name)
12463 {
12464 	return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL);
12465 }
12466 
12467 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12468 {
12469 	static const char *const prefixes[] = {
12470 		"raw_tp",
12471 		"raw_tracepoint",
12472 		"raw_tp.w",
12473 		"raw_tracepoint.w",
12474 	};
12475 	size_t i;
12476 	const char *tp_name = NULL;
12477 
12478 	*link = NULL;
12479 
12480 	for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
12481 		size_t pfx_len;
12482 
12483 		if (!str_has_pfx(prog->sec_name, prefixes[i]))
12484 			continue;
12485 
12486 		pfx_len = strlen(prefixes[i]);
12487 		/* bare section name, e.g. SEC("raw_tp"), has no auto-attach */
12488 		if (prog->sec_name[pfx_len] == '\0')
12489 			return 0;
12490 
12491 		if (prog->sec_name[pfx_len] != '/')
12492 			continue;
12493 
12494 		tp_name = prog->sec_name + pfx_len + 1;
12495 		break;
12496 	}
12497 
12498 	if (!tp_name) {
12499 		pr_warn("prog '%s': invalid section name '%s'\n",
12500 			prog->name, prog->sec_name);
12501 		return -EINVAL;
12502 	}
12503 
12504 	*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
12505 	return libbpf_get_error(*link);
12506 }
12507 
12508 /* Common logic for all BPF program types that attach to a btf_id */
12509 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
12510 						   const struct bpf_trace_opts *opts)
12511 {
12512 	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
12513 	char errmsg[STRERR_BUFSIZE];
12514 	struct bpf_link *link;
12515 	int prog_fd, pfd;
12516 
12517 	if (!OPTS_VALID(opts, bpf_trace_opts))
12518 		return libbpf_err_ptr(-EINVAL);
12519 
12520 	prog_fd = bpf_program__fd(prog);
12521 	if (prog_fd < 0) {
12522 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12523 		return libbpf_err_ptr(-EINVAL);
12524 	}
12525 
12526 	link = calloc(1, sizeof(*link));
12527 	if (!link)
12528 		return libbpf_err_ptr(-ENOMEM);
12529 	link->detach = &bpf_link__detach_fd;
12530 
12531 	/* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
12532 	link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
12533 	pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
12534 	if (pfd < 0) {
12535 		pfd = -errno;
12536 		free(link);
12537 		pr_warn("prog '%s': failed to attach: %s\n",
12538 			prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12539 		return libbpf_err_ptr(pfd);
12540 	}
12541 	link->fd = pfd;
12542 	return link;
12543 }
12544 
12545 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
12546 {
12547 	return bpf_program__attach_btf_id(prog, NULL);
12548 }
12549 
12550 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
12551 						const struct bpf_trace_opts *opts)
12552 {
12553 	return bpf_program__attach_btf_id(prog, opts);
12554 }
12555 
12556 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
12557 {
12558 	return bpf_program__attach_btf_id(prog, NULL);
12559 }
12560 
12561 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12562 {
12563 	*link = bpf_program__attach_trace(prog);
12564 	return libbpf_get_error(*link);
12565 }
12566 
12567 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12568 {
12569 	*link = bpf_program__attach_lsm(prog);
12570 	return libbpf_get_error(*link);
12571 }
12572 
12573 static struct bpf_link *
12574 bpf_program_attach_fd(const struct bpf_program *prog,
12575 		      int target_fd, const char *target_name,
12576 		      const struct bpf_link_create_opts *opts)
12577 {
12578 	enum bpf_attach_type attach_type;
12579 	char errmsg[STRERR_BUFSIZE];
12580 	struct bpf_link *link;
12581 	int prog_fd, link_fd;
12582 
12583 	prog_fd = bpf_program__fd(prog);
12584 	if (prog_fd < 0) {
12585 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12586 		return libbpf_err_ptr(-EINVAL);
12587 	}
12588 
12589 	link = calloc(1, sizeof(*link));
12590 	if (!link)
12591 		return libbpf_err_ptr(-ENOMEM);
12592 	link->detach = &bpf_link__detach_fd;
12593 
12594 	attach_type = bpf_program__expected_attach_type(prog);
12595 	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
12596 	if (link_fd < 0) {
12597 		link_fd = -errno;
12598 		free(link);
12599 		pr_warn("prog '%s': failed to attach to %s: %s\n",
12600 			prog->name, target_name,
12601 			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12602 		return libbpf_err_ptr(link_fd);
12603 	}
12604 	link->fd = link_fd;
12605 	return link;
12606 }
12607 
12608 struct bpf_link *
12609 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
12610 {
12611 	return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
12612 }
12613 
12614 struct bpf_link *
12615 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
12616 {
12617 	return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
12618 }
12619 
12620 struct bpf_link *
12621 bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd)
12622 {
12623 	return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL);
12624 }
12625 
12626 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
12627 {
12628 	/* target_fd/target_ifindex use the same field in LINK_CREATE */
12629 	return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
12630 }
12631 
12632 struct bpf_link *
12633 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
12634 			const struct bpf_tcx_opts *opts)
12635 {
12636 	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12637 	__u32 relative_id;
12638 	int relative_fd;
12639 
12640 	if (!OPTS_VALID(opts, bpf_tcx_opts))
12641 		return libbpf_err_ptr(-EINVAL);
12642 
12643 	relative_id = OPTS_GET(opts, relative_id, 0);
12644 	relative_fd = OPTS_GET(opts, relative_fd, 0);
12645 
12646 	/* validate we don't have unexpected combinations of non-zero fields */
12647 	if (!ifindex) {
12648 		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12649 			prog->name);
12650 		return libbpf_err_ptr(-EINVAL);
12651 	}
12652 	if (relative_fd && relative_id) {
12653 		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12654 			prog->name);
12655 		return libbpf_err_ptr(-EINVAL);
12656 	}
12657 
12658 	link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
12659 	link_create_opts.tcx.relative_fd = relative_fd;
12660 	link_create_opts.tcx.relative_id = relative_id;
12661 	link_create_opts.flags = OPTS_GET(opts, flags, 0);
12662 
12663 	/* target_fd/target_ifindex use the same field in LINK_CREATE */
12664 	return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
12665 }
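/* Usage sketch (illustrative only), assuming "ifindex" refers to an existing
 * netdevice and libbpf 1.0 error semantics (NULL return, errno set):
 *
 *	LIBBPF_OPTS(bpf_tcx_opts, opts);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tcx(prog, ifindex, &opts);
 *	if (!link)
 *		return -errno;
 */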
12666 
12667 struct bpf_link *
12668 bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
12669 			   const struct bpf_netkit_opts *opts)
12670 {
12671 	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12672 	__u32 relative_id;
12673 	int relative_fd;
12674 
12675 	if (!OPTS_VALID(opts, bpf_netkit_opts))
12676 		return libbpf_err_ptr(-EINVAL);
12677 
12678 	relative_id = OPTS_GET(opts, relative_id, 0);
12679 	relative_fd = OPTS_GET(opts, relative_fd, 0);
12680 
12681 	/* validate we don't have unexpected combinations of non-zero fields */
12682 	if (!ifindex) {
12683 		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12684 			prog->name);
12685 		return libbpf_err_ptr(-EINVAL);
12686 	}
12687 	if (relative_fd && relative_id) {
12688 		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12689 			prog->name);
12690 		return libbpf_err_ptr(-EINVAL);
12691 	}
12692 
12693 	link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
12694 	link_create_opts.netkit.relative_fd = relative_fd;
12695 	link_create_opts.netkit.relative_id = relative_id;
12696 	link_create_opts.flags = OPTS_GET(opts, flags, 0);
12697 
12698 	return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
12699 }
12700 
12701 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
12702 					      int target_fd,
12703 					      const char *attach_func_name)
12704 {
12705 	int btf_id;
12706 
12707 	if (!!target_fd != !!attach_func_name) {
12708 		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
12709 			prog->name);
12710 		return libbpf_err_ptr(-EINVAL);
12711 	}
12712 
12713 	if (prog->type != BPF_PROG_TYPE_EXT) {
12714 		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
12715 			prog->name);
12716 		return libbpf_err_ptr(-EINVAL);
12717 	}
12718 
12719 	if (target_fd) {
12720 		LIBBPF_OPTS(bpf_link_create_opts, target_opts);
12721 
12722 		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
12723 		if (btf_id < 0)
12724 			return libbpf_err_ptr(btf_id);
12725 
12726 		target_opts.target_btf_id = btf_id;
12727 
12728 		return bpf_program_attach_fd(prog, target_fd, "freplace",
12729 					     &target_opts);
12730 	} else {
12731 		/* no target, so use raw_tracepoint_open for compatibility
12732 		 * with old kernels
12733 		 */
12734 		return bpf_program__attach_trace(prog);
12735 	}
12736 }
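/* Usage sketch (illustrative only): target_fd and attach_func_name must be
 * supplied together or both omitted. "target_prog_fd" and "xdp_dispatch" are
 * hypothetical names:
 *
 *	// replace subprogram "xdp_dispatch" in an already-loaded target program
 *	link = bpf_program__attach_freplace(freplace_prog, target_prog_fd,
 *					    "xdp_dispatch");
 *
 *	// with no target, the program's load-time attach target is reused
 *	link = bpf_program__attach_freplace(freplace_prog, 0, NULL);
 */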
12737 
12738 struct bpf_link *
12739 bpf_program__attach_iter(const struct bpf_program *prog,
12740 			 const struct bpf_iter_attach_opts *opts)
12741 {
12742 	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12743 	char errmsg[STRERR_BUFSIZE];
12744 	struct bpf_link *link;
12745 	int prog_fd, link_fd;
12746 	__u32 target_fd = 0;
12747 
12748 	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
12749 		return libbpf_err_ptr(-EINVAL);
12750 
12751 	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
12752 	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
12753 
12754 	prog_fd = bpf_program__fd(prog);
12755 	if (prog_fd < 0) {
12756 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12757 		return libbpf_err_ptr(-EINVAL);
12758 	}
12759 
12760 	link = calloc(1, sizeof(*link));
12761 	if (!link)
12762 		return libbpf_err_ptr(-ENOMEM);
12763 	link->detach = &bpf_link__detach_fd;
12764 
12765 	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
12766 				  &link_create_opts);
12767 	if (link_fd < 0) {
12768 		link_fd = -errno;
12769 		free(link);
12770 		pr_warn("prog '%s': failed to attach to iterator: %s\n",
12771 			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12772 		return libbpf_err_ptr(link_fd);
12773 	}
12774 	link->fd = link_fd;
12775 	return link;
12776 }
12777 
12778 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12779 {
12780 	*link = bpf_program__attach_iter(prog, NULL);
12781 	return libbpf_get_error(*link);
12782 }
12783 
12784 struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
12785 					       const struct bpf_netfilter_opts *opts)
12786 {
12787 	LIBBPF_OPTS(bpf_link_create_opts, lopts);
12788 	struct bpf_link *link;
12789 	int prog_fd, link_fd;
12790 
12791 	if (!OPTS_VALID(opts, bpf_netfilter_opts))
12792 		return libbpf_err_ptr(-EINVAL);
12793 
12794 	prog_fd = bpf_program__fd(prog);
12795 	if (prog_fd < 0) {
12796 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12797 		return libbpf_err_ptr(-EINVAL);
12798 	}
12799 
12800 	link = calloc(1, sizeof(*link));
12801 	if (!link)
12802 		return libbpf_err_ptr(-ENOMEM);
12803 
12804 	link->detach = &bpf_link__detach_fd;
12805 
12806 	lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
12807 	lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
12808 	lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
12809 	lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
12810 
12811 	link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
12812 	if (link_fd < 0) {
12813 		char errmsg[STRERR_BUFSIZE];
12814 
12815 		link_fd = -errno;
12816 		free(link);
12817 		pr_warn("prog '%s': failed to attach to netfilter: %s\n",
12818 			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12819 		return libbpf_err_ptr(link_fd);
12820 	}
12821 	link->fd = link_fd;
12822 
12823 	return link;
12824 }
12825 
12826 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
12827 {
12828 	struct bpf_link *link = NULL;
12829 	int err;
12830 
12831 	if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12832 		return libbpf_err_ptr(-EOPNOTSUPP);
12833 
12834 	if (bpf_program__fd(prog) < 0) {
12835 		pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12836 			prog->name);
12837 		return libbpf_err_ptr(-EINVAL);
12838 	}
12839 
12840 	err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
12841 	if (err)
12842 		return libbpf_err_ptr(err);
12843 
12844 	/* When calling bpf_program__attach() explicitly, auto-attach support
12845 	 * is expected to work, so NULL returned link is considered an error.
12846 	 * This is different for skeleton's attach, see comment in
12847 	 * bpf_object__attach_skeleton().
12848 	 */
12849 	if (!link)
12850 		return libbpf_err_ptr(-EOPNOTSUPP);
12851 
12852 	return link;
12853 }
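/* Usage sketch (illustrative only), assuming "prog" was loaded as part of a
 * bpf_object and carries an auto-attachable SEC() definition:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */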
12854 
12855 struct bpf_link_struct_ops {
12856 	struct bpf_link link;
12857 	int map_fd;
12858 };
12859 
12860 static int bpf_link__detach_struct_ops(struct bpf_link *link)
12861 {
12862 	struct bpf_link_struct_ops *st_link;
12863 	__u32 zero = 0;
12864 
12865 	st_link = container_of(link, struct bpf_link_struct_ops, link);
12866 
12867 	if (st_link->map_fd < 0)
12868 		/* w/o a real link */
12869 		return bpf_map_delete_elem(link->fd, &zero);
12870 
12871 	return close(link->fd);
12872 }
12873 
12874 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
12875 {
12876 	struct bpf_link_struct_ops *link;
12877 	__u32 zero = 0;
12878 	int err, fd;
12879 
12880 	if (!bpf_map__is_struct_ops(map))
12881 		return libbpf_err_ptr(-EINVAL);
12882 
12883 	if (map->fd < 0) {
12884 		pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
12885 		return libbpf_err_ptr(-EINVAL);
12886 	}
12887 
12888 	link = calloc(1, sizeof(*link));
12889 	if (!link)
12890 		return libbpf_err_ptr(-EINVAL);
12891 
12892 	/* kern_vdata should be prepared during the loading phase. */
12893 	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12894 	/* It can be EBUSY if the map has been used to create or
12895 	 * update a link before.  We don't allow updating the value of
12896 	 * a struct_ops once it is set.  That ensures that the value
12897 	 * never changes.  So, it is safe to skip EBUSY.
12898 	 */
12899 	if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
12900 		free(link);
12901 		return libbpf_err_ptr(err);
12902 	}
12903 
12904 	link->link.detach = bpf_link__detach_struct_ops;
12905 
12906 	if (!(map->def.map_flags & BPF_F_LINK)) {
12907 		/* w/o a real link */
12908 		link->link.fd = map->fd;
12909 		link->map_fd = -1;
12910 		return &link->link;
12911 	}
12912 
12913 	fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
12914 	if (fd < 0) {
12915 		free(link);
12916 		return libbpf_err_ptr(fd);
12917 	}
12918 
12919 	link->link.fd = fd;
12920 	link->map_fd = map->fd;
12921 
12922 	return &link->link;
12923 }
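/* Usage sketch (illustrative only), assuming "ops_map" is a struct_ops map
 * from an already-loaded bpf_object:
 *
 *	struct bpf_link *link = bpf_map__attach_struct_ops(ops_map);
 *
 * For maps declared with BPF_F_LINK this creates a real BPF link; otherwise
 * the returned link merely wraps the map FD and detaching deletes the
 * registered struct_ops value, as implemented above.
 */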
12924 
12925 /*
12926  * Swap the backing struct_ops map of a link with a new struct_ops map.
12927  */
12928 int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
12929 {
12930 	struct bpf_link_struct_ops *st_ops_link;
12931 	__u32 zero = 0;
12932 	int err;
12933 
12934 	if (!bpf_map__is_struct_ops(map))
12935 		return -EINVAL;
12936 
12937 	if (map->fd < 0) {
12938 		pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
12939 		return -EINVAL;
12940 	}
12941 
12942 	st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
12943 	/* Ensure the type of a link is correct */
12944 	if (st_ops_link->map_fd < 0)
12945 		return -EINVAL;
12946 
12947 	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12948 	/* It can be EBUSY if the map has been used to create or
12949 	 * update a link before.  We don't allow updating the value of
12950 	 * a struct_ops once it is set.  That ensures that the value
12951 	 * never changed.  So, it is safe to skip EBUSY.
12952 	 * never changes.  So, it is safe to skip EBUSY.
12953 	if (err && err != -EBUSY)
12954 		return err;
12955 
12956 	err = bpf_link_update(link->fd, map->fd, NULL);
12957 	if (err < 0)
12958 		return err;
12959 
12960 	st_ops_link->map_fd = map->fd;
12961 
12962 	return 0;
12963 }
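/* Usage sketch (illustrative only): atomically switch a BPF_F_LINK struct_ops
 * link over to a new map; "new_ops_map" is a hypothetical name:
 *
 *	err = bpf_link__update_map(link, new_ops_map);
 *	if (err)
 *		// e.g. -EINVAL if the link was not created from a struct_ops map
 *		return err;
 */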
12964 
12965 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
12966 							  void *private_data);
12967 
12968 static enum bpf_perf_event_ret
12969 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12970 		       void **copy_mem, size_t *copy_size,
12971 		       bpf_perf_event_print_t fn, void *private_data)
12972 {
12973 	struct perf_event_mmap_page *header = mmap_mem;
12974 	__u64 data_head = ring_buffer_read_head(header);
12975 	__u64 data_tail = header->data_tail;
12976 	void *base = ((__u8 *)header) + page_size;
12977 	int ret = LIBBPF_PERF_EVENT_CONT;
12978 	struct perf_event_header *ehdr;
12979 	size_t ehdr_size;
12980 
12981 	while (data_head != data_tail) {
12982 		ehdr = base + (data_tail & (mmap_size - 1));
12983 		ehdr_size = ehdr->size;
12984 
12985 		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
12986 			void *copy_start = ehdr;
12987 			size_t len_first = base + mmap_size - copy_start;
12988 			size_t len_secnd = ehdr_size - len_first;
12989 
12990 			if (*copy_size < ehdr_size) {
12991 				free(*copy_mem);
12992 				*copy_mem = malloc(ehdr_size);
12993 				if (!*copy_mem) {
12994 					*copy_size = 0;
12995 					ret = LIBBPF_PERF_EVENT_ERROR;
12996 					break;
12997 				}
12998 				*copy_size = ehdr_size;
12999 			}
13000 
13001 			memcpy(*copy_mem, copy_start, len_first);
13002 			memcpy(*copy_mem + len_first, base, len_secnd);
13003 			ehdr = *copy_mem;
13004 		}
13005 
13006 		ret = fn(ehdr, private_data);
13007 		data_tail += ehdr_size;
13008 		if (ret != LIBBPF_PERF_EVENT_CONT)
13009 			break;
13010 	}
13011 
13012 	ring_buffer_write_tail(header, data_tail);
13013 	return libbpf_err(ret);
13014 }
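/* Illustrative sketch of a bpf_perf_event_print_t callback as consumed by
 * perf_event_read_simple() above; counting lost-record notifications via the
 * private_data pointer is a hypothetical use:
 *
 *	static enum bpf_perf_event_ret
 *	count_lost(struct perf_event_header *hdr, void *private_data)
 *	{
 *		unsigned long *lost = private_data;
 *
 *		if (hdr->type == PERF_RECORD_LOST)
 *			(*lost)++;
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 */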
13015 
13016 struct perf_buffer;
13017 
13018 struct perf_buffer_params {
13019 	struct perf_event_attr *attr;
13020 	/* if event_cb is specified, it takes precedence */
13021 	perf_buffer_event_fn event_cb;
13022 	/* sample_cb and lost_cb are higher-level common-case callbacks */
13023 	perf_buffer_sample_fn sample_cb;
13024 	perf_buffer_lost_fn lost_cb;
13025 	void *ctx;
13026 	int cpu_cnt;
13027 	int *cpus;
13028 	int *map_keys;
13029 };
13030 
13031 struct perf_cpu_buf {
13032 	struct perf_buffer *pb;
13033 	void *base; /* mmap()'ed memory */
13034 	void *buf; /* for reconstructing segmented data */
13035 	size_t buf_size;
13036 	int fd;
13037 	int cpu;
13038 	int map_key;
13039 };
13040 
13041 struct perf_buffer {
13042 	perf_buffer_event_fn event_cb;
13043 	perf_buffer_sample_fn sample_cb;
13044 	perf_buffer_lost_fn lost_cb;
13045 	void *ctx; /* passed into callbacks */
13046 
13047 	size_t page_size;
13048 	size_t mmap_size;
13049 	struct perf_cpu_buf **cpu_bufs;
13050 	struct epoll_event *events;
13051 	int cpu_cnt; /* number of allocated CPU buffers */
13052 	int epoll_fd; /* epoll instance FD */
13053 	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
13054 };
13055 
13056 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
13057 				      struct perf_cpu_buf *cpu_buf)
13058 {
13059 	if (!cpu_buf)
13060 		return;
13061 	if (cpu_buf->base &&
13062 	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
13063 		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
13064 	if (cpu_buf->fd >= 0) {
13065 		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
13066 		close(cpu_buf->fd);
13067 	}
13068 	free(cpu_buf->buf);
13069 	free(cpu_buf);
13070 }
13071 
13072 void perf_buffer__free(struct perf_buffer *pb)
13073 {
13074 	int i;
13075 
13076 	if (IS_ERR_OR_NULL(pb))
13077 		return;
13078 	if (pb->cpu_bufs) {
13079 		for (i = 0; i < pb->cpu_cnt; i++) {
13080 			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13081 
13082 			if (!cpu_buf)
13083 				continue;
13084 
13085 			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
13086 			perf_buffer__free_cpu_buf(pb, cpu_buf);
13087 		}
13088 		free(pb->cpu_bufs);
13089 	}
13090 	if (pb->epoll_fd >= 0)
13091 		close(pb->epoll_fd);
13092 	free(pb->events);
13093 	free(pb);
13094 }
13095 
13096 static struct perf_cpu_buf *
13097 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
13098 			  int cpu, int map_key)
13099 {
13100 	struct perf_cpu_buf *cpu_buf;
13101 	char msg[STRERR_BUFSIZE];
13102 	int err;
13103 
13104 	cpu_buf = calloc(1, sizeof(*cpu_buf));
13105 	if (!cpu_buf)
13106 		return ERR_PTR(-ENOMEM);
13107 
13108 	cpu_buf->pb = pb;
13109 	cpu_buf->cpu = cpu;
13110 	cpu_buf->map_key = map_key;
13111 
13112 	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
13113 			      -1, PERF_FLAG_FD_CLOEXEC);
13114 	if (cpu_buf->fd < 0) {
13115 		err = -errno;
13116 		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
13117 			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
13118 		goto error;
13119 	}
13120 
13121 	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
13122 			     PROT_READ | PROT_WRITE, MAP_SHARED,
13123 			     cpu_buf->fd, 0);
13124 	if (cpu_buf->base == MAP_FAILED) {
13125 		cpu_buf->base = NULL;
13126 		err = -errno;
13127 		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
13128 			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
13129 		goto error;
13130 	}
13131 
13132 	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
13133 		err = -errno;
13134 		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
13135 			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
13136 		goto error;
13137 	}
13138 
13139 	return cpu_buf;
13140 
13141 error:
13142 	perf_buffer__free_cpu_buf(pb, cpu_buf);
13143 	return (struct perf_cpu_buf *)ERR_PTR(err);
13144 }
13145 
13146 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13147 					      struct perf_buffer_params *p);
13148 
13149 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
13150 				     perf_buffer_sample_fn sample_cb,
13151 				     perf_buffer_lost_fn lost_cb,
13152 				     void *ctx,
13153 				     const struct perf_buffer_opts *opts)
13154 {
13155 	const size_t attr_sz = sizeof(struct perf_event_attr);
13156 	struct perf_buffer_params p = {};
13157 	struct perf_event_attr attr;
13158 	__u32 sample_period;
13159 
13160 	if (!OPTS_VALID(opts, perf_buffer_opts))
13161 		return libbpf_err_ptr(-EINVAL);
13162 
13163 	sample_period = OPTS_GET(opts, sample_period, 1);
13164 	if (!sample_period)
13165 		sample_period = 1;
13166 
13167 	memset(&attr, 0, attr_sz);
13168 	attr.size = attr_sz;
13169 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
13170 	attr.type = PERF_TYPE_SOFTWARE;
13171 	attr.sample_type = PERF_SAMPLE_RAW;
13172 	attr.sample_period = sample_period;
13173 	attr.wakeup_events = sample_period;
13174 
13175 	p.attr = &attr;
13176 	p.sample_cb = sample_cb;
13177 	p.lost_cb = lost_cb;
13178 	p.ctx = ctx;
13179 
13180 	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13181 }
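
/* Usage sketch for perf_buffer__new(): create a perf buffer on top of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map and poll it for samples. The map FD
 * "events_map_fd" and the handle_sample() callback are assumed names used
 * only for illustration.
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// data/size is the raw payload from bpf_perf_event_output()
 *	}
 *
 *	struct perf_buffer *pb;
 *	int err;
 *
 *	pb = perf_buffer__new(events_map_fd, 64, handle_sample, NULL, NULL, NULL);
 *	if (!pb)
 *		return -errno;
 *	while ((err = perf_buffer__poll(pb, 100)) >= 0)
 *		;	// callbacks are invoked from inside perf_buffer__poll()
 *	perf_buffer__free(pb);
 */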
13182 
13183 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
13184 					 struct perf_event_attr *attr,
13185 					 perf_buffer_event_fn event_cb, void *ctx,
13186 					 const struct perf_buffer_raw_opts *opts)
13187 {
13188 	struct perf_buffer_params p = {};
13189 
13190 	if (!attr)
13191 		return libbpf_err_ptr(-EINVAL);
13192 
13193 	if (!OPTS_VALID(opts, perf_buffer_raw_opts))
13194 		return libbpf_err_ptr(-EINVAL);
13195 
13196 	p.attr = attr;
13197 	p.event_cb = event_cb;
13198 	p.ctx = ctx;
13199 	p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
13200 	p.cpus = OPTS_GET(opts, cpus, NULL);
13201 	p.map_keys = OPTS_GET(opts, map_keys, NULL);
13202 
13203 	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13204 }
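
/* Usage sketch for perf_buffer__new_raw(): supply a caller-built
 * perf_event_attr and receive every record through a single event callback.
 * "events_map_fd" and handle_event() are assumed names for illustration.
 *
 *	static enum bpf_perf_event_ret
 *	handle_event(void *ctx, int cpu, struct perf_event_header *ehdr)
 *	{
 *		// full control over parsing PERF_RECORD_* records
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.size = sizeof(attr),
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_BPF_OUTPUT,
 *		.sample_type = PERF_SAMPLE_RAW,
 *		.sample_period = 1,
 *		.wakeup_events = 1,
 *	};
 *
 *	pb = perf_buffer__new_raw(events_map_fd, 8, &attr, handle_event, NULL, NULL);
 */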
13205 
13206 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13207 					      struct perf_buffer_params *p)
13208 {
13209 	const char *online_cpus_file = "/sys/devices/system/cpu/online";
13210 	struct bpf_map_info map;
13211 	char msg[STRERR_BUFSIZE];
13212 	struct perf_buffer *pb;
13213 	bool *online = NULL;
13214 	__u32 map_info_len;
13215 	int err, i, j, n;
13216 
13217 	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
13218 		pr_warn("page count should be power of two, but is %zu\n",
13219 			page_cnt);
13220 		return ERR_PTR(-EINVAL);
13221 	}
13222 
13223 	/* best-effort sanity checks */
13224 	memset(&map, 0, sizeof(map));
13225 	map_info_len = sizeof(map);
13226 	err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
13227 	if (err) {
13228 		err = -errno;
13229 		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
13230 		 * -EBADFD, -EFAULT, or -E2BIG on real error
13231 		 */
13232 		if (err != -EINVAL) {
13233 			pr_warn("failed to get map info for map FD %d: %s\n",
13234 				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
13235 			return ERR_PTR(err);
13236 		}
13237 		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
13238 			 map_fd);
13239 	} else {
13240 		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
13241 			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
13242 				map.name);
13243 			return ERR_PTR(-EINVAL);
13244 		}
13245 	}
13246 
13247 	pb = calloc(1, sizeof(*pb));
13248 	if (!pb)
13249 		return ERR_PTR(-ENOMEM);
13250 
13251 	pb->event_cb = p->event_cb;
13252 	pb->sample_cb = p->sample_cb;
13253 	pb->lost_cb = p->lost_cb;
13254 	pb->ctx = p->ctx;
13255 
13256 	pb->page_size = getpagesize();
13257 	pb->mmap_size = pb->page_size * page_cnt;
13258 	pb->map_fd = map_fd;
13259 
13260 	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13261 	if (pb->epoll_fd < 0) {
13262 		err = -errno;
13263 		pr_warn("failed to create epoll instance: %s\n",
13264 			libbpf_strerror_r(err, msg, sizeof(msg)));
13265 		goto error;
13266 	}
13267 
13268 	if (p->cpu_cnt > 0) {
13269 		pb->cpu_cnt = p->cpu_cnt;
13270 	} else {
13271 		pb->cpu_cnt = libbpf_num_possible_cpus();
13272 		if (pb->cpu_cnt < 0) {
13273 			err = pb->cpu_cnt;
13274 			goto error;
13275 		}
13276 		if (map.max_entries && map.max_entries < pb->cpu_cnt)
13277 			pb->cpu_cnt = map.max_entries;
13278 	}
13279 
13280 	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13281 	if (!pb->events) {
13282 		err = -ENOMEM;
13283 		pr_warn("failed to allocate events: out of memory\n");
13284 		goto error;
13285 	}
13286 	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13287 	if (!pb->cpu_bufs) {
13288 		err = -ENOMEM;
13289 		pr_warn("failed to allocate buffers: out of memory\n");
13290 		goto error;
13291 	}
13292 
13293 	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13294 	if (err) {
13295 		pr_warn("failed to get online CPU mask: %d\n", err);
13296 		goto error;
13297 	}
13298 
13299 	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13300 		struct perf_cpu_buf *cpu_buf;
13301 		int cpu, map_key;
13302 
13303 		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13304 		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13305 
13306 		/* in case user didn't explicitly request particular CPUs to
13307 		 * be attached to, skip offline/not present CPUs
13308 		 */
13309 		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13310 			continue;
13311 
13312 		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13313 		if (IS_ERR(cpu_buf)) {
13314 			err = PTR_ERR(cpu_buf);
13315 			goto error;
13316 		}
13317 
13318 		pb->cpu_bufs[j] = cpu_buf;
13319 
13320 		err = bpf_map_update_elem(pb->map_fd, &map_key,
13321 					  &cpu_buf->fd, 0);
13322 		if (err) {
13323 			err = -errno;
13324 			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13325 				cpu, map_key, cpu_buf->fd,
13326 				libbpf_strerror_r(err, msg, sizeof(msg)));
13327 			goto error;
13328 		}
13329 
13330 		pb->events[j].events = EPOLLIN;
13331 		pb->events[j].data.ptr = cpu_buf;
13332 		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13333 			      &pb->events[j]) < 0) {
13334 			err = -errno;
13335 			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13336 				cpu, cpu_buf->fd,
13337 				libbpf_strerror_r(err, msg, sizeof(msg)));
13338 			goto error;
13339 		}
13340 		j++;
13341 	}
13342 	pb->cpu_cnt = j;
13343 	free(online);
13344 
13345 	return pb;
13346 
13347 error:
13348 	free(online);
13349 	if (pb)
13350 		perf_buffer__free(pb);
13351 	return ERR_PTR(err);
13352 }
13353 
13354 struct perf_sample_raw {
13355 	struct perf_event_header header;
13356 	uint32_t size;
13357 	char data[];
13358 };
13359 
13360 struct perf_sample_lost {
13361 	struct perf_event_header header;
13362 	uint64_t id;
13363 	uint64_t lost;
13364 	uint64_t sample_id;
13365 };
13366 
13367 static enum bpf_perf_event_ret
13368 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
13369 {
13370 	struct perf_cpu_buf *cpu_buf = ctx;
13371 	struct perf_buffer *pb = cpu_buf->pb;
13372 	void *data = e;
13373 
13374 	/* user wants full control over parsing perf event */
13375 	if (pb->event_cb)
13376 		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13377 
13378 	switch (e->type) {
13379 	case PERF_RECORD_SAMPLE: {
13380 		struct perf_sample_raw *s = data;
13381 
13382 		if (pb->sample_cb)
13383 			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13384 		break;
13385 	}
13386 	case PERF_RECORD_LOST: {
13387 		struct perf_sample_lost *s = data;
13388 
13389 		if (pb->lost_cb)
13390 			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13391 		break;
13392 	}
13393 	default:
13394 		pr_warn("unknown perf sample type %d\n", e->type);
13395 		return LIBBPF_PERF_EVENT_ERROR;
13396 	}
13397 	return LIBBPF_PERF_EVENT_CONT;
13398 }
13399 
13400 static int perf_buffer__process_records(struct perf_buffer *pb,
13401 					struct perf_cpu_buf *cpu_buf)
13402 {
13403 	enum bpf_perf_event_ret ret;
13404 
13405 	ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13406 				     pb->page_size, &cpu_buf->buf,
13407 				     &cpu_buf->buf_size,
13408 				     perf_buffer__process_record, cpu_buf);
13409 	if (ret != LIBBPF_PERF_EVENT_CONT)
13410 		return ret;
13411 	return 0;
13412 }
13413 
13414 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
13415 {
13416 	return pb->epoll_fd;
13417 }
13418 
13419 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
13420 {
13421 	int i, cnt, err;
13422 
13423 	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
13424 	if (cnt < 0)
13425 		return -errno;
13426 
13427 	for (i = 0; i < cnt; i++) {
13428 		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13429 
13430 		err = perf_buffer__process_records(pb, cpu_buf);
13431 		if (err) {
13432 			pr_warn("error while processing records: %d\n", err);
13433 			return libbpf_err(err);
13434 		}
13435 	}
13436 	return cnt;
13437 }
13438 
13439 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
13440  * manager.
13441  */
13442 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
13443 {
13444 	return pb->cpu_cnt;
13445 }
13446 
13447 /*
13448  * Return perf_event FD of a ring buffer in *buf_idx* slot of
13449  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
13450  * select()/poll()/epoll() Linux syscalls.
13451  */
13452 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
13453 {
13454 	struct perf_cpu_buf *cpu_buf;
13455 
13456 	if (buf_idx >= pb->cpu_cnt)
13457 		return libbpf_err(-EINVAL);
13458 
13459 	cpu_buf = pb->cpu_bufs[buf_idx];
13460 	if (!cpu_buf)
13461 		return libbpf_err(-ENOENT);
13462 
13463 	return cpu_buf->fd;
13464 }
13465 
13466 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
13467 {
13468 	struct perf_cpu_buf *cpu_buf;
13469 
13470 	if (buf_idx >= pb->cpu_cnt)
13471 		return libbpf_err(-EINVAL);
13472 
13473 	cpu_buf = pb->cpu_bufs[buf_idx];
13474 	if (!cpu_buf)
13475 		return libbpf_err(-ENOENT);
13476 
13477 	*buf = cpu_buf->base;
13478 	*buf_size = pb->mmap_size;
13479 	return 0;
13480 }
13481 
13482 /*
13483  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
13484  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
13485  * consume, do nothing and return success.
13486  * Returns:
13487  *   - 0 on success;
13488  *   - <0 on failure.
13489  */
13490 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
13491 {
13492 	struct perf_cpu_buf *cpu_buf;
13493 
13494 	if (buf_idx >= pb->cpu_cnt)
13495 		return libbpf_err(-EINVAL);
13496 
13497 	cpu_buf = pb->cpu_bufs[buf_idx];
13498 	if (!cpu_buf)
13499 		return libbpf_err(-ENOENT);
13500 
13501 	return perf_buffer__process_records(pb, cpu_buf);
13502 }
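
/* Usage sketch combining perf_buffer__buffer_fd() and
 * perf_buffer__consume_buffer(): drive per-CPU ring buffers from a
 * caller-owned epoll loop instead of perf_buffer__poll(). "pb" is an
 * already created perf_buffer; error handling is elided for brevity.
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	size_t i;
 *
 *	for (i = 0; i < perf_buffer__buffer_cnt(pb); i++) {
 *		struct epoll_event ev = { .events = EPOLLIN, .data.u32 = i };
 *
 *		epoll_ctl(epfd, EPOLL_CTL_ADD, perf_buffer__buffer_fd(pb, i), &ev);
 *	}
 *	for (;;) {
 *		struct epoll_event ev;
 *
 *		if (epoll_wait(epfd, &ev, 1, -1) == 1)
 *			perf_buffer__consume_buffer(pb, ev.data.u32);
 *	}
 */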
13503 
13504 int perf_buffer__consume(struct perf_buffer *pb)
13505 {
13506 	int i, err;
13507 
13508 	for (i = 0; i < pb->cpu_cnt; i++) {
13509 		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13510 
13511 		if (!cpu_buf)
13512 			continue;
13513 
13514 		err = perf_buffer__process_records(pb, cpu_buf);
13515 		if (err) {
13516 			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
13517 			return libbpf_err(err);
13518 		}
13519 	}
13520 	return 0;
13521 }
13522 
13523 int bpf_program__set_attach_target(struct bpf_program *prog,
13524 				   int attach_prog_fd,
13525 				   const char *attach_func_name)
13526 {
13527 	int btf_obj_fd = 0, btf_id = 0, err;
13528 
13529 	if (!prog || attach_prog_fd < 0)
13530 		return libbpf_err(-EINVAL);
13531 
13532 	if (prog->obj->loaded)
13533 		return libbpf_err(-EINVAL);
13534 
13535 	if (attach_prog_fd && !attach_func_name) {
13536 		/* remember attach_prog_fd and let bpf_program__load() find
13537 		 * BTF ID during the program load
13538 		 */
13539 		prog->attach_prog_fd = attach_prog_fd;
13540 		return 0;
13541 	}
13542 
13543 	if (attach_prog_fd) {
13544 		btf_id = libbpf_find_prog_btf_id(attach_func_name,
13545 						 attach_prog_fd);
13546 		if (btf_id < 0)
13547 			return libbpf_err(btf_id);
13548 	} else {
13549 		if (!attach_func_name)
13550 			return libbpf_err(-EINVAL);
13551 
13552 		/* load btf_vmlinux, if not yet */
13553 		err = bpf_object__load_vmlinux_btf(prog->obj, true);
13554 		if (err)
13555 			return libbpf_err(err);
13556 		err = find_kernel_btf_id(prog->obj, attach_func_name,
13557 					 prog->expected_attach_type,
13558 					 &btf_obj_fd, &btf_id);
13559 		if (err)
13560 			return libbpf_err(err);
13561 	}
13562 
13563 	prog->attach_btf_id = btf_id;
13564 	prog->attach_btf_obj_fd = btf_obj_fd;
13565 	prog->attach_prog_fd = attach_prog_fd;
13566 	return 0;
13567 }
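
/* Usage sketch for bpf_program__set_attach_target(): re-target a
 * fentry/fexit/freplace program between bpf_object__open() and
 * bpf_object__load(), since the resolved BTF IDs are consumed at program
 * load time. The function names below are assumed for illustration.
 *
 *	// attach to a kernel function resolved via vmlinux BTF:
 *	err = bpf_program__set_attach_target(prog, 0, "do_unlinkat");
 *
 *	// or attach to a function in another, already loaded BPF program
 *	// identified by its FD (e.g., for freplace):
 *	err = bpf_program__set_attach_target(prog, target_prog_fd, "xdp_subprog");
 */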
13568 
13569 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
13570 {
13571 	int err = 0, n, len, start, end = -1;
13572 	bool *tmp;
13573 
13574 	*mask = NULL;
13575 	*mask_sz = 0;
13576 
13577 	/* Each substring separated by ',' has format \d+-\d+ or \d+ */
13578 	while (*s) {
13579 		if (*s == ',' || *s == '\n') {
13580 			s++;
13581 			continue;
13582 		}
13583 		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
13584 		if (n <= 0 || n > 2) {
13585 			pr_warn("Failed to get CPU range %s: %d\n", s, n);
13586 			err = -EINVAL;
13587 			goto cleanup;
13588 		} else if (n == 1) {
13589 			end = start;
13590 		}
13591 		if (start < 0 || start > end) {
13592 			pr_warn("Invalid CPU range [%d,%d] in %s\n",
13593 				start, end, s);
13594 			err = -EINVAL;
13595 			goto cleanup;
13596 		}
13597 		tmp = realloc(*mask, end + 1);
13598 		if (!tmp) {
13599 			err = -ENOMEM;
13600 			goto cleanup;
13601 		}
13602 		*mask = tmp;
13603 		memset(tmp + *mask_sz, 0, start - *mask_sz);
13604 		memset(tmp + start, 1, end - start + 1);
13605 		*mask_sz = end + 1;
13606 		s += len;
13607 	}
13608 	if (!*mask_sz) {
13609 		pr_warn("Empty CPU range\n");
13610 		return -EINVAL;
13611 	}
13612 	return 0;
13613 cleanup:
13614 	free(*mask);
13615 	*mask = NULL;
13616 	return err;
13617 }
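
/* Sketch of the mask produced by parse_cpu_mask_str() (internal helper
 * declared in libbpf_internal.h) for a typical kernel-style CPU list:
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-2,5\n", &mask, &n);
 *	// on success: n == 6, mask == { true, true, true, false, false, true }
 *	free(mask);
 */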
13618 
13619 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
13620 {
13621 	int fd, err = 0, len;
13622 	char buf[128];
13623 
13624 	fd = open(fcpu, O_RDONLY | O_CLOEXEC);
13625 	if (fd < 0) {
13626 		err = -errno;
13627 		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
13628 		return err;
13629 	}
13630 	len = read(fd, buf, sizeof(buf));
13631 	close(fd);
13632 	if (len <= 0) {
13633 		err = len ? -errno : -EINVAL;
13634 		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
13635 		return err;
13636 	}
13637 	if (len >= sizeof(buf)) {
13638 		pr_warn("CPU mask is too big in file %s\n", fcpu);
13639 		return -E2BIG;
13640 	}
13641 	buf[len] = '\0';
13642 
13643 	return parse_cpu_mask_str(buf, mask, mask_sz);
13644 }
13645 
13646 int libbpf_num_possible_cpus(void)
13647 {
13648 	static const char *fcpu = "/sys/devices/system/cpu/possible";
13649 	static int cpus;
13650 	int err, n, i, tmp_cpus;
13651 	bool *mask;
13652 
13653 	tmp_cpus = READ_ONCE(cpus);
13654 	if (tmp_cpus > 0)
13655 		return tmp_cpus;
13656 
13657 	err = parse_cpu_mask_file(fcpu, &mask, &n);
13658 	if (err)
13659 		return libbpf_err(err);
13660 
13661 	tmp_cpus = 0;
13662 	for (i = 0; i < n; i++) {
13663 		if (mask[i])
13664 			tmp_cpus++;
13665 	}
13666 	free(mask);
13667 
13668 	WRITE_ONCE(cpus, tmp_cpus);
13669 	return tmp_cpus;
13670 }
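
/* Usage sketch for libbpf_num_possible_cpus(): size the value buffer for
 * a per-CPU map lookup. "percpu_map_fd" is an assumed FD, shown only for
 * illustration; per-CPU maps return one value per possible CPU.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *values;
 *	__u32 key = 0;
 *	int err;
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, sizeof(*values));
 *	err = bpf_map_lookup_elem(percpu_map_fd, &key, values);
 *	// values[i] holds the element's copy for possible CPU i
 *	free(values);
 */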
13671 
13672 static int populate_skeleton_maps(const struct bpf_object *obj,
13673 				  struct bpf_map_skeleton *maps,
13674 				  size_t map_cnt)
13675 {
13676 	int i;
13677 
13678 	for (i = 0; i < map_cnt; i++) {
13679 		struct bpf_map **map = maps[i].map;
13680 		const char *name = maps[i].name;
13681 		void **mmaped = maps[i].mmaped;
13682 
13683 		*map = bpf_object__find_map_by_name(obj, name);
13684 		if (!*map) {
13685 			pr_warn("failed to find skeleton map '%s'\n", name);
13686 			return -ESRCH;
13687 		}
13688 
13689 		/* externs shouldn't be pre-setup from user code */
13690 		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
13691 			*mmaped = (*map)->mmaped;
13692 	}
13693 	return 0;
13694 }
13695 
13696 static int populate_skeleton_progs(const struct bpf_object *obj,
13697 				   struct bpf_prog_skeleton *progs,
13698 				   size_t prog_cnt)
13699 {
13700 	int i;
13701 
13702 	for (i = 0; i < prog_cnt; i++) {
13703 		struct bpf_program **prog = progs[i].prog;
13704 		const char *name = progs[i].name;
13705 
13706 		*prog = bpf_object__find_program_by_name(obj, name);
13707 		if (!*prog) {
13708 			pr_warn("failed to find skeleton program '%s'\n", name);
13709 			return -ESRCH;
13710 		}
13711 	}
13712 	return 0;
13713 }
13714 
13715 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
13716 			      const struct bpf_object_open_opts *opts)
13717 {
13718 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
13719 		.object_name = s->name,
13720 	);
13721 	struct bpf_object *obj;
13722 	int err;
13723 
13724 	/* Attempt to preserve opts->object_name, unless overridden by user
13725 	 * explicitly. Overwriting object name for skeletons is discouraged,
13726 	 * as it breaks global data maps, because they contain object name
13727 	 * prefix as their own map name prefix. When skeleton is generated,
13728 	 * bpftool is making an assumption that this name will stay the same.
13729 	 */
13730 	if (opts) {
13731 		memcpy(&skel_opts, opts, sizeof(*opts));
13732 		if (!opts->object_name)
13733 			skel_opts.object_name = s->name;
13734 	}
13735 
13736 	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
13737 	err = libbpf_get_error(obj);
13738 	if (err) {
13739 		pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
13740 			s->name, err);
13741 		return libbpf_err(err);
13742 	}
13743 
13744 	*s->obj = obj;
13745 	err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
13746 	if (err) {
13747 		pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
13748 		return libbpf_err(err);
13749 	}
13750 
13751 	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
13752 	if (err) {
13753 		pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
13754 		return libbpf_err(err);
13755 	}
13756 
13757 	return 0;
13758 }
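
/* Typical skeleton lifecycle driving the helpers above, as generated by
 * "bpftool gen skeleton". The skeleton type my_prog_bpf and its generated
 * my_prog_bpf__*() wrappers are assumed names; internally they call
 * bpf_object__{open,load,attach,destroy}_skeleton().
 *
 *	struct my_prog_bpf *skel;
 *	int err;
 *
 *	skel = my_prog_bpf__open();		// bpf_object__open_skeleton()
 *	if (!skel)
 *		return -errno;
 *	skel->rodata->my_cfg = 42;		// tweak global data before load
 *	err = my_prog_bpf__load(skel);		// bpf_object__load_skeleton()
 *	if (!err)
 *		err = my_prog_bpf__attach(skel);// bpf_object__attach_skeleton()
 *	my_prog_bpf__destroy(skel);		// bpf_object__destroy_skeleton()
 */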
13759 
13760 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
13761 {
13762 	int err, len, var_idx, i;
13763 	const char *var_name;
13764 	const struct bpf_map *map;
13765 	struct btf *btf;
13766 	__u32 map_type_id;
13767 	const struct btf_type *map_type, *var_type;
13768 	const struct bpf_var_skeleton *var_skel;
13769 	struct btf_var_secinfo *var;
13770 
13771 	if (!s->obj)
13772 		return libbpf_err(-EINVAL);
13773 
13774 	btf = bpf_object__btf(s->obj);
13775 	if (!btf) {
13776 		pr_warn("subskeletons require BTF at runtime (object %s)\n",
13777 			bpf_object__name(s->obj));
13778 		return libbpf_err(-errno);
13779 	}
13780 
13781 	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
13782 	if (err) {
13783 		pr_warn("failed to populate subskeleton maps: %d\n", err);
13784 		return libbpf_err(err);
13785 	}
13786 
13787 	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
13788 	if (err) {
13789 		pr_warn("failed to populate subskeleton maps: %d\n", err);
13790 		return libbpf_err(err);
13791 	}
13792 
13793 	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
13794 		var_skel = &s->vars[var_idx];
13795 		map = *var_skel->map;
13796 		map_type_id = bpf_map__btf_value_type_id(map);
13797 		map_type = btf__type_by_id(btf, map_type_id);
13798 
13799 		if (!btf_is_datasec(map_type)) {
13800 			pr_warn("type for map '%1$s' is not a datasec: %2$s",
13801 				bpf_map__name(map),
13802 				__btf_kind_str(btf_kind(map_type)));
13803 			return libbpf_err(-EINVAL);
13804 		}
13805 
13806 		len = btf_vlen(map_type);
13807 		var = btf_var_secinfos(map_type);
13808 		for (i = 0; i < len; i++, var++) {
13809 			var_type = btf__type_by_id(btf, var->type);
13810 			var_name = btf__name_by_offset(btf, var_type->name_off);
13811 			if (strcmp(var_name, var_skel->name) == 0) {
13812 				*var_skel->addr = map->mmaped + var->offset;
13813 				break;
13814 			}
13815 		}
13816 	}
13817 	return 0;
13818 }
13819 
13820 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
13821 {
13822 	if (!s)
13823 		return;
13824 	free(s->maps);
13825 	free(s->progs);
13826 	free(s->vars);
13827 	free(s);
13828 }
13829 
13830 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
13831 {
13832 	int i, err;
13833 
13834 	err = bpf_object__load(*s->obj);
13835 	if (err) {
13836 		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
13837 		return libbpf_err(err);
13838 	}
13839 
13840 	for (i = 0; i < s->map_cnt; i++) {
13841 		struct bpf_map *map = *s->maps[i].map;
13842 		size_t mmap_sz = bpf_map_mmap_sz(map);
13843 		int prot, map_fd = map->fd;
13844 		void **mmaped = s->maps[i].mmaped;
13845 
13846 		if (!mmaped)
13847 			continue;
13848 
13849 		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13850 			*mmaped = NULL;
13851 			continue;
13852 		}
13853 
13854 		if (map->def.type == BPF_MAP_TYPE_ARENA) {
13855 			*mmaped = map->mmaped;
13856 			continue;
13857 		}
13858 
13859 		if (map->def.map_flags & BPF_F_RDONLY_PROG)
13860 			prot = PROT_READ;
13861 		else
13862 			prot = PROT_READ | PROT_WRITE;
13863 
13864 		/* Remap anonymous mmap()-ed "map initialization image" as
13865 		 * a BPF map-backed mmap()-ed memory, but preserving the same
13866 		 * memory address. This will cause kernel to change process'
13867 		 * page table to point to a different piece of kernel memory,
13868 		 * but from userspace point of view memory address (and its
13869 		 * contents, being identical at this point) will stay the
13870 		 * same. This mapping will be released by bpf_object__close()
13871 		 * as per normal clean up procedure, so we don't need to worry
13872 		 * about it from skeleton's clean up perspective.
13873 		 */
13874 		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
13875 		if (*mmaped == MAP_FAILED) {
13876 			err = -errno;
13877 			*mmaped = NULL;
13878 			pr_warn("failed to re-mmap() map '%s': %d\n",
13879 				 bpf_map__name(map), err);
13880 			return libbpf_err(err);
13881 		}
13882 	}
13883 
13884 	return 0;
13885 }
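
/* The MAP_FIXED remap above is what lets skeleton global-data pointers
 * keep working across load. Assuming a bpftool-generated skeleton with a
 * .bss variable "counter", the same userspace address is first backed by
 * the ELF data image and then, after load, by the kernel BPF array map:
 *
 *	skel->bss->counter = 0;			// pre-load: writes the local image
 *	err = my_prog_bpf__load(skel);
 *	skel->bss->counter += 1;		// post-load: same pointer, now shared
 *						// with the loaded BPF program
 */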
13886 
13887 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
13888 {
13889 	int i, err;
13890 
13891 	for (i = 0; i < s->prog_cnt; i++) {
13892 		struct bpf_program *prog = *s->progs[i].prog;
13893 		struct bpf_link **link = s->progs[i].link;
13894 
13895 		if (!prog->autoload || !prog->autoattach)
13896 			continue;
13897 
13898 		/* auto-attaching not supported for this program */
13899 		if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13900 			continue;
13901 
13902 		/* if user already set the link manually, don't attempt auto-attach */
13903 		if (*link)
13904 			continue;
13905 
13906 		err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
13907 		if (err) {
13908 			pr_warn("prog '%s': failed to auto-attach: %d\n",
13909 				bpf_program__name(prog), err);
13910 			return libbpf_err(err);
13911 		}
13912 
13913 		/* For some SEC() definitions auto-attach is supported only
13914 		 * in some cases (e.g., if the definition completely
13915 		 * specifies target information). SEC("uprobe") is one such
13916 		 * case: if the user specified the target binary and
13917 		 * function name, such a BPF program can be auto-attached,
13918 		 * but if not, that shouldn't cause the skeleton's attach
13919 		 * to fail; the program should just be skipped.
13920 		 * attach_fn signals such a case by returning 0 (no error)
13921 		 * and setting link to NULL.
13922 		 */
13923 	}
13924 
13925 	return 0;
13926 }
13927 
13928 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
13929 {
13930 	int i;
13931 
13932 	for (i = 0; i < s->prog_cnt; i++) {
13933 		struct bpf_link **link = s->progs[i].link;
13934 
13935 		bpf_link__destroy(*link);
13936 		*link = NULL;
13937 	}
13938 }
13939 
13940 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
13941 {
13942 	if (!s)
13943 		return;
13944 
13945 	if (s->progs)
13946 		bpf_object__detach_skeleton(s);
13947 	if (s->obj)
13948 		bpf_object__close(*s->obj);
13949 	free(s->maps);
13950 	free(s->progs);
13951 	free(s);
13952 }
13953