// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
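
/* Check whether the verifier log in @buffer contains @pattern. */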
static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}
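
/* Resolve @ifindex to an interface name and read the PCI vendor ID of the
 * backing device from /sys/class/net/<ifname>/device/vendor. Returns the
 * vendor ID on success, -1 on any failure.
 */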
static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}
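
/* Encode the running kernel's "major.minor.patch" release in the same form
 * as the KERNEL_VERSION() macro, for use as kern_version when probing
 * kprobe programs. Returns 0 if the release string cannot be parsed.
 */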
static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}
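
/* Attempt to load a minimal program of @prog_type, optionally offloaded to
 * the device behind @ifindex, capturing the verifier log in @buf. Any fd
 * obtained is closed immediately; callers inspect errno and/or the log to
 * decide whether the load was rejected.
 */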
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_TRACING:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}
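
/* Probe whether the kernel (or the device behind @ifindex, when non-zero)
 * supports @prog_type by attempting to load a two-instruction "return"
 * program. EINVAL and EOPNOTSUPP are taken to mean "not supported"; any
 * other failure (e.g. EPERM) still counts as supported.
 */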
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
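
/* Build a raw BTF object from a pre-encoded type section (@raw_types,
 * @types_len) and string section (@str_sec, @str_len), prepending the BTF
 * header, and load it. Returns the BTF fd on success, a negative value on
 * failure.
 */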
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);

	free(raw_btf);
	return btf_fd;
}
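
/* Load the minimal BTF described in the comment below (a struct containing a
 * bpf_spin_lock), which the BPF_MAP_TYPE_SK_STORAGE probe uses as its
 * key/value type information. Returns the BTF fd, or a negative value on
 * failure.
 */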
static int load_sk_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* struct bpf_spin_lock */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */ /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32), /* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
}
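
/* Probe whether the kernel (or the device behind @ifindex, when non-zero)
 * supports @map_type by attempting to create a minimal map of that type,
 * adjusting key/value sizes, flags and BTF as individual map types require.
 */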
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;
	map_flags = 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size = sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size = sizeof(struct bpf_cgroup_storage_key);
		value_size = sizeof(__u64);
		max_entries = 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size = 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_sk_storage_btf();
		if (btf_fd < 0)
			return false;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}
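
/* Probe whether helper @id can be called from programs of @prog_type by
 * loading a program consisting of a single call to it and checking the
 * verifier log for "unknown func"/"invalid func" messages. For Netronome
 * devices (vendor ID 0x19ee) the driver's own log messages are checked too.
 */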
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
			      !grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}