// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool/helpers.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <linux/netfilter.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
#include <net/netfilter/nf_bpf_link.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

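/* Helper to time repeated test runs. NO_PREEMPT keeps the whole run on the
 * CPU with preemption off (used by the flow dissector and sk_lookup runners
 * below), while NO_MIGRATE only pins the task to the CPU (used by the
 * skb/XDP runners, where BH is disabled around each program run instead).
 */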
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
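
/* Typical use of the timer, as in the test runners below (a sketch, with
 * run_prog_once() standing in for the actual program invocation):
 *
 *	struct bpf_test_timer t = { .mode = NO_MIGRATE };
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		ret = run_prog_once(...);
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 */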

/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	union {
		/* ::data_hard_start starts here */
		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
 * must be updated accordingly when this gets changed, otherwise BPF selftests
 * will fail.
 */
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256
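
/* A sketch of the resulting page layout (exact sizes depend on the kernel
 * config, since sizeof(struct xdp_page_head) varies):
 *
 *	page start: struct xdp_page_head (orig_ctx + ctx)
 *	then:       TEST_XDP_FRAME_SIZE bytes of frame/data, i.e. the
 *	            xdp_frame header at ::data_hard_start followed by
 *	            headroom, packet data and tailroom
 *
 * E.g. with 4 KiB pages the frame area is 4096 - sizeof(struct
 * xdp_page_head) bytes, which must fit XDP_PACKET_HEADROOM plus the frame.
 */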

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = head->frame;
	data = head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool frame_was_changed(const struct xdp_page_head *head)
{
	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
	 * i.e. has the highest chances to be overwritten. If those two are
	 * untouched, it's most likely safe to skip the context reset.
	 */
	return head->frame->data != head->orig_ctx.data ||
	       head->frame->flags != head->orig_ctx.flags;
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
	       head->orig_ctx.data_meta != head->ctx.data_meta ||
	       head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, head->frame);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
				  (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = head->frame;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		local_bh_disable();
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
		local_bh_enable();
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
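
/* A minimal userspace sketch of driving this path (assuming libbpf's
 * bpf_prog_test_run_opts(); prog_fd, pkt and out_buf come from the caller):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt,
 *		.data_size_in = pkt_len,
 *		.data_out = out_buf,
 *		.data_size_out = sizeof(out_buf),
 *		.repeat = 100,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * On success, bpf_test_finish() above has filled in opts.retval,
 * opts.duration (average ns per run) and opts.data_size_out.
 */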

/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	asm volatile ("": "+r"(arg));
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

__bpf_kfunc u32 bpf_fentry_test9(u32 *a)
{
	return *a;
}

void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
{
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

__bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
					void *e, char f, int g)
{
	*b += 1;
	return a + *b + c + d + (long)e + f + g;
}

__bpf_kfunc int bpf_modify_return_test_tp(int nonce)
{
	trace_bpf_trigger_tp(nonce);

	return nonce;
}

int noinline bpf_fentry_shadow_test(int a)
{
	return a + 1;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
{
	bpf_kfunc_call_test_release(p);
}
CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
{
}
CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
	.owner = THIS_MODULE,
	.set = &bpf_test_modify_return_ids,
};

BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_KFUNCS_END(test_sk_check_kfunc_ids)

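/* Copy the user-supplied packet data into a freshly allocated buffer,
 * leaving the requested headroom and tailroom around it. @user_size is the
 * length userspace passed in, @size the (possibly clamped) length to use.
 */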
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0 ||
		    bpf_fentry_test9(&retval) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect++;
		b = 2;
		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
		if (b != 2)
			side_effect++;
		break;
	default:
		goto out;
	}

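	/* Pack the observed side effects into the upper 16 bits of the
	 * return value so a single retval reports both to userspace.
	 */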
	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;
	struct bpf_trace_run_ctx run_ctx = {};
	struct bpf_run_ctx *old_run_ctx;

	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();

	bpf_reset_run_ctx(old_run_ctx);
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

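/* Copy the user-supplied context into a zeroed buffer of @max_size bytes.
 * Returns NULL when the test carries no context at all, and rejects input
 * that has non-zero bytes beyond @max_size via bpf_check_uarg_tail_zero().
 */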
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
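
/* convert___skb_to_skb() below leans on this to enforce that userspace only
 * populated the __sk_buff fields the test runner knows how to transfer.
 */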

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}
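
/* A sketch of exercising the skb path with a context (assuming libbpf; the
 * mark value is arbitrary):
 *
 *	struct __sk_buff skb_ctx = { .mark = 42 };
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt, .data_size_in = pkt_len,
 *		.ctx_in = &skb_ctx, .ctx_size_in = sizeof(skb_ctx),
 *		.ctx_out = &skb_ctx, .ctx_size_out = sizeof(skb_ctx),
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * convert___skb_to_skb() applies .mark to the constructed skb and
 * convert_skb_to___skb() writes the final skb state back out.
 */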

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (bpf_prog_is_dev_bound(prog->aux))
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_fill_page_desc(frag, page, 0, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}
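
/* Live frame mode can be requested from userspace roughly like this (a
 * sketch, assuming libbpf):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt, .data_size_in = pkt_len,
 *		.repeat = 1 << 20,
 *		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
 *		.batch_size = 64,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * In this mode XDP_TX/XDP_REDIRECT really transmit the frames, so data_out
 * and ctx_out are rejected above and no retval is collected.
 */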

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static int verify_and_copy_hook_state(struct nf_hook_state *state,
				      const struct nf_hook_state *user,
				      struct net_device *dev)
{
	if (user->in || user->out)
		return -EINVAL;

	if (user->net || user->sk || user->okfn)
		return -EINVAL;

	switch (user->pf) {
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
		switch (state->hook) {
		case NF_INET_PRE_ROUTING:
			state->in = dev;
			break;
		case NF_INET_LOCAL_IN:
			state->in = dev;
			break;
		case NF_INET_FORWARD:
			state->in = dev;
			state->out = dev;
			break;
		case NF_INET_LOCAL_OUT:
			state->out = dev;
			break;
		case NF_INET_POST_ROUTING:
			state->out = dev;
			break;
		}

		break;
	default:
		return -EINVAL;
	}

	state->pf = user->pf;
	state->hook = user->hook;

	return 0;
}

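/* Pick the skb->protocol to use when there is no link-layer header to parse
 * (the NF_INET_LOCAL_OUT case); anything that isn't IPv4 is treated as IPv6.
 */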
static __be16 nfproto_eth(int nfproto)
{
	switch (nfproto) {
	case NFPROTO_IPV4:
		return htons(ETH_P_IP);
	case NFPROTO_IPV6:
		break;
	}

	return htons(ETH_P_IPV6);
}

int bpf_prog_test_run_nf(struct bpf_prog *prog,
			 const union bpf_attr *kattr,
			 union bpf_attr __user *uattr)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	struct nf_hook_state *user_ctx, hook_state = {
		.pf = NFPROTO_IPV4,
		.hook = NF_INET_LOCAL_OUT,
	};
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_nf_ctx ctx = {
		.state = &hook_state,
	};
	struct sk_buff *skb = NULL;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < sizeof(struct iphdr))
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
			     NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}

	if (user_ctx) {
		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
		if (ret)
			goto out;
	}

	skb = slab_build_skb(data);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	data = NULL; /* data released via kfree_skb */

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);

	ret = -EINVAL;

	if (hook_state.hook != NF_INET_LOCAL_OUT) {
		if (size < ETH_HLEN + sizeof(struct iphdr))
			goto out;

		skb->protocol = eth_type_trans(skb, dev);
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			if (hook_state.pf == NFPROTO_IPV4)
				break;
			goto out;
		case htons(ETH_P_IPV6):
			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
				goto out;
			if (hook_state.pf == NFPROTO_IPV6)
				break;
			goto out;
		default:
			ret = -EPROTO;
			goto out;
		}

		skb_reset_network_header(skb);
	} else {
		skb->protocol = nfproto_eth(hook_state.pf);
	}

	ctx.skb = skb;

	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
	if (ret)
		goto out;

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);

out:
	kfree(user_ctx);
	kfree_skb(skb);
	kfree(data);
	return ret;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release_dtor)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release_dtor)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);