// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from cache of the current cpu with irqs disabled.
 * Freeing is always done into bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contains
 * struct llist_node.
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
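
/* A few worked examples of the mapping above (illustrative only; bucket
 * sizes refer to the 'sizes' table used by bpf_mem_alloc_init() below):
 *
 *   bpf_mem_cache_idx(8)    -> size_index[0]  - 1 == 2  -> 16-byte bucket
 *   bpf_mem_cache_idx(24)   -> size_index[2]  - 1 == 3  -> 32-byte bucket
 *   bpf_mem_cache_idx(96)   -> size_index[11] - 1 == 0  -> 96-byte bucket
 *   bpf_mem_cache_idx(300)  -> fls(299) - 2   == 7      -> 512-byte bucket
 *   bpf_mem_cache_idx(4097) -> -1 (too large)
 */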

#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}
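
/* Resulting layout of a percpu object from __alloc() above (a sketch of
 * what the code does, not a separate declaration):
 *
 *   obj[0]: 8 bytes reused as struct llist_node while the object sits on
 *           a free list (and as the bpf_mem_cache hint while in use)
 *   obj[1]: the pointer returned by __alloc_percpu_gfp()
 *
 * Callers receive obj + LLIST_NODE_SZ, i.e. the address of the slot that
 * holds the per-cpu pointer.
 */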

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts, and
		 * to reduce the chance of a bpf prog executing on this
		 * cpu while the active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	for (i = 0; i < cnt; i++) {
		/* For every 'c' llist_del_first(&c->free_by_rcu_ttrace) is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}
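
/* Overall path of a freed object through the machinery above (a summary
 * of the code, not an additional mechanism):
 *
 *   unit_free()           -> free_llist / free_llist_extra (per-cpu)
 *   free_bulk() below     -> free_by_rcu_ttrace (on c->tgt)
 *   do_call_rcu_ttrace()  -> waiting_for_gp_ttrace
 *   RCU tasks trace GP    -> __free_rcu_tasks_trace()
 *   (+ regular RCU GP when not implied) -> __free_rcu() -> kfree()
 */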

static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}
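
/* Worked examples of the watermark heuristic above (assuming 4k pages):
 *
 *   unit_size 4096: low = max(32*256/4096, 1) == 2
 *                   high = max(96*256/4096, 3) == 6
 *                   batch = max((6-2)/4*3, 1) == 3
 *   unit_size 8192: low == 1, high == 3, batch = max((3-1)/4*3, 1) == 1
 *
 * The ~11 Kbyte idle footprint quoted earlier follows from this prefill:
 * 4*(16+32+64+96+128+192+256) + 512 + 1024 + 2048 + 4096 == 10816 bytes.
 */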

/* When size != 0 allocate a single bpf_mem_cache for each cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (percpu)
			/* room for llist_node and per-cpu pointer */
			percpu_size = LLIST_NODE_SZ + sizeof(void *);
		else
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->tgt = c;
			prefill_mem_cache(c, cpu);
		}
	}
	ma->caches = pcc;
	return 0;
}
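
/* A minimal usage sketch of the two init modes. The function below is
 * hypothetical, not part of the allocator API, and assumes a sleepable
 * caller (GFP_KERNEL safe) that runs alloc/free with migration disabled.
 */
static int __maybe_unused bpf_mem_alloc_usage_sketch(void)
{
	struct bpf_mem_alloc fixed = {}, any = {};
	void *elem, *buf;
	int err;

	/* Fixed-size mode, e.g. 64-byte hash map elements. */
	err = bpf_mem_alloc_init(&fixed, 64, false);
	if (err)
		return err;
	elem = bpf_mem_cache_alloc(&fixed);	/* any context, incl. NMI */
	if (elem)
		bpf_mem_cache_free(&fixed, elem);
	bpf_mem_alloc_destroy(&fixed);

	/* size == 0: 11 buckets, kmalloc-style sizes up to 4096. */
	err = bpf_mem_alloc_init(&any, 0, false);
	if (err)
		return err;
	buf = bpf_mem_alloc(&any, 100);		/* 100 + 8 -> 128 bucket */
	if (buf)
		bpf_mem_free(&any, buf);
	bpf_mem_alloc_destroy(&any);
	return 0;
}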

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* The waiting_for_gp_ttrace lists were drained, but __free_rcu might
	 * still execute. Wait for it now before freeing the per-cpu caches.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier_tasks_trace();
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of the map memory be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			}
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}
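
/* Worked example of the 'active' counter protocol (illustrative only,
 * no new code path):
 *
 *   prog_A on cpu X:  unit_alloc()       -> active 0->1, pops free_llist
 *     NMI prog_B:     unit_alloc()       -> active 1->2 != 1, returns NULL
 *                                           instead of touching free_llist
 *     NMI prog_B:     unit_free() below  -> active busy, object is pushed
 *                                           to free_llist_extra via the
 *                                           atomic llist_add()
 *   prog_A resumes and unwinds 'active' back to 0.
 */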

/* Though the 'ptr' object could have been allocated on a different cpu,
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/* Remember the bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}
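
/* Hypothetical example of the GFP_KERNEL fallback above (illustrative
 * only, not part of the allocator API): a sleepable caller can take a
 * cached element opportunistically and fall back to a direct allocation
 * when the per-cpu free_llist is empty.
 */
static void __maybe_unused bpf_mem_cache_alloc_flags_sketch(struct bpf_mem_alloc *ma)
{
	void *obj;

	/* Safe only in contexts where kmalloc(GFP_KERNEL) cannot deadlock. */
	obj = bpf_mem_cache_alloc_flags(ma, GFP_KERNEL);
	if (!obj)
		return;
	bpf_mem_cache_free(ma, obj);
}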