// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_legacy.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 * @dmat: bus_dma(9) tag used for the mapping
 * @map: bus_dma(9) map backing the allocation
 * @seg: DMA segment describing the allocated memory
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
	bus_dma_tag_t dmat;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

#ifdef __linux__

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;

		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

#else

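/* Allocate pages of size 1 << order as a single DMA-able chunk via bus_dma(9)
 * instead of the Linux DMA API used above.
 */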
static struct vm_page *ttm_pool_alloc_page(struct ttm_pool *pool,
					   gfp_t gfp_flags, unsigned int order,
					   bus_dma_tag_t dmat)
{
	struct ttm_pool_dma *dma;
	struct vm_page *p;
	struct uvm_constraint_range *constraint = &no_constraint;
	int flags = (gfp_flags & M_NOWAIT) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
	int dmaflags = BUS_DMA_64BIT;
	int nsegs;

	if (pool->use_dma32) {
		constraint = &dma_constraint;
		dmaflags &= ~BUS_DMA_64BIT;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1,
	    (1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map))
		goto error_free;
#ifdef bus_dmamem_alloc_range
	if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE,
	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO,
	    constraint->ucr_low, constraint->ucr_high)) {
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
#else
	if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE,
	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO)) {
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
#endif
	if (bus_dmamap_load_raw(dmat, dma->map, &dma->seg, 1,
	    (1ULL << order) * PAGE_SIZE, flags)) {
		bus_dmamem_free(dmat, &dma->seg, 1);
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
	dma->dmat = dmat;
	dma->addr = dma->map->dm_segs[0].ds_addr;

#ifndef __sparc64__
	p = PHYS_TO_VM_PAGE(dma->seg.ds_addr);
#else
	p = TAILQ_FIRST((struct pglist *)dma->seg._ds_mlist);
#endif

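	/* Stash the DMA bookkeeping in the page's object rbtree pointer; it is
	 * read back in ttm_pool_free_page() and ttm_pool_map().  The Linux
	 * path above uses p->private for the same purpose.
	 */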
	p->objt.rbt_parent = (struct rb_entry *)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

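/* Reset the caching of pages of size 1 << order and release them via
 * bus_dma(9).
 */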
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct vm_page *p)
{
	struct ttm_pool_dma *dma;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
	bus_dmamap_unload(dma->dmat, dma->map);
	bus_dmamem_free(dma->dmat, &dma->seg, 1);
	bus_dmamap_destroy(dma->dmat, dma->map);
	kfree(dma);
}

#endif

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct vm_page **first, struct vm_page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

#ifdef __linux__

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct vm_page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

#else

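/* Fill the DMA address array from the bus_dma map that was set up when the
 * pages were allocated.
 */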
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct vm_page *p, dma_addr_t **dma_addr)
{
	struct ttm_pool_dma *dma;
	dma_addr_t addr;
	unsigned int i;

	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
	addr = dma->addr;

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
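	/* Nothing to do here: the bus_dma map is torn down in
	 * ttm_pool_free_page() when the page is actually released.
	 */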
}

#endif

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct vm_page *p)
{
	unsigned int i, num_pages = 1 << pt->order;
	struct ttm_pool_type_lru *entry;

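	/* Clear the pages before caching them so that later allocations from
	 * the pool hand out zeroed memory.
	 */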
	for (i = 0; i < num_pages; ++i) {
#ifdef notyet
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
#endif
			pmap_zero_page(p + i);
	}

	entry = malloc(sizeof(struct ttm_pool_type_lru), M_DRM, M_WAITOK);
	entry->pg = p;
	spin_lock(&pt->lock);
	LIST_INSERT_HEAD(&pt->lru, entry, entries);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct vm_page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct vm_page *p = NULL;
	struct ttm_pool_type_lru *entry;

	spin_lock(&pt->lock);
	if (!LIST_EMPTY(&pt->lru)) {
		entry = LIST_FIRST(&pt->lru);
		p = entry->pg;
		atomic_long_sub(1 << pt->order, &allocated_pages);
		LIST_REMOVE(entry, entries);
		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	mtx_init(&pt->lock, IPL_NONE);
	INIT_LIST_HEAD(&pt->pages);
	LIST_INIT(&pt->lru);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct vm_page *p;
	struct ttm_pool_type_lru *entry;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);

	while (!LIST_EMPTY(&pt->lru)) {
		entry = LIST_FIRST(&pt->lru);
		LIST_REMOVE(entry, entries);
		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
	}
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct vm_page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

#ifdef notyet

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~LINUX_PAGE_MASK;
	}

	return p->private;
}

#endif /* notyet */

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct vm_page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct vm_page ***pages,
				   unsigned long **orders)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) {
		**pages = p;
		**orders = order;
	}

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct vm_page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = tt->orders[i];
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct vm_page **caching = tt->pages;
	struct vm_page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct vm_page *p;
	int r;
	unsigned long *orders = tt->orders;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
#ifdef __linux__
	WARN_ON(dma_addr && !pool->dev);
#endif

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

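	/* Try the largest usable order first and fall back to smaller orders
	 * whenever an allocation of the current order fails.
	 */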
	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages, &orders);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages, &orders);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

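	/* Trim the pools until the cached pages are back under the global
	 * limit again.
	 */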
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
#ifdef notyet
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
#else
	STUB();
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : 0;
#endif
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct ttm_pool_type_lru *entry;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	LIST_FOREACH(entry, &pt->lru, entries)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	mtx_init(&shrinker_lock, IPL_NONE);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}