xref: /dragonfly/sys/dev/drm/ttm/ttm_page_alloc_dma.c (revision c3762235)
1 /*
2  * Copyright 2011 (c) Oracle Corp.
3 
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
24  *
25  * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc_dma.c 247835 2013-03-05 09:49:34Z kib $
26  */
27 
28 /*
29  * A simple DMA pool loosely based on dmapool.c. It has certain advantages
30  * over the DMA pools:
31  * - Pool collects recently freed pages for reuse (and hooks up to
32  *   the shrinker).
33  * - Tracks pages that are currently in use.
34  * - Tracks whether the page is UC, WB or cached (and reverts to WB
35  *   when freed).
36  */
37 
38 #define pr_fmt(fmt) "[TTM] " fmt
39 
40 #include <linux/dma-mapping.h>
41 #include <linux/list.h>
42 #include <linux/seq_file.h> /* for seq_printf */
43 #include <linux/slab.h>
44 #include <linux/spinlock.h>
45 #include <linux/highmem.h>
46 #include <linux/mm_types.h>
47 #include <linux/module.h>
48 #include <linux/mm.h>
49 #include <linux/atomic.h>
50 #include <linux/device.h>
51 #include <linux/kthread.h>
52 #include <drm/ttm/ttm_bo_driver.h>
53 #include <drm/ttm/ttm_page_alloc.h>
54 #ifdef TTM_HAS_AGP
55 #include <asm/agp.h>
56 #endif
57 
58 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
59 #define SMALL_ALLOCATION		4
60 #define FREE_ALL_PAGES			(~0U)
61 /* times are in msecs */
62 #define IS_UNDEFINED			(0)
63 #define IS_WC				(1<<1)
64 #define IS_UC				(1<<2)
65 #define IS_CACHED			(1<<3)
66 #define IS_DMA32			(1<<4)
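
/*
 * For reference: with 4 KiB pages and 8-byte 'struct page *' pointers,
 * NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512 pages, so one refill or
 * free batch covers up to 2 MiB worth of pages.
 */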
67 
68 enum pool_type {
69 	POOL_IS_UNDEFINED,
70 	POOL_IS_WC = IS_WC,
71 	POOL_IS_UC = IS_UC,
72 	POOL_IS_CACHED = IS_CACHED,
73 	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
74 	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
75 	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
76 };
77 /*
78  * The pool structure. There are usually six pools:
79  *  - generic (not restricted to DMA32):
80  *      - write combined, uncached, cached.
81  *  - dma32 (up to 2^32 - so up to 4GB):
82  *      - write combined, uncached, cached.
83  * for each 'struct device'. The 'cached' is for pages that are actively used.
84  * The other ones can be shrunk by the shrinker API if necessary.
85  * @pools: The 'struct device->dma_pools' link.
86  * @type: Type of the pool
87  * @lock: Protects the inuse_list and free_list from concurrent access. Must be
88  * used with the irqsave/irqrestore variants because the pool allocator may be
89  * called from delayed work.
90  * @inuse_list: Pool of pages that are in use. The order is very important and
91  *   it matches the order in which the TTM pages were put back.
92  * @free_list: Pool of pages that are free to be used. No order requirements.
93  * @dev: The device that is associated with these pools.
94  * @size: Size used during DMA allocation.
95  * @npages_free: Count of available pages for re-use.
96  * @npages_in_use: Count of pages that are in use.
97  * @nfrees: Stats when pool is shrinking.
98  * @nrefills: Stats when the pool is grown.
99  * @gfp_flags: Flags to pass to the DMA page allocation.
100  * @name: Name of the pool.
101  * @dev_name: Name derived from dev - similar to how dev_info works.
102  *   Used during shutdown because dev_info is unavailable during release.
103  */
104 struct dma_pool {
105 	struct list_head pools; /* The 'struct device->dma_pools link */
106 	enum pool_type type;
107 	spinlock_t lock;
108 	struct list_head inuse_list;
109 	struct list_head free_list;
110 	struct device *dev;
111 	unsigned size;
112 	unsigned npages_free;
113 	unsigned npages_in_use;
114 	unsigned long nfrees; /* Stats when shrunk. */
115 	unsigned long nrefills; /* Stats when grown. */
116 	gfp_t gfp_flags;
117 	char name[13]; /* "cached dma32" */
118 	char dev_name[64]; /* Constructed from dev */
119 };
120 
121 /*
122  * The accounting structure that tracks an allocated page along with
123  * its DMA address.
124  * @page_list: The list link used to chain the page into a pool's lists.
125  * @vaddr: The virtual address of the page.
126  * @dma: The bus address of the page. If the page is not allocated
127  *   via the DMA API, it will be -1.
128  */
129 struct dma_page {
130 	struct list_head page_list;
131 	void *vaddr;
132 	struct page *p;
133 	dma_addr_t dma;
134 };
135 
136 /*
137  * Limits for the pool. They are handled without locks because the only place
138  * where they may change is the sysfs store. They won't have an immediate effect
139  * anyway, so forcing serialization to access them is pointless.
140  */
141 
142 struct ttm_pool_opts {
143 	unsigned	alloc_size;
144 	unsigned	max_size;
145 	unsigned	small;
146 };
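
/*
 * The defaults are filled in by ttm_dma_page_alloc_init() below: alloc_size
 * starts at NUM_PAGES_TO_ALLOC, small at SMALL_ALLOCATION, and max_size at
 * the max_pages value passed in by the caller. All three are counted in pages.
 */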
147 
148 /*
149  * Contains the list of all of the 'struct device' and their corresponding
150  * DMA pools. Guarded by _manager->lock.
151  * @pools: The link to 'struct ttm_pool_manager->pools'
152  * @dev: The 'struct device' associated with the 'pool'
153  * @pool: The 'struct dma_pool' associated with the 'dev'
154  */
155 struct device_pools {
156 	struct list_head pools;
157 	struct device *dev;
158 	struct dma_pool *pool;
159 };
160 
161 /*
162  * struct ttm_pool_manager - Holds memory pools for fast allocation
163  *
164  * @lock: Lock used when adding/removing from pools
165  * @pools: List of 'struct device' and 'struct dma_pool' tuples.
166  * @options: Limits for the pool.
167  * @npools: Total amount of pools in existence.
168  * @shrinker: The structure used by [un|]register_shrinker
169  */
170 struct ttm_pool_manager {
171 	struct mutex		lock;
172 	struct list_head	pools;
173 	struct ttm_pool_opts	options;
174 	unsigned		npools;
175 	struct shrinker		mm_shrink;
176 	struct kobject		kobj;
177 };
178 
179 static struct ttm_pool_manager *_manager;
180 
181 static struct attribute ttm_page_pool_max = {
182 	.name = "pool_max_size",
183 	.mode = S_IRUGO | S_IWUSR
184 };
185 static struct attribute ttm_page_pool_small = {
186 	.name = "pool_small_allocation",
187 	.mode = S_IRUGO | S_IWUSR
188 };
189 static struct attribute ttm_page_pool_alloc_size = {
190 	.name = "pool_allocation_size",
191 	.mode = S_IRUGO | S_IWUSR
192 };
193 
194 static struct attribute *ttm_pool_attrs[] = {
195 	&ttm_page_pool_max,
196 	&ttm_page_pool_small,
197 	&ttm_page_pool_alloc_size,
198 	NULL
199 };
200 
201 static void ttm_pool_kobj_release(struct kobject *kobj)
202 {
203 	struct ttm_pool_manager *m =
204 		container_of(kobj, struct ttm_pool_manager, kobj);
205 	kfree(m);
206 }
207 
208 static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
209 			      const char *buffer, size_t size)
210 {
211 	struct ttm_pool_manager *m =
212 		container_of(kobj, struct ttm_pool_manager, kobj);
213 	int chars;
214 	unsigned val;
215 	chars = sscanf(buffer, "%u", &val);
216 	if (chars == 0)
217 		return size;
218 
219 	/* Convert kb to number of pages */
220 	val = val / (PAGE_SIZE >> 10);
221 
222 	if (attr == &ttm_page_pool_max)
223 		m->options.max_size = val;
224 	else if (attr == &ttm_page_pool_small)
225 		m->options.small = val;
226 	else if (attr == &ttm_page_pool_alloc_size) {
227 		if (val > NUM_PAGES_TO_ALLOC*8) {
228 			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
229 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
230 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
231 			return size;
232 		} else if (val > NUM_PAGES_TO_ALLOC) {
233 			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
234 				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
235 		}
236 		m->options.alloc_size = val;
237 	}
238 
239 	return size;
240 }
241 
242 static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
243 			     char *buffer)
244 {
245 	struct ttm_pool_manager *m =
246 		container_of(kobj, struct ttm_pool_manager, kobj);
247 	unsigned val = 0;
248 
249 	if (attr == &ttm_page_pool_max)
250 		val = m->options.max_size;
251 	else if (attr == &ttm_page_pool_small)
252 		val = m->options.small;
253 	else if (attr == &ttm_page_pool_alloc_size)
254 		val = m->options.alloc_size;
255 
256 	val = val * (PAGE_SIZE >> 10);
257 
258 	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
259 }
260 
261 static const struct sysfs_ops ttm_pool_sysfs_ops = {
262 	.show = &ttm_pool_show,
263 	.store = &ttm_pool_store,
264 };
265 
266 static struct kobj_type ttm_pool_kobj_type = {
267 	.release = &ttm_pool_kobj_release,
268 	.sysfs_ops = &ttm_pool_sysfs_ops,
269 	.default_attrs = ttm_pool_attrs,
270 };
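
/*
 * The sysfs values are exchanged in KiB and converted to pages in
 * ttm_pool_store()/ttm_pool_show(). A usage sketch - the exact path depends
 * on where the TTM memory global kobject is registered, so treat it as an
 * assumption rather than a documented location:
 *
 *	echo 8192 > .../dma_pool/pool_max_size         # cap each pool at 8 MiB
 *	cat .../dma_pool/pool_allocation_size          # refill batch size, in KiB
 */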
271 
272 #ifndef CONFIG_X86
273 static int set_pages_array_wb(struct page **pages, int addrinarray)
274 {
275 #ifdef TTM_HAS_AGP
276 	int i;
277 
278 	for (i = 0; i < addrinarray; i++)
279 		unmap_page_from_agp(pages[i]);
280 #endif
281 	return 0;
282 }
283 
284 static int set_pages_array_wc(struct page **pages, int addrinarray)
285 {
286 #ifdef TTM_HAS_AGP
287 	int i;
288 
289 	for (i = 0; i < addrinarray; i++)
290 		map_page_into_agp(pages[i]);
291 #endif
292 	return 0;
293 }
294 
295 static int set_pages_array_uc(struct page **pages, int addrinarray)
296 {
297 #ifdef TTM_HAS_AGP
298 	int i;
299 
300 	for (i = 0; i < addrinarray; i++)
301 		map_page_into_agp(pages[i]);
302 #endif
303 	return 0;
304 }
305 #endif /* for !CONFIG_X86 */
306 
307 static int ttm_set_pages_caching(struct dma_pool *pool,
308 				 struct page **pages, unsigned cpages)
309 {
310 	int r = 0;
311 	/* Set page caching */
312 	if (pool->type & IS_UC) {
313 		r = set_pages_array_uc(pages, cpages);
314 		if (r)
315 			pr_err("%s: Failed to set %d pages to uc!\n",
316 			       pool->dev_name, cpages);
317 	}
318 	if (pool->type & IS_WC) {
319 		r = set_pages_array_wc(pages, cpages);
320 		if (r)
321 			pr_err("%s: Failed to set %d pages to wc!\n",
322 			       pool->dev_name, cpages);
323 	}
324 	return r;
325 }
326 
327 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
328 {
329 	dma_addr_t dma = d_page->dma;
330 	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
331 
332 	kfree(d_page);
333 	d_page = NULL;
334 }
335 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
336 {
337 	struct dma_page *d_page;
338 
339 	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
340 	if (!d_page)
341 		return NULL;
342 
343 	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
344 					   &d_page->dma,
345 					   pool->gfp_flags);
346 	if (d_page->vaddr)
347 		d_page->p = virt_to_page(d_page->vaddr);
348 	else {
349 		kfree(d_page);
350 		d_page = NULL;
351 	}
352 	return d_page;
353 }
354 static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
355 {
356 	enum pool_type type = IS_UNDEFINED;
357 
358 	if (flags & TTM_PAGE_FLAG_DMA32)
359 		type |= IS_DMA32;
360 	if (cstate == tt_cached)
361 		type |= IS_CACHED;
362 	else if (cstate == tt_uncached)
363 		type |= IS_UC;
364 	else
365 		type |= IS_WC;
366 
367 	return type;
368 }
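
/*
 * For example, a ttm_tt created with TTM_PAGE_FLAG_DMA32 and tt_uncached
 * caching maps to IS_UC | IS_DMA32 (POOL_IS_UC_DMA32) and will therefore be
 * served from the device's uncached DMA32 pool.
 */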
369 
370 static void ttm_pool_update_free_locked(struct dma_pool *pool,
371 					unsigned freed_pages)
372 {
373 	pool->npages_free -= freed_pages;
374 	pool->nfrees += freed_pages;
375 
376 }
377 
378 /* set memory back to wb and free the pages. */
379 static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
380 			      struct page *pages[], unsigned npages)
381 {
382 	struct dma_page *d_page, *tmp;
383 
384 	/* Don't set WB on WB page pool. */
385 	if (npages && !(pool->type & IS_CACHED) &&
386 	    set_pages_array_wb(pages, npages))
387 		pr_err("%s: Failed to set %d pages to wb!\n",
388 		       pool->dev_name, npages);
389 
390 	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
391 		list_del(&d_page->page_list);
392 		__ttm_dma_free_page(pool, d_page);
393 	}
394 }
395 
396 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
397 {
398 	/* Don't set WB on WB page pool. */
399 	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
400 		pr_err("%s: Failed to set %d pages to wb!\n",
401 		       pool->dev_name, 1);
402 
403 	list_del(&d_page->page_list);
404 	__ttm_dma_free_page(pool, d_page);
405 }
406 
407 /*
408  * Free pages from pool.
409  *
410  * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
411  * number of pages in one go.
412  *
413  * @pool: to free the pages from
414  * @nr_free: If set to true will free all pages in pool
415  **/
416 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
417 {
418 	unsigned long irq_flags;
419 	struct dma_page *dma_p, *tmp;
420 	struct page **pages_to_free;
421 	struct list_head d_pages;
422 	unsigned freed_pages = 0,
423 		 npages_to_free = nr_free;
424 
425 	if (NUM_PAGES_TO_ALLOC < nr_free)
426 		npages_to_free = NUM_PAGES_TO_ALLOC;
427 #if 0
428 	if (nr_free > 1) {
429 		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
430 			 pool->dev_name, pool->name, current->pid,
431 			 npages_to_free, nr_free);
432 	}
433 #endif
434 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
435 			GFP_KERNEL);
436 
437 	if (!pages_to_free) {
438 		pr_err("%s: Failed to allocate memory for pool free operation\n",
439 		       pool->dev_name);
440 		return 0;
441 	}
442 	INIT_LIST_HEAD(&d_pages);
443 restart:
444 	spin_lock_irqsave(&pool->lock, irq_flags);
445 
446 	/* We're picking the oldest ones off the list */
447 	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
448 					 page_list) {
449 		if (freed_pages >= npages_to_free)
450 			break;
451 
452 		/* Move the dma_page from one list to another. */
453 		list_move(&dma_p->page_list, &d_pages);
454 
455 		pages_to_free[freed_pages++] = dma_p->p;
456 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
457 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
458 
459 			ttm_pool_update_free_locked(pool, freed_pages);
460 			/**
461 			 * Because changing page caching is costly
462 			 * we unlock the pool to prevent stalling.
463 			 */
464 			spin_unlock_irqrestore(&pool->lock, irq_flags);
465 
466 			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
467 					  freed_pages);
468 
469 			INIT_LIST_HEAD(&d_pages);
470 
471 			if (likely(nr_free != FREE_ALL_PAGES))
472 				nr_free -= freed_pages;
473 
474 			if (NUM_PAGES_TO_ALLOC >= nr_free)
475 				npages_to_free = nr_free;
476 			else
477 				npages_to_free = NUM_PAGES_TO_ALLOC;
478 
479 			freed_pages = 0;
480 
481 			/* free all so restart the processing */
482 			if (nr_free)
483 				goto restart;
484 
485 			/* Not allowed to fall through or break because the
486 			 * code that follows runs under the spinlock, which
487 			 * we have already dropped here.
488 			 */
489 			goto out;
490 
491 		}
492 	}
493 
494 	/* remove range of pages from the pool */
495 	if (freed_pages) {
496 		ttm_pool_update_free_locked(pool, freed_pages);
497 		nr_free -= freed_pages;
498 	}
499 
500 	spin_unlock_irqrestore(&pool->lock, irq_flags);
501 
502 	if (freed_pages)
503 		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
504 out:
505 	kfree(pages_to_free);
506 	return nr_free;
507 }
508 
509 static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
510 {
511 	struct device_pools *p;
512 	struct dma_pool *pool;
513 
514 	if (!dev)
515 		return;
516 
517 	mutex_lock(&_manager->lock);
518 	list_for_each_entry_reverse(p, &_manager->pools, pools) {
519 		if (p->dev != dev)
520 			continue;
521 		pool = p->pool;
522 		if (pool->type != type)
523 			continue;
524 
525 		list_del(&p->pools);
526 		kfree(p);
527 		_manager->npools--;
528 		break;
529 	}
530 	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
531 		if (pool->type != type)
532 			continue;
533 		/* Takes a spinlock.. */
534 		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
535 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
536 		/* This code path is called after _all_ references to the
537 		 * struct device have been dropped - so nobody should be
538 		 * touching it. In case somebody is trying to _add_ we are
539 		 * guarded by the mutex. */
540 		list_del(&pool->pools);
541 		kfree(pool);
542 		break;
543 	}
544 	mutex_unlock(&_manager->lock);
545 }
546 
547 /*
548  * This destructor runs when the 'struct device' is freed, although
549  * the pool might have already been freed earlier.
550  */
551 static void ttm_dma_pool_release(struct device *dev, void *res)
552 {
553 	struct dma_pool *pool = *(struct dma_pool **)res;
554 
555 	if (pool)
556 		ttm_dma_free_pool(dev, pool->type);
557 }
558 
559 static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
560 {
561 	return *(struct dma_pool **)res == match_data;
562 }
563 
564 static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
565 					  enum pool_type type)
566 {
567 	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
568 	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
569 	struct device_pools *sec_pool = NULL;
570 	struct dma_pool *pool = NULL, **ptr;
571 	unsigned i;
572 	int ret = -ENODEV;
573 	char *p;
574 
575 	if (!dev)
576 		return NULL;
577 
578 	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
579 	if (!ptr)
580 		return NULL;
581 
582 	ret = -ENOMEM;
583 
584 	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
585 			    dev_to_node(dev));
586 	if (!pool)
587 		goto err_mem;
588 
589 	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
590 				dev_to_node(dev));
591 	if (!sec_pool)
592 		goto err_mem;
593 
594 	INIT_LIST_HEAD(&sec_pool->pools);
595 	sec_pool->dev = dev;
596 	sec_pool->pool =  pool;
597 
598 	INIT_LIST_HEAD(&pool->free_list);
599 	INIT_LIST_HEAD(&pool->inuse_list);
600 	INIT_LIST_HEAD(&pool->pools);
601 	spin_lock_init(&pool->lock);
602 	pool->dev = dev;
603 	pool->npages_free = pool->npages_in_use = 0;
604 	pool->nfrees = 0;
605 	pool->gfp_flags = flags;
606 	pool->size = PAGE_SIZE;
607 	pool->type = type;
608 	pool->nrefills = 0;
609 	p = pool->name;
610 	for (i = 0; i < 5; i++) {
611 		if (type & t[i]) {
612 			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
613 				      "%s", n[i]);
614 		}
615 	}
616 	*p = 0;
617 	/* We copy the name for pr_ calls because when dma_pool_destroy is
618 	 * called the kobj->name has already been deallocated. */
619 	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
620 		 dev_driver_string(dev), dev_name(dev));
621 	mutex_lock(&_manager->lock);
622 	/* You can get the dma_pool from either the global: */
623 	list_add(&sec_pool->pools, &_manager->pools);
624 	_manager->npools++;
625 	/* or from 'struct device': */
626 	list_add(&pool->pools, &dev->dma_pools);
627 	mutex_unlock(&_manager->lock);
628 
629 	*ptr = pool;
630 	devres_add(dev, ptr);
631 
632 	return pool;
633 err_mem:
634 	devres_free(ptr);
635 	kfree(sec_pool);
636 	kfree(pool);
637 	return ERR_PTR(ret);
638 }
639 
640 static struct dma_pool *ttm_dma_find_pool(struct device *dev,
641 					  enum pool_type type)
642 {
643 	struct dma_pool *pool, *tmp, *found = NULL;
644 
645 	if (type == IS_UNDEFINED)
646 		return found;
647 
648 	/* NB: We iterate on the 'struct device' which has no spinlock, but
649 	 * it does have a kref which we have taken. The kref is taken during
650 	 * graphics driver loading - in drm_pci_init it calls either
651 	 * pci_dev_get or pci_register_driver which both end up taking a kref
652 	 * on 'struct device'.
653 	 *
654 	 * On teardown, the graphics drivers end up quiescing the TTM (put_pages)
655 	 * and calling the dev_res destructors: ttm_dma_pool_release. The nice
656 	 * thing is that at that point in time there are no pages associated with
657 	 * the driver, so this function will not be called.
658 	 */
659 	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
660 		if (pool->type != type)
661 			continue;
662 		found = pool;
663 		break;
664 	}
665 	return found;
666 }
667 
668 /*
669  * Free the pages that failed to change their caching state. If there
670  * are pages that have already changed their caching state, put them
671  * back in the pool.
672  */
673 static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
674 						 struct list_head *d_pages,
675 						 struct page **failed_pages,
676 						 unsigned cpages)
677 {
678 	struct dma_page *d_page, *tmp;
679 	struct page *p;
680 	unsigned i = 0;
681 
682 	p = failed_pages[0];
683 	if (!p)
684 		return;
685 	/* Find the failed page. */
686 	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
687 		if (d_page->p != p)
688 			continue;
689 		/* .. and then progress over the full list. */
690 		list_del(&d_page->page_list);
691 		__ttm_dma_free_page(pool, d_page);
692 		if (++i < cpages)
693 			p = failed_pages[i];
694 		else
695 			break;
696 	}
697 
698 }
699 
700 /*
701  * Allocate 'count' pages and set their caching state as required by the
702  * pool type.
703  * The full list of allocated pages ends up on 'd_pages'.
704  * We return zero for success, and negative numbers as errors.
705  */
706 static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
707 					struct list_head *d_pages,
708 					unsigned count)
709 {
710 	struct page **caching_array;
711 	struct dma_page *dma_p;
712 	struct page *p;
713 	int r = 0;
714 	unsigned i, cpages;
715 	unsigned max_cpages = min(count,
716 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
717 
718 	/* allocate array for page caching change */
719 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
720 
721 	if (!caching_array) {
722 		pr_err("%s: Unable to allocate table for new pages\n",
723 		       pool->dev_name);
724 		return -ENOMEM;
725 	}
726 
727 	if (count > 1) {
728 		pr_debug("%s: (%s:%d) Getting %d pages\n",
729 			 pool->dev_name, pool->name, current->pid, count);
730 	}
731 
732 	for (i = 0, cpages = 0; i < count; ++i) {
733 		dma_p = __ttm_dma_alloc_page(pool);
734 		if (!dma_p) {
735 			pr_err("%s: Unable to get page %u\n",
736 			       pool->dev_name, i);
737 
738 			/* store already allocated pages in the pool after
739 			 * setting the caching state */
740 			if (cpages) {
741 				r = ttm_set_pages_caching(pool, caching_array,
742 							  cpages);
743 				if (r)
744 					ttm_dma_handle_caching_state_failure(
745 						pool, d_pages, caching_array,
746 						cpages);
747 			}
748 			r = -ENOMEM;
749 			goto out;
750 		}
751 		p = dma_p->p;
752 #ifdef CONFIG_HIGHMEM
753 		/* gfp flags of a highmem page should never be dma32, so
754 		 * we should be fine in such a case
755 		 */
756 		if (!PageHighMem(p))
757 #endif
758 		{
759 			caching_array[cpages++] = p;
760 			if (cpages == max_cpages) {
761 				/* Note: Cannot hold the spinlock */
762 				r = ttm_set_pages_caching(pool, caching_array,
763 						 cpages);
764 				if (r) {
765 					ttm_dma_handle_caching_state_failure(
766 						pool, d_pages, caching_array,
767 						cpages);
768 					goto out;
769 				}
770 				cpages = 0;
771 			}
772 		}
773 		list_add(&dma_p->page_list, d_pages);
774 	}
775 
776 	if (cpages) {
777 		r = ttm_set_pages_caching(pool, caching_array, cpages);
778 		if (r)
779 			ttm_dma_handle_caching_state_failure(pool, d_pages,
780 					caching_array, cpages);
781 	}
782 out:
783 	kfree(caching_array);
784 	return r;
785 }
786 
787 /*
788  * @return a non-zero count if pages are available in the pool's free list
789  *   (refilling it first if needed); 0 if no pages could be made available.
789  */
790 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
791 					 unsigned long *irq_flags)
792 {
793 	unsigned count = _manager->options.small;
794 	int r = pool->npages_free;
795 
796 	if (count > pool->npages_free) {
797 		struct list_head d_pages;
798 
799 		INIT_LIST_HEAD(&d_pages);
800 
801 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
802 
803 		/* Returns 0 on success, or a negative error code if the
804 		 * pages could not be allocated. */
805 		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
806 
807 		spin_lock_irqsave(&pool->lock, *irq_flags);
808 		if (!r) {
809 			/* Add the fresh pages to the pool's free list. */
810 			list_splice(&d_pages, &pool->free_list);
811 			++pool->nrefills;
812 			pool->npages_free += count;
813 			r = count;
814 		} else {
815 			struct dma_page *d_page;
816 			unsigned cpages = 0;
817 
818 			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
819 			       pool->dev_name, pool->name, r);
820 
821 			list_for_each_entry(d_page, &d_pages, page_list) {
822 				cpages++;
823 			}
824 			list_splice_tail(&d_pages, &pool->free_list);
825 			pool->npages_free += cpages;
826 			r = cpages;
827 		}
828 	}
829 	return r;
830 }
831 
832 /*
833  * @return 0 on success, or a negative error code (-ENOMEM) otherwise.
834  * The populate list is actually a stack (not that it matters, as TTM
835  * allocates one page at a time).
836  */
837 static int ttm_dma_pool_get_pages(struct dma_pool *pool,
838 				  struct ttm_dma_tt *ttm_dma,
839 				  unsigned index)
840 {
841 	struct dma_page *d_page;
842 	struct ttm_tt *ttm = &ttm_dma->ttm;
843 	unsigned long irq_flags;
844 	int count, r = -ENOMEM;
845 
846 	spin_lock_irqsave(&pool->lock, irq_flags);
847 	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
848 	if (count) {
849 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
850 		ttm->pages[index] = d_page->p;
851 		ttm_dma->dma_address[index] = d_page->dma;
852 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
853 		r = 0;
854 		pool->npages_in_use += 1;
855 		pool->npages_free -= 1;
856 	}
857 	spin_unlock_irqrestore(&pool->lock, irq_flags);
858 	return r;
859 }
860 
861 /*
862  * On success the pages list will hold the requested number of correctly
863  * cached pages. On failure the negative error value (-ENOMEM, etc.) is returned.
864  */
865 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
866 {
867 	struct ttm_tt *ttm = &ttm_dma->ttm;
868 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
869 	struct dma_pool *pool;
870 	enum pool_type type;
871 	unsigned i;
872 	gfp_t gfp_flags;
873 	int ret;
874 
875 	if (ttm->state != tt_unpopulated)
876 		return 0;
877 
878 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
879 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
880 		gfp_flags = GFP_USER | GFP_DMA32;
881 	else
882 		gfp_flags = GFP_HIGHUSER;
883 	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
884 		gfp_flags |= __GFP_ZERO;
885 
886 	pool = ttm_dma_find_pool(dev, type);
887 	if (!pool) {
888 		pool = ttm_dma_pool_init(dev, gfp_flags, type);
889 		if (IS_ERR_OR_NULL(pool)) {
890 			return -ENOMEM;
891 		}
892 	}
893 
894 	INIT_LIST_HEAD(&ttm_dma->pages_list);
895 	for (i = 0; i < ttm->num_pages; ++i) {
896 		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
897 		if (ret != 0) {
898 			ttm_dma_unpopulate(ttm_dma, dev);
899 			return -ENOMEM;
900 		}
901 
902 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
903 						false, false);
904 		if (unlikely(ret != 0)) {
905 			ttm_dma_unpopulate(ttm_dma, dev);
906 			return -ENOMEM;
907 		}
908 	}
909 
910 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
911 		ret = ttm_tt_swapin(ttm);
912 		if (unlikely(ret != 0)) {
913 			ttm_dma_unpopulate(ttm_dma, dev);
914 			return ret;
915 		}
916 	}
917 
918 	ttm->state = tt_unbound;
919 	return 0;
920 }
921 EXPORT_SYMBOL_GPL(ttm_dma_populate);
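
/*
 * Usage sketch (illustrative only - the driver structure and field names are
 * assumptions, not taken from this file): a driver backend that embeds a
 * struct ttm_dma_tt would call this from its populate hook and pair it with
 * ttm_dma_unpopulate() when tearing the TT down, e.g.
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *gtt =
 *			container_of(ttm, struct foo_ttm_tt, dma_tt.ttm);
 *
 *		return ttm_dma_populate(&gtt->dma_tt, gtt->dev);
 *	}
 */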
922 
923 /* Get a good estimate of how many pages are free in the pools */
924 static int ttm_dma_pool_get_num_unused_pages(void)
925 {
926 	struct device_pools *p;
927 	unsigned total = 0;
928 
929 	mutex_lock(&_manager->lock);
930 	list_for_each_entry(p, &_manager->pools, pools)
931 		total += p->pool->npages_free;
932 	mutex_unlock(&_manager->lock);
933 	return total;
934 }
935 
936 /* Put all pages in the pages list back into the correct pool to wait for reuse */
937 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
938 {
939 	struct ttm_tt *ttm = &ttm_dma->ttm;
940 	struct dma_pool *pool;
941 	struct dma_page *d_page, *next;
942 	enum pool_type type;
943 	bool is_cached = false;
944 	unsigned count = 0, i, npages = 0;
945 	unsigned long irq_flags;
946 
947 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
948 	pool = ttm_dma_find_pool(dev, type);
949 	if (!pool)
950 		return;
951 
952 	is_cached = (ttm_dma_find_pool(pool->dev,
953 		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
954 
955 	/* make sure the pages array matches the list and count the number of pages */
956 	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
957 		ttm->pages[count] = d_page->p;
958 		count++;
959 	}
960 
961 	spin_lock_irqsave(&pool->lock, irq_flags);
962 	pool->npages_in_use -= count;
963 	if (is_cached) {
964 		pool->nfrees += count;
965 	} else {
966 		pool->npages_free += count;
967 		list_splice(&ttm_dma->pages_list, &pool->free_list);
968 		npages = count;
969 		if (pool->npages_free > _manager->options.max_size) {
970 			npages = pool->npages_free - _manager->options.max_size;
971 			/* free at least NUM_PAGES_TO_ALLOC pages to reduce
972 			 * the number of calls to set_memory_wb */
973 			if (npages < NUM_PAGES_TO_ALLOC)
974 				npages = NUM_PAGES_TO_ALLOC;
975 		}
976 	}
977 	spin_unlock_irqrestore(&pool->lock, irq_flags);
978 
979 	if (is_cached) {
980 		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
981 			ttm_mem_global_free_page(ttm->glob->mem_glob,
982 						 d_page->p);
983 			ttm_dma_page_put(pool, d_page);
984 		}
985 	} else {
986 		for (i = 0; i < count; i++) {
987 			ttm_mem_global_free_page(ttm->glob->mem_glob,
988 						 ttm->pages[i]);
989 		}
990 	}
991 
992 	INIT_LIST_HEAD(&ttm_dma->pages_list);
993 	for (i = 0; i < ttm->num_pages; i++) {
994 		ttm->pages[i] = NULL;
995 		ttm_dma->dma_address[i] = 0;
996 	}
997 
998 	/* shrink the pool if necessary (only on !is_cached pools) */
999 	if (npages)
1000 		ttm_dma_page_pool_free(pool, npages);
1001 	ttm->state = tt_unpopulated;
1002 }
1003 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1004 
1005 /**
1006  * Callback for the mm to request that the pool reduce the number of pages held.
1007  */
1008 static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
1009 				  struct shrink_control *sc)
1010 {
1011 	static atomic_t start_pool = ATOMIC_INIT(0);
1012 	unsigned idx = 0;
1013 	unsigned pool_offset = atomic_add_return(1, &start_pool);
1014 	unsigned shrink_pages = sc->nr_to_scan;
1015 	struct device_pools *p;
1016 
1017 	if (list_empty(&_manager->pools))
1018 		return 0;
1019 
1020 	mutex_lock(&_manager->lock);
1021 	pool_offset = pool_offset % _manager->npools;
1022 	list_for_each_entry(p, &_manager->pools, pools) {
1023 		unsigned nr_free;
1024 
1025 		if (!p->dev)
1026 			continue;
1027 		if (shrink_pages == 0)
1028 			break;
1029 		/* Do it in round-robin fashion. */
1030 		if (++idx < pool_offset)
1031 			continue;
1032 		nr_free = shrink_pages;
1033 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
1034 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1035 			 p->pool->dev_name, p->pool->name, current->pid,
1036 			 nr_free, shrink_pages);
1037 	}
1038 	mutex_unlock(&_manager->lock);
1039 	/* return estimated number of unused pages in pool */
1040 	return ttm_dma_pool_get_num_unused_pages();
1041 }
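
/*
 * Note on the (older) shrinker interface used above: the core calls the same
 * hook both to query (sc->nr_to_scan == 0, in which case the loop exits
 * before freeing anything) and to reclaim (sc->nr_to_scan > 0), and in both
 * cases it expects the estimated number of still-freeable pages back.
 */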
1042 
1043 static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
1044 {
1045 	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
1046 	manager->mm_shrink.seeks = 1;
1047 	register_shrinker(&manager->mm_shrink);
1048 }
1049 
1050 static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
1051 {
1052 	unregister_shrinker(&manager->mm_shrink);
1053 }
1054 
1055 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1056 {
1057 	int ret = -ENOMEM;
1058 
1059 	WARN_ON(_manager);
1060 
1061 	pr_info("Initializing DMA pool allocator\n");
1062 
1063 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1064 	if (!_manager)
1065 		goto err;
1066 
1067 	mutex_init(&_manager->lock);
1068 	INIT_LIST_HEAD(&_manager->pools);
1069 
1070 	_manager->options.max_size = max_pages;
1071 	_manager->options.small = SMALL_ALLOCATION;
1072 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
1073 
1074 	/* This takes care of auto-freeing the _manager */
1075 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
1076 				   &glob->kobj, "dma_pool");
1077 	if (unlikely(ret != 0)) {
1078 		kobject_put(&_manager->kobj);
1079 		goto err;
1080 	}
1081 	ttm_dma_pool_mm_shrink_init(_manager);
1082 	return 0;
1083 err:
1084 	return ret;
1085 }
1086 
1087 void ttm_dma_page_alloc_fini(void)
1088 {
1089 	struct device_pools *p, *t;
1090 
1091 	pr_info("Finalizing DMA pool allocator\n");
1092 	ttm_dma_pool_mm_shrink_fini(_manager);
1093 
1094 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
1095 		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
1096 			current->pid);
1097 		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
1098 			ttm_dma_pool_match, p->pool));
1099 		ttm_dma_free_pool(p->dev, p->pool->type);
1100 	}
1101 	kobject_put(&_manager->kobj);
1102 	_manager = NULL;
1103 }
1104 
1105 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1106 {
1107 	struct device_pools *p;
1108 	struct dma_pool *pool = NULL;
1109 	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
1110 		     "name", "virt", "busaddr"};
1111 
1112 	if (!_manager) {
1113 		seq_printf(m, "No pool allocator running.\n");
1114 		return 0;
1115 	}
1116 	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
1117 		   h[0], h[1], h[2], h[3], h[4], h[5]);
1118 	mutex_lock(&_manager->lock);
1119 	list_for_each_entry(p, &_manager->pools, pools) {
1120 		struct device *dev = p->dev;
1121 		if (!dev)
1122 			continue;
1123 		pool = p->pool;
1124 		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
1125 				pool->name, pool->nrefills,
1126 				pool->nfrees, pool->npages_in_use,
1127 				pool->npages_free,
1128 				pool->dev_name);
1129 	}
1130 	mutex_unlock(&_manager->lock);
1131 	return 0;
1132 }
1133 EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
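
/*
 * Example of the resulting debugfs table (the numbers are made up; the last
 * column printed is the pool's dev_name):
 *
 *          pool      refills   pages freed    inuse available     name
 *  cached dma32          512             0       64      448 radeon 0000:01:00.0
 */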
1134