/* xref: /dragonfly/sys/dev/drm/ttm/ttm_page_alloc.c (revision 7b1120e5) */
/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track pages currently in use
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <sys/eventhandler.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

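/*
 * NUM_PAGES_TO_ALLOC is sized so that the temporary array of struct page
 * pointers used for batched allocation and freeing fits in a single page.
 */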
#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	struct lock		lock;
	bool			fill_lock;
	struct pglist		list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read only object for the pool code so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from a pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit, in number of pages, below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts	options;

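	/*
	 * The union below lets the pools be addressed either by index
	 * (pools[]) or by name; the member order must match the index
	 * encoding used in ttm_get_pool().
	 */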
	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		} ;
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = ksscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return ksnprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
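/*
 * Pool index encoding: bit 0 selects the caching state (0 = write-combined,
 * 1 = uncached) and bit 1 selects the DMA32 variant, matching the member
 * order of the pools[] union in struct ttm_pool_manager.
 */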
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: pool to free the pages from
 * @nr_free: number of pages to free; pass FREE_ALL_PAGES to drain the pool
 **/
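/*
 * For a bounded request, the return value is the number of requested pages
 * that could not be freed; the vm_lowmem callback uses it to compute how many
 * pages were actually reclaimed from each pool.
 */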
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct vm_page *p, *p1;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			M_DRM, M_WAITOK);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = (struct page *)p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* more pages to free, so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because the
			 * code that follows expects the spinlock to be held,
			 * while we have already dropped it here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
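/*
 * The static start_pool counter rotates the starting pool so that repeated
 * vm_lowmem events spread the freeing work across all NUM_POOLS pools.
 */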
static unsigned long
ttm_pool_shrink_scan(void *arg)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */
	unsigned long freed = 0;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
		freed += nr_free - shrink_pages;
	}
	return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_shrink_scan, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state. Any pages that already
 * changed their caching state stay on the list so the caller can still put
 * them into the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count based on the number
 * of pages returned in the pages list.
 */
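/*
 * Pages are gathered into caching_array and have their caching state changed
 * in batches of at most max_cpages, so that at most one page worth of page
 * pointers is held at a time.
 */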
static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), M_DRM, M_WAITOK);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		((struct vm_page *)p)->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM
		/* gfp flags of a highmem page should never be dma32 so we
		 * should be fine in such a case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, (struct vm_page *)p, pageq);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
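/*
 * Called with pool->lock held; the lock is dropped and re-acquired around the
 * actual page allocation, which is why the caller's irq_flags are passed in.
 */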
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* Count the pages that did get allocated and put
			 * them into the pool. */
			TAILQ_FOREACH(p, &new_pages, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
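/*
 * If the pool holds fewer than 'count' pages the whole pool list is handed
 * over in one go; otherwise 'count' pages are moved from the head of the
 * pool list onto the caller's list.
 */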
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct pglist *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	vm_page_t p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, pageq);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, pageq);
		TAILQ_INSERT_TAIL(pages, p, pageq);
	}
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
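/*
 * After the pages are queued, the pool is trimmed back to options.max_size;
 * at least NUM_PAGES_TO_ALLOC pages are freed per trim to limit the number
 * of expensive caching-change calls.
 */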
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;
	struct vm_page *page;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
#if 0
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
#endif
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			page = (struct vm_page *)pages[i];
			TAILQ_INSERT_TAIL(&pool->list, page, pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly
 * cached pages.
 */
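/*
 * Pages are taken from the matching pool first; if the pool cannot satisfy
 * the whole request the remainder is allocated with ttm_alloc_new_pages()
 * outside of the pool lock.
 */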
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	struct vm_page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = (struct vm_page *)alloc_page(gfp_flags);
			if (!p) {

				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}
			p->flags |= PG_FICTITIOUS;

			pages[r] = (struct page *)p;
		}
		return 0;
	}

	/* combine the zero flag with the pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = (struct page *)p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(VM_PAGE_TO_PHYS(p));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 */
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = (struct page *)p;
		}
		if (r) {
			/* If there are any pages in the list put them back
			 * into the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

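/*
 * Populate a ttm_tt one page at a time, accounting each page with
 * ttm_mem_global_alloc_page(); on any failure the pages allocated so far are
 * released again via ttm_pool_unpopulate().
 */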
int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

#if 0
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
934