xref: /dragonfly/sys/dev/drm/ttm/ttm_page_alloc.c (revision 5071e670)
1 /*
2  * Copyright (c) Red Hat Inc.
3 
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie <airlied@redhat.com>
24  *          Jerome Glisse <jglisse@redhat.com>
25  *          Pauli Nieminen <suokkos@gmail.com>
26  */
27 /*
28  * Copyright (c) 2013 The FreeBSD Foundation
29  * All rights reserved.
30  *
31  * Portions of this software were developed by Konstantin Belousov
32  * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
33  */
34 
35 /* simple list-based uncached page pool
36  * - Pool collects recently freed pages for reuse
37  * - Use page->lru to keep a free list
38  * - doesn't track pages that are currently in use
39  */
40 
41 #define pr_fmt(fmt) "[TTM] " fmt
42 
43 #include <linux/list.h>
44 #include <linux/spinlock.h>
45 #include <linux/highmem.h>
46 #include <linux/mm_types.h>
47 #include <linux/module.h>
48 #include <linux/mm.h>
49 #include <linux/seq_file.h> /* for seq_printf */
50 #include <linux/dma-mapping.h>
51 
52 #include <linux/atomic.h>
53 
54 #include <drm/ttm/ttm_bo_driver.h>
55 #include <drm/ttm/ttm_page_alloc.h>
56 
57 #include <sys/eventhandler.h>
58 
59 #ifdef TTM_HAS_AGP
60 #include <asm/agp.h>
61 #endif
62 
63 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
64 #define SMALL_ALLOCATION		16
65 #define FREE_ALL_PAGES			(~0U)
66 /* times are in msecs */
67 #define PAGE_FREE_INTERVAL		1000
68 
69 /**
70  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
71  *
72  * @lock: Protects the shared pool from concurrent access. Must be used with
73  * irqsave/irqrestore variants because the pool allocator may be called from
74  * delayed work.
75  * @fill_lock: Prevent concurrent calls to fill.
76  * @list: Pool of free uc/wc pages for fast reuse.
77  * @gfp_flags: Flags to pass for alloc_page.
78  * @npages: Number of pages in pool.
79  */
80 struct ttm_page_pool {
81 	struct lock		lock;
82 	bool			fill_lock;
83 	struct pglist		list;
84 	gfp_t			gfp_flags;
85 	unsigned		npages;
86 	char			*name;
87 	unsigned long		nfrees;
88 	unsigned long		nrefills;
89 };
90 
91 /**
92  * Limits for the pool. They are handled without locks because the only place
93  * where they may change is the sysfs store. They won't have an immediate
94  * effect anyway, so forcing serialization of access to them is pointless.
95  */
96 
97 struct ttm_pool_opts {
98 	unsigned	alloc_size;
99 	unsigned	max_size;
100 	unsigned	small;
101 };
102 
103 #define NUM_POOLS 4
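/*
 * The four pools cover the write-combined and uncached caching states, each
 * in a regular and a DMA32 flavour; see the pools[] union in
 * struct ttm_pool_manager and the index selection in ttm_get_pool().
 */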
104 
105 /**
106  * struct ttm_pool_manager - Holds memory pools for fast allocation
107  *
108  * The manager is a read-only object for the pool code, so it doesn't need locking.
109  *
110  * @free_interval: minimum number of jiffies between freeing pages from pool.
111  * @page_alloc_inited: reference counting for pool allocation.
112  * @work: Work that is used to shrink the pool. Work is only run when there
113  * are some pages to free.
114  * @small_allocation: Limit, in number of pages, below which an allocation is considered small.
115  *
116  * @pools: All pool objects in use.
117  **/
118 struct ttm_pool_manager {
119 	struct kobject		kobj;
120 	eventhandler_tag lowmem_handler;
121 	struct ttm_pool_opts	options;
122 
123 	union {
124 		struct ttm_page_pool	pools[NUM_POOLS];
125 		struct {
126 			struct ttm_page_pool	wc_pool;
127 			struct ttm_page_pool	uc_pool;
128 			struct ttm_page_pool	wc_pool_dma32;
129 			struct ttm_page_pool	uc_pool_dma32;
130 		} ;
131 	};
132 };
133 
134 static struct attribute ttm_page_pool_max = {
135 	.name = "pool_max_size",
136 	.mode = S_IRUGO | S_IWUSR
137 };
138 static struct attribute ttm_page_pool_small = {
139 	.name = "pool_small_allocation",
140 	.mode = S_IRUGO | S_IWUSR
141 };
142 static struct attribute ttm_page_pool_alloc_size = {
143 	.name = "pool_allocation_size",
144 	.mode = S_IRUGO | S_IWUSR
145 };
146 
147 static struct attribute *ttm_pool_attrs[] = {
148 	&ttm_page_pool_max,
149 	&ttm_page_pool_small,
150 	&ttm_page_pool_alloc_size,
151 	NULL
152 };
153 
154 static void ttm_pool_kobj_release(struct kobject *kobj)
155 {
156 	struct ttm_pool_manager *m =
157 		container_of(kobj, struct ttm_pool_manager, kobj);
158 	kfree(m);
159 }
160 
161 static ssize_t ttm_pool_store(struct kobject *kobj,
162 		struct attribute *attr, const char *buffer, size_t size)
163 {
164 	struct ttm_pool_manager *m =
165 		container_of(kobj, struct ttm_pool_manager, kobj);
166 	int chars;
167 	unsigned val;
168 	chars = ksscanf(buffer, "%u", &val);
169 	if (chars == 0)
170 		return size;
171 
172 	/* Convert kb to number of pages */
173 	val = val / (PAGE_SIZE >> 10);
174 
175 	if (attr == &ttm_page_pool_max)
176 		m->options.max_size = val;
177 	else if (attr == &ttm_page_pool_small)
178 		m->options.small = val;
179 	else if (attr == &ttm_page_pool_alloc_size) {
180 		if (val > NUM_PAGES_TO_ALLOC*8) {
181 			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
182 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
183 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
184 			return size;
185 		} else if (val > NUM_PAGES_TO_ALLOC) {
186 			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
187 				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
188 		}
189 		m->options.alloc_size = val;
190 	}
191 
192 	return size;
193 }
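/*
 * Worked example of the kb <-> page conversion above, assuming 4 KiB pages
 * (PAGE_SIZE >> 10 == 4): writing "8192" (KiB) to pool_max_size stores
 * 8192 / 4 == 2048 pages in options.max_size, and ttm_pool_show() reports it
 * back as 2048 * 4 == 8192 KiB.
 */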
194 
195 static ssize_t ttm_pool_show(struct kobject *kobj,
196 		struct attribute *attr, char *buffer)
197 {
198 	struct ttm_pool_manager *m =
199 		container_of(kobj, struct ttm_pool_manager, kobj);
200 	unsigned val = 0;
201 
202 	if (attr == &ttm_page_pool_max)
203 		val = m->options.max_size;
204 	else if (attr == &ttm_page_pool_small)
205 		val = m->options.small;
206 	else if (attr == &ttm_page_pool_alloc_size)
207 		val = m->options.alloc_size;
208 
209 	val = val * (PAGE_SIZE >> 10);
210 
211 	return ksnprintf(buffer, PAGE_SIZE, "%u\n", val);
212 }
213 
214 static const struct sysfs_ops ttm_pool_sysfs_ops = {
215 	.show = &ttm_pool_show,
216 	.store = &ttm_pool_store,
217 };
218 
219 static struct kobj_type ttm_pool_kobj_type = {
220 	.release = &ttm_pool_kobj_release,
221 	.sysfs_ops = &ttm_pool_sysfs_ops,
222 	.default_attrs = ttm_pool_attrs,
223 };
224 
225 static struct ttm_pool_manager *_manager;
226 
227 #ifndef CONFIG_X86
228 static int set_pages_array_wb(struct page **pages, int addrinarray)
229 {
230 #ifdef TTM_HAS_AGP
231 	int i;
232 
233 	for (i = 0; i < addrinarray; i++)
234 		unmap_page_from_agp(pages[i]);
235 #endif
236 	return 0;
237 }
238 
239 static int set_pages_array_wc(struct page **pages, int addrinarray)
240 {
241 #ifdef TTM_HAS_AGP
242 	int i;
243 
244 	for (i = 0; i < addrinarray; i++)
245 		map_page_into_agp(pages[i]);
246 #endif
247 	return 0;
248 }
249 
250 static int set_pages_array_uc(struct page **pages, int addrinarray)
251 {
252 #ifdef TTM_HAS_AGP
253 	int i;
254 
255 	for (i = 0; i < addrinarray; i++)
256 		map_page_into_agp(pages[i]);
257 #endif
258 	return 0;
259 }
260 #endif
261 
262 /**
263  * Select the right pool for the requested caching state and ttm flags. */
264 static struct ttm_page_pool *ttm_get_pool(int flags,
265 		enum ttm_caching_state cstate)
266 {
267 	int pool_index;
268 
269 	if (cstate == tt_cached)
270 		return NULL;
271 
272 	if (cstate == tt_wc)
273 		pool_index = 0x0;
274 	else
275 		pool_index = 0x1;
276 
277 	if (flags & TTM_PAGE_FLAG_DMA32)
278 		pool_index |= 0x2;
279 
280 	return &_manager->pools[pool_index];
281 }
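/*
 * Worked example of the pool selection above: bit 0 picks uncached (1) over
 * write-combined (0) and bit 1 picks the DMA32 variant, so tt_uncached with
 * TTM_PAGE_FLAG_DMA32 set gives pool_index 0x1 | 0x2 == 0x3, i.e.
 * pools[3] == uc_pool_dma32.
 */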
282 
283 /* set memory back to wb and free the pages. */
284 static void ttm_pages_put(struct page *pages[], unsigned npages)
285 {
286 	unsigned i;
287 	if (set_pages_array_wb(pages, npages))
288 		pr_err("Failed to set %d pages to wb!\n", npages);
289 	for (i = 0; i < npages; ++i)
290 		__free_page(pages[i]);
291 }
292 
293 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
294 		unsigned freed_pages)
295 {
296 	pool->npages -= freed_pages;
297 	pool->nfrees += freed_pages;
298 }
299 
300 /**
301  * Free pages from pool.
302  *
303  * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
304  * number of pages in one go.
305  *
306  * @pool: to free the pages from
307  * @free_all: If set to true will free all pages in pool
308  **/
309 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
310 {
311 	unsigned long irq_flags;
312 	struct vm_page *p, *p1;
313 	struct page **pages_to_free;
314 	unsigned freed_pages = 0,
315 		 npages_to_free = nr_free;
316 	unsigned i;
317 
318 	if (NUM_PAGES_TO_ALLOC < nr_free)
319 		npages_to_free = NUM_PAGES_TO_ALLOC;
320 
321 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
322 			M_DRM, M_WAITOK);
323 	if (!pages_to_free) {
324 		pr_err("Failed to allocate memory for pool free operation\n");
325 		return 0;
326 	}
327 
328 restart:
329 	spin_lock_irqsave(&pool->lock, irq_flags);
330 
331 	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
332 		if (freed_pages >= npages_to_free)
333 			break;
334 
335 		pages_to_free[freed_pages++] = (struct page *)p;
336 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
337 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
338 			/* remove range of pages from the pool */
339 			for (i = 0; i < freed_pages; i++)
340 				TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
341 
342 			ttm_pool_update_free_locked(pool, freed_pages);
343 			/**
344 			 * Because changing page caching is costly
345 			 * we unlock the pool to prevent stalling.
346 			 */
347 			spin_unlock_irqrestore(&pool->lock, irq_flags);
348 
349 			ttm_pages_put(pages_to_free, freed_pages);
350 			if (likely(nr_free != FREE_ALL_PAGES))
351 				nr_free -= freed_pages;
352 
353 			if (NUM_PAGES_TO_ALLOC >= nr_free)
354 				npages_to_free = nr_free;
355 			else
356 				npages_to_free = NUM_PAGES_TO_ALLOC;
357 
358 			freed_pages = 0;
359 
360 			/* more pages still to free, restart the processing */
361 			if (nr_free)
362 				goto restart;
363 
364 			/* Not allowed to fall through or break because the
365 			 * code that follows runs with the spinlock held while
366 			 * we have already dropped it here.
367 			 */
368 			goto out;
369 
370 		}
371 	}
372 
373 	/* remove range of pages from the pool */
374 	if (freed_pages) {
375 		for (i = 0; i < freed_pages; i++)
376 			TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
377 
378 		ttm_pool_update_free_locked(pool, freed_pages);
379 		nr_free -= freed_pages;
380 	}
381 
382 	spin_unlock_irqrestore(&pool->lock, irq_flags);
383 
384 	if (freed_pages)
385 		ttm_pages_put(pages_to_free, freed_pages);
386 out:
387 	kfree(pages_to_free);
388 	return nr_free;
389 }
390 
391 /* Get a good estimate of how many pages are free in the pools */
392 static int ttm_pool_get_num_unused_pages(void)
393 {
394 	unsigned i;
395 	int total = 0;
396 	for (i = 0; i < NUM_POOLS; ++i)
397 		total += _manager->pools[i].npages;
398 
399 	return total;
400 }
401 
402 /**
403  * Callback for the vm_lowmem event to request that the pools reduce the number of pages held.
404  */
405 static int ttm_pool_mm_shrink(void *arg)
406 {
407 	static unsigned int start_pool = 0;
408 	unsigned i;
409 	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
410 	struct ttm_page_pool *pool;
411 	int shrink_pages = 100; /* XXXKIB */
412 
413 	pool_offset = pool_offset % NUM_POOLS;
414 	/* select start pool in round robin fashion */
415 	for (i = 0; i < NUM_POOLS; ++i) {
416 		unsigned nr_free = shrink_pages;
417 		if (shrink_pages == 0)
418 			break;
419 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
420 		shrink_pages = ttm_page_pool_free(pool, nr_free);
421 	}
422 	/* return estimated number of unused pages in pool */
423 	return ttm_pool_get_num_unused_pages();
424 }
425 
426 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
427 {
428 	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
429 	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
430 }
431 
432 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
433 {
434 	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
435 }
436 
437 static int ttm_set_pages_caching(struct page **pages,
438 		enum ttm_caching_state cstate, unsigned cpages)
439 {
440 	int r = 0;
441 	/* Set page caching */
442 	switch (cstate) {
443 	case tt_uncached:
444 		r = set_pages_array_uc(pages, cpages);
445 		if (r)
446 			pr_err("Failed to set %d pages to uc!\n", cpages);
447 		break;
448 	case tt_wc:
449 		r = set_pages_array_wc(pages, cpages);
450 		if (r)
451 			pr_err("Failed to set %d pages to wc!\n", cpages);
452 		break;
453 	default:
454 		break;
455 	}
456 	return r;
457 }
458 
459 /**
460  * Free the pages that failed to change the caching state. If there are any
461  * pages that have already changed their caching state, put them back into
462  * the pool.
463  */
464 static void ttm_handle_caching_state_failure(struct pglist *pages,
465 		int ttm_flags, enum ttm_caching_state cstate,
466 		struct page **failed_pages, unsigned cpages)
467 {
468 	unsigned i;
469 	/* Failed pages have to be freed */
470 	for (i = 0; i < cpages; ++i) {
471 		TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
472 		__free_page(failed_pages[i]);
473 	}
474 }
475 
476 /**
477  * Allocate new pages with correct caching.
478  *
479  * This function is reentrant if the caller updates count according to the
480  * number of pages returned in the pages list.
481  */
482 static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
483 		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
484 {
485 	struct page **caching_array;
486 	struct page *p;
487 	int r = 0;
488 	unsigned i, cpages;
489 	unsigned max_cpages = min(count,
490 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
491 
492 	/* allocate array for page caching change */
493 	caching_array = kmalloc(max_cpages*sizeof(struct page *), M_DRM, M_WAITOK);
494 
495 	if (!caching_array) {
496 		pr_err("Unable to allocate table for new pages\n");
497 		return -ENOMEM;
498 	}
499 
500 	for (i = 0, cpages = 0; i < count; ++i) {
501 		p = alloc_page(gfp_flags);
502 
503 		if (!p) {
504 			pr_err("Unable to get page %u\n", i);
505 
506 			/* store already allocated pages in the pool after
507 			 * setting the caching state */
508 			if (cpages) {
509 				r = ttm_set_pages_caching(caching_array,
510 							  cstate, cpages);
511 				if (r)
512 					ttm_handle_caching_state_failure(pages,
513 						ttm_flags, cstate,
514 						caching_array, cpages);
515 			}
516 			r = -ENOMEM;
517 			goto out;
518 		}
519 		((struct vm_page *)p)->flags |= PG_FICTITIOUS;
520 
521 #ifdef CONFIG_HIGHMEM
522 		/* gfp flags of a highmem page should never be dma32, so
523 		 * we should be fine in such a case
524 		 */
525 		if (!PageHighMem(p))
526 #endif
527 		{
528 			caching_array[cpages++] = p;
529 			if (cpages == max_cpages) {
530 
531 				r = ttm_set_pages_caching(caching_array,
532 						cstate, cpages);
533 				if (r) {
534 					ttm_handle_caching_state_failure(pages,
535 						ttm_flags, cstate,
536 						caching_array, cpages);
537 					goto out;
538 				}
539 				cpages = 0;
540 			}
541 		}
542 
543 		TAILQ_INSERT_HEAD(pages, (struct vm_page *)p, pageq);
544 	}
545 
546 	if (cpages) {
547 		r = ttm_set_pages_caching(caching_array, cstate, cpages);
548 		if (r)
549 			ttm_handle_caching_state_failure(pages,
550 					ttm_flags, cstate,
551 					caching_array, cpages);
552 	}
553 out:
554 	kfree(caching_array);
555 
556 	return r;
557 }
558 
559 /**
560  * Fill the given pool if there aren't enough pages and the requested number of
561  * pages is small.
562  */
563 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
564 		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
565 		unsigned long *irq_flags)
566 {
567 	vm_page_t p;
568 	int r;
569 	unsigned cpages = 0;
570 	/**
571 	 * Only allow one pool fill operation at a time.
572 	 * If the pool doesn't have enough pages for the allocation, new pages
573 	 * are allocated from outside the pool.
574 	 */
575 	if (pool->fill_lock)
576 		return;
577 
578 	pool->fill_lock = true;
579 
580 	/* If the allocation request is small and there are not enough
581 	 * pages in the pool we fill the pool up first. */
582 	if (count < _manager->options.small
583 		&& count > pool->npages) {
584 		struct pglist new_pages;
585 		unsigned alloc_size = _manager->options.alloc_size;
586 
587 		/**
588 		 * Can't change page caching if in irqsave context. We have to
589 		 * drop the pool->lock.
590 		 */
591 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
592 
593 		TAILQ_INIT(&new_pages);
594 		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
595 				cstate,	alloc_size);
596 		spin_lock_irqsave(&pool->lock, *irq_flags);
597 
598 		if (!r) {
599 			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
600 			++pool->nrefills;
601 			pool->npages += alloc_size;
602 		} else {
603 			pr_err("Failed to fill pool (%p)\n", pool);
604 			/* If we have any pages left, put them back into the pool. */
605 			TAILQ_FOREACH(p, &new_pages, pageq) {
606 				++cpages;
607 			}
608 			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
609 			pool->npages += cpages;
610 		}
611 
612 	}
613 	pool->fill_lock = false;
614 }
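/*
 * Illustrative example with the defaults set in ttm_page_alloc_init()
 * (options.small == SMALL_ALLOCATION == 16, options.alloc_size ==
 * NUM_PAGES_TO_ALLOC, which is 512 with 4 KiB pages and 8-byte pointers):
 * a request for 4 uc pages while the uc pool holds only 2 satisfies
 * "count < small && count > npages" above, so the pool is refilled with 512
 * freshly uncached pages before the request is served.
 */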
615 
616 /**
617  * Cut 'count' pages from the pool and put them on the return list.
618  *
619  * @return count of pages still required to fulfill the request.
620  */
621 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
622 					struct pglist *pages,
623 					int ttm_flags,
624 					enum ttm_caching_state cstate,
625 					unsigned count)
626 {
627 	unsigned long irq_flags;
628 	vm_page_t p;
629 	unsigned i;
630 
631 	spin_lock_irqsave(&pool->lock, irq_flags);
632 	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
633 
634 	if (count >= pool->npages) {
635 		/* take all pages from the pool */
636 		TAILQ_CONCAT(pages, &pool->list, pageq);
637 		count -= pool->npages;
638 		pool->npages = 0;
639 		goto out;
640 	}
641 	for (i = 0; i < count; i++) {
642 		p = TAILQ_FIRST(&pool->list);
643 		TAILQ_REMOVE(&pool->list, p, pageq);
644 		TAILQ_INSERT_TAIL(pages, p, pageq);
645 	}
646 	pool->npages -= count;
647 	count = 0;
648 out:
649 	spin_unlock_irqrestore(&pool->lock, irq_flags);
650 	return count;
651 }
652 
653 /* Put all pages in the pages array into the correct pool to wait for reuse */
654 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
655 			  enum ttm_caching_state cstate)
656 {
657 	unsigned long irq_flags;
658 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
659 	unsigned i;
660 	struct vm_page *page;
661 
662 	if (pool == NULL) {
663 		/* No pool for this memory type so free the pages */
664 		for (i = 0; i < npages; i++) {
665 			if (pages[i]) {
666 #if 0
667 				if (page_count(pages[i]) != 1)
668 					pr_err("Erroneous page count. Leaking pages.\n");
669 #endif
670 				__free_page(pages[i]);
671 				pages[i] = NULL;
672 			}
673 		}
674 		return;
675 	}
676 
677 	spin_lock_irqsave(&pool->lock, irq_flags);
678 	for (i = 0; i < npages; i++) {
679 		if (pages[i]) {
680 			page = (struct vm_page *)pages[i];
681 			TAILQ_INSERT_TAIL(&pool->list, page, pageq);
682 			pages[i] = NULL;
683 			pool->npages++;
684 		}
685 	}
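	/*
	 * Illustrative example (4 KiB pages and 8-byte pointers, so
	 * NUM_PAGES_TO_ALLOC is 512): if options.max_size is 2048 pages and
	 * the pool has grown to 2100, the excess of 52 pages is rounded up to
	 * 512 below so that a single ttm_page_pool_free() call batches the
	 * set_pages_array_wb() work.
	 */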
686 	/* Check that we don't go over the pool limit */
687 	npages = 0;
688 	if (pool->npages > _manager->options.max_size) {
689 		npages = pool->npages - _manager->options.max_size;
690 		/* free at least NUM_PAGES_TO_ALLOC pages at a time
691 		 * to reduce the number of calls to set_memory_wb */
692 		if (npages < NUM_PAGES_TO_ALLOC)
693 			npages = NUM_PAGES_TO_ALLOC;
694 	}
695 	spin_unlock_irqrestore(&pool->lock, irq_flags);
696 	if (npages)
697 		ttm_page_pool_free(pool, npages);
698 }
699 
700 /*
701  * On success the pages array will hold npages correctly
702  * cached pages.
703  */
704 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
705 			 enum ttm_caching_state cstate)
706 {
707 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
708 	struct pglist plist;
709 	struct vm_page *p = NULL;
710 	gfp_t gfp_flags = GFP_USER;
711 	unsigned count;
712 	int r;
713 
714 	/* set zero flag for page allocation if required */
715 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
716 		gfp_flags |= __GFP_ZERO;
717 
718 	/* No pool for cached pages */
719 	if (pool == NULL) {
720 		if (flags & TTM_PAGE_FLAG_DMA32)
721 			gfp_flags |= GFP_DMA32;
722 		else
723 			gfp_flags |= GFP_HIGHUSER;
724 
725 		for (r = 0; r < npages; ++r) {
726 			p = (struct vm_page *)alloc_page(gfp_flags);
727 			if (!p) {
728 
729 				pr_err("Unable to allocate page\n");
730 				return -ENOMEM;
731 			}
732 			p->flags |= PG_FICTITIOUS;
733 
734 			pages[r] = (struct page *)p;
735 		}
736 		return 0;
737 	}
738 
739 	/* combine the zero flag with the pool flags */
740 	gfp_flags |= pool->gfp_flags;
741 
742 	/* First we take pages from the pool */
743 	TAILQ_INIT(&plist);
744 	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
745 	count = 0;
746 	TAILQ_FOREACH(p, &plist, pageq) {
747 		pages[count++] = (struct page *)p;
748 	}
749 
750 	/* clear the pages coming from the pool if requested */
751 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
752 		TAILQ_FOREACH(p, &plist, pageq) {
753 			pmap_zero_page(VM_PAGE_TO_PHYS(p));
754 		}
755 	}
756 
757 	/* If pool didn't have enough pages allocate new one. */
758 	/* If the pool didn't have enough pages, allocate new ones. */
759 		/* ttm_alloc_new_pages doesn't reference pool so we can run
760 		 * multiple requests in parallel.
761 		 **/
762 		TAILQ_INIT(&plist);
763 		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
764 		TAILQ_FOREACH(p, &plist, pageq) {
765 			pages[count++] = (struct page *)p;
766 		}
767 		if (r) {
768 			/* If there are any pages in the list, put them back into
769 			 * the pool. */
770 			pr_err("Failed to allocate extra pages for large request\n");
771 			ttm_put_pages(pages, count, flags, cstate);
772 			return r;
773 		}
774 	}
775 
776 	return 0;
777 }
778 
779 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
780 		char *name)
781 {
782 	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
783 	pool->fill_lock = false;
784 	TAILQ_INIT(&pool->list);
785 	pool->npages = pool->nfrees = 0;
786 	pool->gfp_flags = flags;
787 	pool->name = name;
788 }
789 
790 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
791 {
792 	int ret;
793 
794 	WARN_ON(_manager);
795 
796 	pr_info("Initializing pool allocator\n");
797 
798 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
799 
800 	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
801 
802 	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
803 
804 	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
805 				  GFP_USER | GFP_DMA32, "wc dma");
806 
807 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
808 				  GFP_USER | GFP_DMA32, "uc dma");
809 
810 	_manager->options.max_size = max_pages;
811 	_manager->options.small = SMALL_ALLOCATION;
812 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
813 
814 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
815 				   &glob->kobj, "pool");
816 	if (unlikely(ret != 0)) {
817 		kobject_put(&_manager->kobj);
818 		_manager = NULL;
819 		return ret;
820 	}
821 
822 	ttm_pool_mm_shrink_init(_manager);
823 
824 	return 0;
825 }
826 
827 void ttm_page_alloc_fini(void)
828 {
829 	int i;
830 
831 	pr_info("Finalizing pool allocator\n");
832 	ttm_pool_mm_shrink_fini(_manager);
833 
834 	for (i = 0; i < NUM_POOLS; ++i)
835 		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
836 
837 	kobject_put(&_manager->kobj);
838 	_manager = NULL;
839 }
840 
841 int ttm_pool_populate(struct ttm_tt *ttm)
842 {
843 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
844 	unsigned i;
845 	int ret;
846 
847 	if (ttm->state != tt_unpopulated)
848 		return 0;
849 
850 	for (i = 0; i < ttm->num_pages; ++i) {
851 		ret = ttm_get_pages(&ttm->pages[i], 1,
852 				    ttm->page_flags,
853 				    ttm->caching_state);
854 		if (ret != 0) {
855 			ttm_pool_unpopulate(ttm);
856 			return -ENOMEM;
857 		}
858 
859 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
860 						false, false);
861 		if (unlikely(ret != 0)) {
862 			ttm_pool_unpopulate(ttm);
863 			return -ENOMEM;
864 		}
865 	}
866 
867 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
868 		ret = ttm_tt_swapin(ttm);
869 		if (unlikely(ret != 0)) {
870 			ttm_pool_unpopulate(ttm);
871 			return ret;
872 		}
873 	}
874 
875 	ttm->state = tt_unbound;
876 	return 0;
877 }
878 EXPORT_SYMBOL(ttm_pool_populate);
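/*
 * Usage sketch (not part of this file, names are placeholders): a driver's
 * struct ttm_bo_driver typically points its tt populate/unpopulate hooks at
 * these helpers, roughly:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 *
 * Drivers with DMA or AGP requirements substitute their own populate paths.
 */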
879 
880 void ttm_pool_unpopulate(struct ttm_tt *ttm)
881 {
882 	unsigned i;
883 
884 	for (i = 0; i < ttm->num_pages; ++i) {
885 		if (ttm->pages[i]) {
886 			ttm_mem_global_free_page(ttm->glob->mem_glob,
887 						 ttm->pages[i]);
888 			ttm_put_pages(&ttm->pages[i], 1,
889 				      ttm->page_flags,
890 				      ttm->caching_state);
891 		}
892 	}
893 	ttm->state = tt_unpopulated;
894 }
895 EXPORT_SYMBOL(ttm_pool_unpopulate);
896 
897 #if 0
898 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
899 {
900 	struct ttm_page_pool *p;
901 	unsigned i;
902 	char *h[] = {"pool", "refills", "pages freed", "size"};
903 	if (!_manager) {
904 		seq_printf(m, "No pool allocator running.\n");
905 		return 0;
906 	}
907 	seq_printf(m, "%6s %12s %13s %8s\n",
908 			h[0], h[1], h[2], h[3]);
909 	for (i = 0; i < NUM_POOLS; ++i) {
910 		p = &_manager->pools[i];
911 
912 		seq_printf(m, "%6s %12ld %13ld %8d\n",
913 				p->name, p->nrefills,
914 				p->nfrees, p->npages);
915 	}
916 	return 0;
917 }
918 #endif
919 EXPORT_SYMBOL(ttm_page_alloc_debugfs);
920