xref: /dragonfly/sys/dev/drm/ttm/ttm_tt.c (revision 7d3e9a5b)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #define pr_fmt(fmt) "[TTM] " fmt
32 
33 #include <linux/sched.h>
34 #include <linux/highmem.h>
35 #include <linux/pagemap.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/file.h>
38 #include <linux/swap.h>
39 #include <linux/slab.h>
40 #include <linux/export.h>
41 #include <drm/drm_cache.h>
42 #include <drm/ttm/ttm_module.h>
43 #include <drm/ttm/ttm_bo_driver.h>
44 #include <drm/ttm/ttm_placement.h>
45 #include <drm/ttm/ttm_page_alloc.h>
46 #include <drm/ttm/ttm_set_memory.h>
47 
48 /**
49  * Allocates a ttm structure for the given BO.
50  */
51 int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
52 {
53 	struct ttm_bo_device *bdev = bo->bdev;
54 	uint32_t page_flags = 0;
55 
56 	reservation_object_assert_held(bo->resv);
57 
58 	if (bdev->need_dma32)
59 		page_flags |= TTM_PAGE_FLAG_DMA32;
60 
61 	if (bdev->no_retry)
62 		page_flags |= TTM_PAGE_FLAG_NO_RETRY;
63 
64 	switch (bo->type) {
65 	case ttm_bo_type_device:
66 		if (zero_alloc)
67 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
68 		break;
69 	case ttm_bo_type_kernel:
70 		break;
71 	case ttm_bo_type_sg:
72 		page_flags |= TTM_PAGE_FLAG_SG;
73 		break;
74 	default:
75 		bo->ttm = NULL;
76 		pr_err("Illegal buffer object type\n");
77 		return -EINVAL;
78 	}
79 
80 	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
81 	if (unlikely(bo->ttm == NULL))
82 		return -ENOMEM;
83 
84 	return 0;
85 }
86 
87 /**
88  * Allocates storage for pointers to the pages that back the ttm.
89  */
90 static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
91 {
92 	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
93 			GFP_KERNEL | __GFP_ZERO);
94 	if (!ttm->pages)
95 		return -ENOMEM;
96 	return 0;
97 }
98 
99 static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
100 {
101 	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
102 					  sizeof(*ttm->ttm.pages) +
103 					  sizeof(*ttm->dma_address),
104 					  GFP_KERNEL | __GFP_ZERO);
105 	if (!ttm->ttm.pages)
106 		return -ENOMEM;
107 	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
108 	return 0;
109 }
110 
111 static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
112 {
113 	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
114 					  sizeof(*ttm->dma_address),
115 					  GFP_KERNEL | __GFP_ZERO);
116 	if (!ttm->dma_address)
117 		return -ENOMEM;
118 	return 0;
119 }
120 
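/**
 * Change the caching attribute of a single page, moving it back to
 * write-back first if it currently holds a non-default memtype.
 */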
121 static int ttm_tt_set_page_caching(struct page *p,
122 				   enum ttm_caching_state c_old,
123 				   enum ttm_caching_state c_new)
124 {
125 	int ret = 0;
126 
127 #if 0
128 	if (PageHighMem(p))
129 		return 0;
130 #endif
131 
132 	if (c_old != tt_cached) {
 133 		/* p isn't in the default caching state; transition it to
 134 		 * write-back first to free its current memtype. */
135 
136 		ret = ttm_set_pages_wb(p, 1);
137 		if (ret)
138 			return ret;
139 	}
140 
141 	if (c_new == tt_wc)
142 		pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
143 	else if (c_new == tt_uncached)
144 		ret = ttm_set_pages_uc(p, 1);
145 
146 	return ret;
147 }
148 
149 /*
150  * Change caching policy for the linear kernel map
 151  * for a range of pages in a ttm.
152  */
153 
154 static int ttm_tt_set_caching(struct ttm_tt *ttm,
155 			      enum ttm_caching_state c_state)
156 {
157 	int i, j;
158 	struct page *cur_page;
159 	int ret;
160 
161 	if (ttm->caching_state == c_state)
162 		return 0;
163 
164 	if (ttm->state == tt_unpopulated) {
165 		/* Change caching but don't populate */
166 		ttm->caching_state = c_state;
167 		return 0;
168 	}
169 
170 	if (ttm->caching_state == tt_cached)
171 		drm_clflush_pages(ttm->pages, ttm->num_pages);
172 
173 	for (i = 0; i < ttm->num_pages; ++i) {
174 		cur_page = ttm->pages[i];
175 		if (likely(cur_page != NULL)) {
176 			ret = ttm_tt_set_page_caching(cur_page,
177 						      ttm->caching_state,
178 						      c_state);
179 			if (unlikely(ret != 0))
180 				goto out_err;
181 		}
182 	}
183 
184 	ttm->caching_state = c_state;
185 
186 	return 0;
187 
188 out_err:
189 	for (j = 0; j < i; ++j) {
190 		cur_page = ttm->pages[j];
191 		if (likely(cur_page != NULL)) {
192 			(void)ttm_tt_set_page_caching(cur_page, c_state,
193 						      ttm->caching_state);
194 		}
195 	}
196 
197 	return ret;
198 }
199 
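/**
 * Translate TTM placement flags into a caching state and apply it to
 * every page backing the ttm.
 */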
200 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
201 {
202 	enum ttm_caching_state state;
203 
204 	if (placement & TTM_PL_FLAG_WC)
205 		state = tt_wc;
206 	else if (placement & TTM_PL_FLAG_UNCACHED)
207 		state = tt_uncached;
208 	else
209 		state = tt_cached;
210 
211 	return ttm_tt_set_caching(ttm, state);
212 }
213 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
214 
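/**
 * Unbind and unpopulate a ttm, release its swap storage unless it is
 * persistent, and hand the structure back to the driver for freeing.
 */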
215 void ttm_tt_destroy(struct ttm_tt *ttm)
216 {
217 	if (ttm == NULL)
218 		return;
219 
220 	ttm_tt_unbind(ttm);
221 
222 	if (ttm->state == tt_unbound)
223 		ttm_tt_unpopulate(ttm);
224 
225 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
226 	    ttm->swap_storage)
227 		vm_object_deallocate(ttm->swap_storage);
228 
229 	ttm->swap_storage = NULL;
230 	ttm->func->destroy(ttm);
231 }
232 
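/**
 * Initialize the fields common to all ttm variants from the owning BO.
 */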
233 static
234 void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
235 			uint32_t page_flags)
236 {
237 	ttm->bdev = bo->bdev;
238 	ttm->num_pages = bo->num_pages;
239 	ttm->caching_state = tt_cached;
240 	ttm->page_flags = page_flags;
241 	ttm->state = tt_unpopulated;
242 	ttm->swap_storage = NULL;
243 	ttm->sg = bo->sg;
244 }
245 
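/**
 * Initialize a ttm and allocate its page directory.
 */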
246 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
247 		uint32_t page_flags)
248 {
249 	ttm_tt_init_fields(ttm, bo, page_flags);
250 
251 	if (ttm_tt_alloc_page_directory(ttm)) {
252 		pr_err("Failed allocating page table\n");
253 		return -ENOMEM;
254 	}
255 	return 0;
256 }
257 EXPORT_SYMBOL(ttm_tt_init);
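#if 0
/*
 * Example (not compiled): a minimal sketch of a driver ttm_tt_create()
 * backend hook built on ttm_tt_init().  The names example_ttm,
 * example_backend_func and example_ttm_tt_create are hypothetical;
 * a real driver wraps struct ttm_tt (or struct ttm_dma_tt) in its own
 * structure and supplies its own ttm_backend_func.
 */
struct example_ttm {
	struct ttm_tt ttm;
};

/* bind/unbind/destroy callbacks assumed to be provided by the driver. */
extern struct ttm_backend_func example_backend_func;

static struct ttm_tt *
example_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct example_ttm *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;

	tt->ttm.func = &example_backend_func;
	if (ttm_tt_init(&tt->ttm, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}
#endif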
258 
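/**
 * Free the page directory allocated by ttm_tt_init().
 */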
259 void ttm_tt_fini(struct ttm_tt *ttm)
260 {
261 	kvfree(ttm->pages);
262 	ttm->pages = NULL;
263 }
264 EXPORT_SYMBOL(ttm_tt_fini);
265 
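/**
 * Initialize a DMA-capable ttm; the page array and the DMA address
 * array are carved from a single allocation.
 */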
266 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
267 		    uint32_t page_flags)
268 {
269 	struct ttm_tt *ttm = &ttm_dma->ttm;
270 
271 	ttm_tt_init_fields(ttm, bo, page_flags);
272 
273 	INIT_LIST_HEAD(&ttm_dma->pages_list);
274 	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
275 		pr_err("Failed allocating page table\n");
276 		return -ENOMEM;
277 	}
278 	return 0;
279 }
280 EXPORT_SYMBOL(ttm_dma_tt_init);
281 
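/**
 * Initialize a DMA-capable ttm for a possibly scatter-gather backed
 * BO; SG-backed ttms only need the DMA address array.
 */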
282 int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
283 		   uint32_t page_flags)
284 {
285 	struct ttm_tt *ttm = &ttm_dma->ttm;
286 	int ret;
287 
288 	ttm_tt_init_fields(ttm, bo, page_flags);
289 
290 	INIT_LIST_HEAD(&ttm_dma->pages_list);
291 	if (page_flags & TTM_PAGE_FLAG_SG)
292 		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
293 	else
294 		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
295 	if (ret) {
296 		pr_err("Failed allocating page table\n");
297 		return -ENOMEM;
298 	}
299 	return 0;
300 }
301 EXPORT_SYMBOL(ttm_sg_tt_init);
302 
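/**
 * Free the storage allocated by ttm_dma_tt_init() or ttm_sg_tt_init().
 */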
303 void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
304 {
305 	struct ttm_tt *ttm = &ttm_dma->ttm;
306 
307 	if (ttm->pages)
308 		kvfree(ttm->pages);
309 	else
310 		kvfree(ttm_dma->dma_address);
311 	ttm->pages = NULL;
312 	ttm_dma->dma_address = NULL;
313 }
314 EXPORT_SYMBOL(ttm_dma_tt_fini);
315 
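/**
 * Unbind the ttm through the driver backend if it is currently bound.
 */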
316 void ttm_tt_unbind(struct ttm_tt *ttm)
317 {
318 	int ret;
319 
320 	if (ttm->state == tt_bound) {
321 		ret = ttm->func->unbind(ttm);
322 		BUG_ON(ret);
323 		ttm->state = tt_unbound;
324 	}
325 }
326 
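/**
 * Populate the ttm if necessary and bind it to the given memory
 * region through the driver backend.
 */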
327 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
328 		struct ttm_operation_ctx *ctx)
329 {
330 	int ret = 0;
331 
332 	if (!ttm)
333 		return -EINVAL;
334 
335 	if (ttm->state == tt_bound)
336 		return 0;
337 
338 	ret = ttm_tt_populate(ttm, ctx);
339 	if (ret)
340 		return ret;
341 
342 	ret = ttm->func->bind(ttm, bo_mem);
343 	if (unlikely(ret != 0))
344 		return ret;
345 
346 	ttm->state = tt_bound;
347 
348 	return 0;
349 }
350 EXPORT_SYMBOL(ttm_tt_bind);
351 
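/**
 * Copy a swapped-out ttm back in from its backing VM object, then
 * release that object unless it is persistent swap storage.
 */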
352 int ttm_tt_swapin(struct ttm_tt *ttm)
353 {
354 	vm_object_t swap_storage;
355 	struct page *from_page;
356 	struct page *to_page;
357 	int i;
358 	int ret = -ENOMEM;
359 
360 	swap_storage = ttm->swap_storage;
361 	BUG_ON(swap_storage == NULL);
362 
363 	VM_OBJECT_LOCK(swap_storage);
364 	vm_object_pip_add(swap_storage, 1);
365 	for (i = 0; i < ttm->num_pages; ++i) {
366 		from_page = (struct page *)vm_page_grab(swap_storage, i, VM_ALLOC_NORMAL |
367 						 VM_ALLOC_RETRY);
368 		if (((struct vm_page *)from_page)->valid != VM_PAGE_BITS_ALL) {
369 			if (vm_pager_has_page(swap_storage, i)) {
370 				if (vm_pager_get_page(swap_storage, i,
371 				    (struct vm_page **)&from_page, 1) != VM_PAGER_OK) {
372 					vm_page_free((struct vm_page *)from_page);
373 					ret = -EIO;
374 					goto out_err;
375 				}
376 			} else {
377 				vm_page_zero_invalid((struct vm_page *)from_page, TRUE);
378 			}
379 		}
380 		to_page = ttm->pages[i];
381 		if (unlikely(to_page == NULL)) {
382 			vm_page_wakeup((struct vm_page *)from_page);
383 			goto out_err;
384 		}
385 
386 		pmap_copy_page(VM_PAGE_TO_PHYS((struct vm_page *)from_page),
387 			       VM_PAGE_TO_PHYS((struct vm_page *)to_page));
388 		vm_page_wakeup((struct vm_page *)from_page);
389 	}
390 	vm_object_pip_wakeup(swap_storage);
391 	VM_OBJECT_UNLOCK(swap_storage);
392 
393 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
394 		vm_object_deallocate(swap_storage);
395 	ttm->swap_storage = NULL;
396 	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
397 
398 	return 0;
399 out_err:
400 	vm_object_pip_wakeup(swap_storage);
401 	VM_OBJECT_UNLOCK(swap_storage);
402 
403 	return ret;
404 }
405 
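/**
 * Copy the ttm's pages out to a VM object (a freshly allocated
 * swap-backed object, or the given persistent storage) and
 * unpopulate the ttm.
 */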
406 int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
407 {
408 	vm_object_t obj;
409 	vm_page_t from_page, to_page;
410 	int i;
411 
412 	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
413 	BUG_ON(ttm->caching_state != tt_cached);
414 
415 	if (!persistent_swap_storage) {
416 		obj = swap_pager_alloc(NULL,
417 		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0);
418 		if (obj == NULL) {
419 			pr_err("Failed allocating swap storage\n");
420 			return (-ENOMEM);
421 		}
422 	} else
423 		obj = persistent_swap_storage;
424 
425 	VM_OBJECT_LOCK(obj);
426 	vm_object_pip_add(obj, 1);
427 	for (i = 0; i < ttm->num_pages; ++i) {
428 		from_page = (struct vm_page *)ttm->pages[i];
429 		if (unlikely(from_page == NULL))
430 			continue;
431 		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
432 					       VM_ALLOC_RETRY);
433 		pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
434 					VM_PAGE_TO_PHYS(to_page));
435 		to_page->valid = VM_PAGE_BITS_ALL;
436 		vm_page_dirty(to_page);
437 		vm_page_wakeup(to_page);
438 	}
439 	vm_object_pip_wakeup(obj);
440 	VM_OBJECT_UNLOCK(obj);
441 
442 	ttm_tt_unpopulate(ttm);
443 	ttm->swap_storage = obj;
444 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
445 	if (persistent_swap_storage)
446 		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
447 
448 	return 0;
449 }
450 
451 static void ttm_tt_add_mapping(struct ttm_tt *ttm)
452 {
453 #if 0
454 	pgoff_t i;
455 #endif
456 
457 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
458 		return;
459 #if 0
460 	for (i = 0; i < ttm->num_pages; ++i)
461 		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
462 #endif
463 }
464 
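/**
 * Allocate backing pages for the ttm, via the driver hook when one is
 * provided, otherwise from the common page pool.
 */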
465 int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
466 {
467 	int ret;
468 
469 	if (ttm->state != tt_unpopulated)
470 		return 0;
471 
472 	if (ttm->bdev->driver->ttm_tt_populate)
473 		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
474 	else
475 		ret = ttm_pool_populate(ttm, ctx);
476 	if (!ret)
477 		ttm_tt_add_mapping(ttm);
478 	return ret;
479 }
480 
481 static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
482 {
483 #if 0
484 	pgoff_t i;
485 	struct page **page = ttm->pages;
486 
487 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
488 		return;
489 
490 	for (i = 0; i < ttm->num_pages; ++i) {
491 		(*page)->mapping = NULL;
492 		(*page++)->index = 0;
493 	}
494 #endif
495 }
496 
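/**
 * Release the ttm's backing pages; the counterpart of ttm_tt_populate().
 */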
497 void ttm_tt_unpopulate(struct ttm_tt *ttm)
498 {
499 	if (ttm->state == tt_unpopulated)
500 		return;
501 
502 	ttm_tt_clear_mapping(ttm);
503 	if (ttm->bdev->driver->ttm_tt_unpopulate)
504 		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
505 	else
506 		ttm_pool_unpopulate(ttm);
507 }
508