/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
}

/*
 * Allocates the page pointer array and the DMA address array in a single
 * allocation; dma_address points just past the page pointer array.
 */
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

#if 0
	if (PageHighMem(p))
		return 0;
#endif

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		vm_object_deallocate(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	kvfree(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
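
/*
 * Unbind the backing storage through the backend unbind() callback and
 * mark the TTM as unbound. A TTM that is not currently bound is left
 * untouched.
 */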
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	vm_object_t swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	VM_OBJECT_LOCK(swap_storage);
	vm_object_pip_add(swap_storage, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		/* Look up (or allocate) the page at index i in the swap
		 * object and make sure it holds valid data. */
		from_page = (struct page *)vm_page_grab(swap_storage, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (((struct vm_page *)from_page)->valid != VM_PAGE_BITS_ALL) {
			if (vm_pager_has_page(swap_storage, i)) {
				if (vm_pager_get_page(swap_storage, i,
				    (struct vm_page **)&from_page, 1) != VM_PAGER_OK) {
					vm_page_free((struct vm_page *)from_page);
					ret = -EIO;
					goto out_err;
				}
			} else {
				vm_page_zero_invalid((struct vm_page *)from_page, TRUE);
			}
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			vm_page_wakeup((struct vm_page *)from_page);
			goto out_err;
		}

		/* Copy the swapped-in page back into the ttm page array. */
		pmap_copy_page(VM_PAGE_TO_PHYS((struct vm_page *)from_page),
			       VM_PAGE_TO_PHYS((struct vm_page *)to_page));
		vm_page_wakeup((struct vm_page *)from_page);
	}
	vm_object_pip_wakeup(swap_storage);
	VM_OBJECT_UNLOCK(swap_storage);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		vm_object_deallocate(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	vm_object_pip_wakeup(swap_storage);
	VM_OBJECT_UNLOCK(swap_storage);

	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		obj = swap_pager_alloc(NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0);
		if (obj == NULL) {
			pr_err("Failed allocating swap storage\n");
			return (-ENOMEM);
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = (struct vm_page *)ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		/* Copy each populated ttm page into the swap object and mark
		 * the destination valid and dirty so it can be paged out. */
		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
					       VM_ALLOC_RETRY);
		pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
			       VM_PAGE_TO_PHYS(to_page));
		to_page->valid = VM_PAGE_BITS_ALL;
		vm_page_dirty(to_page);
		vm_page_wakeup(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#if 0
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
#endif
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}