/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
                                          sizeof(*ttm->dma_address));
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        int ret = 0;

#if 0
        if (PageHighMem(p))
                return 0;
#endif

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */
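
/*
 * On x86 the helper above changes a page's attribute in two steps: a page
 * that is not in the default write-back state is first returned to
 * write-back with set_pages_wb() so that its current memtype is released,
 * and only then is the new write-combined or uncached attribute applied.
 * On other configurations the helper is a no-op.
 */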

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                vm_object_deallocate(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct page *dummy_read_page)
{
        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                    unsigned long size, uint32_t page_flags,
                    struct page *dummy_read_page)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
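
/*
 * Illustrative sketch only: one way a driver backend could pair
 * ttm_tt_init() and ttm_tt_fini() from its ttm_tt_create hook.  The
 * example_tt, example_backend_func and example_ttm_tt_* names are
 * hypothetical and do not exist in this file.
 */
#if 0
struct example_tt {
        struct ttm_tt ttm;
        /* driver-private state would follow */
};

static struct ttm_backend_func example_backend_func;   /* bind/unbind/destroy */

static struct ttm_tt *example_ttm_tt_create(struct ttm_bo_device *bdev,
                                            unsigned long size,
                                            uint32_t page_flags,
                                            struct page *dummy_read_page)
{
        struct example_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (tt == NULL)
                return NULL;

        tt->ttm.func = &example_backend_func;
        if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }
        return &tt->ttm;
}

static void example_ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct example_tt *tt = container_of(ttm, struct example_tt, ttm);

        ttm_tt_fini(ttm);
        kfree(tt);
}
#endif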

void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
        vm_object_t swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        VM_OBJECT_LOCK(swap_storage);
        vm_object_pip_add(swap_storage, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = (struct page *)vm_page_grab(swap_storage, i,
                                                        VM_ALLOC_NORMAL |
                                                        VM_ALLOC_RETRY);
                if (((struct vm_page *)from_page)->valid != VM_PAGE_BITS_ALL) {
                        if (vm_pager_has_page(swap_storage, i)) {
                                if (vm_pager_get_page(swap_storage, i,
                                    (struct vm_page **)&from_page, 1) != VM_PAGER_OK) {
                                        vm_page_free((struct vm_page *)from_page);
                                        ret = -EIO;
                                        goto out_err;
                                }
                        } else {
                                vm_page_zero_invalid((struct vm_page *)from_page, TRUE);
                        }
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL)) {
                        vm_page_wakeup((struct vm_page *)from_page);
                        goto out_err;
                }

                pmap_copy_page(VM_PAGE_TO_PHYS((struct vm_page *)from_page),
                               VM_PAGE_TO_PHYS((struct vm_page *)to_page));
                vm_page_wakeup((struct vm_page *)from_page);
        }
        vm_object_pip_wakeup(swap_storage);
        VM_OBJECT_UNLOCK(swap_storage);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                vm_object_deallocate(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        vm_object_pip_wakeup(swap_storage);
        VM_OBJECT_UNLOCK(swap_storage);

        return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
        vm_object_t obj;
        vm_page_t from_page, to_page;
        int i;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                obj = swap_pager_alloc(NULL,
                                       IDX_TO_OFF(ttm->num_pages),
                                       VM_PROT_DEFAULT, 0);
                if (obj == NULL) {
                        pr_err("Failed allocating swap storage\n");
                        return (-ENOMEM);
                }
        } else
                obj = persistent_swap_storage;

        VM_OBJECT_LOCK(obj);
        vm_object_pip_add(obj, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = (struct vm_page *)ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
                                               VM_ALLOC_RETRY);
                pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
                               VM_PAGE_TO_PHYS(to_page));
                to_page->valid = VM_PAGE_BITS_ALL;
                vm_page_dirty(to_page);
                vm_page_wakeup(to_page);
        }
        vm_object_pip_wakeup(obj);
        VM_OBJECT_UNLOCK(obj);

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = obj;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
}
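
/*
 * Taken together, ttm_tt_swapout() copies the ttm's pages into an anonymous
 * swap-backed VM object (or into a caller-supplied persistent object) and
 * then releases the backing pages through ttm_tt_unpopulate(), while
 * ttm_tt_swapin() copies the contents back into the ttm's page array (which
 * must already be populated) and, unless TTM_PAGE_FLAG_PERSISTENT_SWAP is
 * set, drops the temporary object again.
 */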

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#if 0
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
#endif
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
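
/*
 * Illustrative sketch only: ttm_tt_bind() and ttm_tt_unpopulate() above go
 * through bdev->driver->ttm_tt_populate and ->ttm_tt_unpopulate.  A driver
 * without special DMA needs could, for instance, forward those hooks to the
 * page-pool allocator declared in ttm_page_alloc.h, roughly as below; the
 * example_* names are hypothetical.
 */
#if 0
static int example_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void example_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}
#endif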