/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void *));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	/* One allocation holds the page pointers followed by the DMA
	 * addresses; dma_address points just past the pages array. */
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address));
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

#if 0
	if (PageHighMem(p))
		return 0;
#endif

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
	else if (c_new == tt_uncached)
		pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_UNCACHEABLE);

	return 0;
}
#else  /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */
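/*
 * Illustrative example, not called anywhere in this file: a page that
 * is not in the default cached state must pass through writeback
 * before a new memory attribute is applied, so
 *
 *	ret = ttm_tt_set_page_caching(p, tt_wc, tt_uncached);
 *
 * is equivalent to set_pages_wb(p, 1) followed by
 * pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_UNCACHEABLE),
 * whereas a transition starting from tt_cached skips the
 * set_pages_wb() step entirely.
 */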
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages already switched to the new state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		vm_object_deallocate(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
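/*
 * Illustrative usage sketch, not part of this file: a driver's
 * ttm_tt_create() backend typically wraps ttm_tt_init() and pairs it
 * with ttm_tt_fini() in its destroy() callback.  The example_* names
 * below are hypothetical stand-ins for driver-defined symbols.
 *
 *	static struct ttm_tt *
 *	example_ttm_tt_create(struct ttm_bo_device *bdev,
 *			      unsigned long size, uint32_t page_flags,
 *			      struct page *dummy_read_page)
 *	{
 *		struct example_ttm_tt *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (tt == NULL)
 *			return NULL;
 *		tt->ttm.func = &example_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page) != 0) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */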
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	vm_object_t obj;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	obj = ttm->swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		/* Grab (or page in) each page of the swap object and
		 * copy it into the corresponding ttm page. */
		from_page = (struct page *)vm_page_grab(obj, i, VM_ALLOC_NORMAL |
							VM_ALLOC_RETRY);
		if (((struct vm_page *)from_page)->valid != VM_PAGE_BITS_ALL) {
			if (vm_pager_has_page(obj, i)) {
				if (vm_pager_get_page(obj, (struct vm_page **)&from_page,
						      1) != VM_PAGER_OK) {
					vm_page_free((struct vm_page *)from_page);
					ret = -EIO;
					goto out_err;
				}
			} else {
				vm_page_zero_invalid((struct vm_page *)from_page, TRUE);
			}
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			vm_page_wakeup((struct vm_page *)from_page);
			goto out_err;
		}

		pmap_copy_page(VM_PAGE_TO_PHYS((struct vm_page *)from_page),
			       VM_PAGE_TO_PHYS((struct vm_page *)to_page));
		vm_page_wakeup((struct vm_page *)from_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		vm_object_deallocate(obj);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		obj = swap_pager_alloc(NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0);
		if (obj == NULL) {
			pr_err("Failed allocating swap storage\n");
			return -ENOMEM;
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		/* Copy each backing page into the swap object and mark
		 * the destination page valid and dirty. */
		from_page = (struct vm_page *)ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
				       VM_ALLOC_RETRY);
		pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
			       VM_PAGE_TO_PHYS(to_page));
		to_page->valid = VM_PAGE_BITS_ALL;
		vm_page_dirty(to_page);
		vm_page_wakeup(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
}
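/*
 * Illustrative only: a minimal eviction sequence under the
 * preconditions the BUG_ON()s above enforce.  On success the ttm is
 * unpopulated and its contents live in ttm->swap_storage until
 * ttm_tt_swapin() copies them back.
 *
 *	if (ttm->state == tt_unbound && ttm->caching_state == tt_cached)
 *		ret = ttm_tt_swapout(ttm, NULL);
 */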
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#if 0
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
#endif
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
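/*
 * Note that unpopulation is delegated: ttm_tt_unpopulate() clears any
 * page->mapping bookkeeping (currently compiled out above) and then
 * forwards to the driver's ttm_tt_unpopulate() callback, which is
 * expected to release the backing pages and return the ttm to
 * tt_unpopulated.
 */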