/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_memory.c 248663 2013-03-23 20:46:47Z dumbbell $
 **************************************************************************/

#include <drm/drmP.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/export.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
	u_int kobj_ref;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");

static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
{

	kprintf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	drm_free(zone, M_TTM_ZONE);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
				 struct attribute *attr,
				 char *buffer)
{
	uint64_t val = 0;

	mtx_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	mtx_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}
#endif

static void ttm_check_swapping(struct ttm_mem_global *glob);

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	mtx_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	mtx_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
#endif

static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{
}

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->spin);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		/* Drop the lock around the callback; it may sleep. */
		spin_unlock(&glob->spin);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->spin);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->spin);
}

static void ttm_shrink_work(void *arg, int pending __unused)
{
	struct ttm_mem_global *glob = arg;

	ttm_shrink(glob, true, 0ULL);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = kmalloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	refcount_init(&zone->kobj_ref, 1);
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
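
/*
 * Illustration (comment added for clarity, not from the original
 * sources): with 16 GiB of physical memory the shifts above give
 *
 *	zone_mem   = 16 GiB			(the whole zone)
 *	max_mem    = mem >> 1			=  8 GiB (50%)
 *	emer_mem   = (mem >> 1) + (mem >> 2)	= 12 GiB (75%)
 *	swap_limit = max_mem - (mem >> 3)	=  6 GiB (37.5%)
 *
 * so ordinary callers are refused beyond 50% of the zone, callers with
 * PRIV_VM_MLOCK may go up to 75%, and the swap task is kicked once
 * usage passes 37.5%.
 */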
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = kmalloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);

	/**
	 * No special dma32 zone needed.
	 */

	if (mem <= ((uint64_t) 1ULL << 32)) {
		drm_free(zone, M_TTM_ZONE);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */

	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	refcount_init(&zone->kobj_ref, 1);
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	u_int64_t mem;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_init(&glob->spin);
	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
	    taskqueue_thread_enqueue, &glob->swap_queue);
	taskqueue_start_threads(&glob->swap_queue, 1, 0, -1, "ttm swap");
	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);

	refcount_init(&glob->kobj_ref, 1);

	mem = physmem * PAGE_SIZE;

	ret = ttm_mem_init_kernel_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	ret = ttm_mem_init_dma32_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kprintf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	taskqueue_drain(glob->swap_queue, &glob->work);
	taskqueue_free(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (refcount_release(&zone->kobj_ref))
			ttm_mem_zone_kobj_release(zone);
	}
	if (refcount_release(&glob->kobj_ref))
		ttm_mem_global_kobj_release(glob);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->spin);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->spin);

	if (unlikely(needs_swapping))
		taskqueue_enqueue(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->spin);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->spin);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->spin);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->spin);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		/* Ask the shrinker for the request plus ~25% headroom. */
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
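
/*
 * Usage sketch (hypothetical caller, added for illustration; the
 * object name and malloc type are invented): a driver charges `size`
 * bytes against the global zones before doing the real allocation and
 * un-charges them if that allocation fails:
 *
 *	ret = ttm_mem_global_alloc(glob, size, false, true);
 *	if (unlikely(ret != 0))
 *		return ret;	// over limit even after shrink retries
 *	obj = kmalloc(size, M_DRM, M_NOWAIT);
 *	if (obj == NULL) {
 *		ttm_mem_global_free(glob, size);
 *		return -ENOMEM;
 *	}
 */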
#define page_to_pfn(pp) OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct vm_page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
{
	struct ttm_mem_zone *zone = NULL;

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
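
/*
 * Note on the magic number above (comment added for clarity): with
 * 4 KiB pages, pfn 0x00100000 is 2^20 pages * 2^12 bytes = 2^32 bytes,
 * i.e. the 4 GiB mark. A page above that boundary cannot have come out
 * of the dma32 budget, so both hooks account it against the kernel
 * zone alone; a page below it (zone == NULL) is charged to every zone,
 * dma32 included.
 */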
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
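
/*
 * Examples for ttm_round_pot (illustrative only, assuming
 * PAGE_SIZE == 4096):
 *
 *	ttm_round_pot(4)    == 4	(already a power of two)
 *	ttm_round_pot(100)  == 128	(next power of two, minimum 4)
 *	ttm_round_pot(4097) == 8192	(> PAGE_SIZE: page-aligned instead)
 */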