/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

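/*
 * A ttm_mem_zone tracks one slice of memory that TTM accounts against.
 * The zone init functions below set the watermarks so that, for a zone
 * of size zone_mem, max_mem = zone_mem / 2, emer_mem = 3 * zone_mem / 4
 * and swap_limit = 3 * zone_mem / 8. used_mem counts the bytes
 * currently reserved from the zone: ordinary allocations are refused
 * above max_mem, privileged ones above emer_mem, and crossing
 * swap_limit queues the asynchronous swapout work.
 */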
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	lockmgr(&zone->glob->lock, LK_EXCLUSIVE);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	lockmgr(&zone->glob->lock, LK_RELEASE);

	return ksnprintf(buffer, PAGE_SIZE, "%llu\n",
			 (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = ksscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	lockmgr(&zone->glob->lock, LK_EXCLUSIVE);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	lockmgr(&zone->glob->lock, LK_RELEASE);

	ttm_check_swapping(zone->glob);

	return size;
}
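
/*
 * These attributes are exported per zone through sysfs, in kiB, under
 * the "memory_accounting" kobject created in ttm_mem_global_init()
 * below. A hypothetical session (the actual path depends on where
 * ttm_get_kobj() roots the TTM kobject on this platform):
 *
 *	# cat .../memory_accounting/kernel/used_memory
 *	# echo 262144 > .../memory_accounting/kernel/swap_limit
 *
 * Stored values are clamped to zone_mem, and ttm_mem_zone_store()
 * keeps emergency_memory >= available_memory.
 */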

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (caps_priv_check_self(SYSCAP_NOVM_MLOCK) == 0)
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		/* If the pending allocation (extra) alone exceeds the
		 * target, any nonzero usage puts the zone above it. */
		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	lockmgr(&glob->lock, LK_EXCLUSIVE);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		lockmgr(&glob->lock, LK_RELEASE);
		ret = ttm_bo_swapout(glob->bo_glob, ctx);
		lockmgr(&glob->lock, LK_EXCLUSIVE);
		if (unlikely(ret != 0))
			break;
	}

	lockmgr(&glob->lock, LK_RELEASE);
}
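
/*
 * The lock juggling in ttm_shrink() is deliberate: glob->lock is
 * dropped around ttm_bo_swapout() because swapping a buffer object out
 * may sleep (and can feed freed memory back into this accounting). The
 * loop re-evaluates ttm_zones_above_swap_target() after reacquiring the
 * lock, which is what makes concurrent shrinkers safe.
 */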

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    uint64_t mem)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
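
/*
 * Worked example: with mem = 512 MiB of reserved contiguous memory,
 * the kernel zone starts out with max_mem = 256 MiB, emer_mem =
 * 256 + 128 = 384 MiB and swap_limit = 256 - 64 = 192 MiB, so swapout
 * is triggered well before ordinary allocations start failing.
 */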

#ifdef CONFIG_HIGHMEM
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   uint64_t mem)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	/*
	 * No special dma32 zone needed if all of physical memory is
	 * already addressable with 32 bits.
	 */
	if ((physmem * PAGE_SIZE) <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	if (mem > ((uint64_t) 1ULL << 32))
		mem = ((uint64_t) 1ULL << 32);

	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	uint64_t mem;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	lockinit(&glob->lock, "ttmemglob", 0, 0);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
		"memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	/*
	 * Managed contiguous memory for TTM. Only use kernel-reserved
	 * dma memory for TTM, which can be controlled via /boot/loader.conf
	 * (e.g. vm.dma_reserved=256m). This is the only truly dependable
	 * DMA memory.
	 */
	mem = (uint64_t)vm_contig_avail_pages() * PAGE_SIZE;

	ret = ttm_mem_init_kernel_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	lockmgr(&glob->lock, LK_EXCLUSIVE);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	lockmgr(&glob->lock, LK_RELEASE);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	lockmgr(&glob->lock, LK_EXCLUSIVE);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	lockmgr(&glob->lock, LK_RELEASE);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether the available memory would drop below the lower memory
 * limit:
 *
 * a. if there is no swap disk at all, or free swap space is below
 * swap_mem_limit but available system memory is above sys_mem_limit,
 * allow the TTM allocation;
 *
 * b. if available system memory is below sys_mem_limit but free swap
 * space is above swap_mem_limit, allow the TTM allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			   uint64_t num_pages,
			   struct ttm_operation_ctx *ctx)
{
	/* Not implemented in this port; the upstream logic is kept below. */
	STUB();
	return false;
#if 0
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
#endif
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

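/*
 * Reserve memory in all zones, or in a single zone when single_zone is
 * non-NULL. Under one hold of the global lock the function first checks
 * that no affected zone is already above its limit (emer_mem for
 * privileged callers, max_mem otherwise) and only then commits the
 * reservation, so a failed attempt leaves every counter untouched.
 */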
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	lockmgr(&glob->lock, LK_EXCLUSIVE);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (caps_priv_check_self(SYSCAP_NOVM_MLOCK) == 0) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	lockmgr(&glob->lock, LK_RELEASE);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}
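
/*
 * The shrink target above, memory + (memory >> 2) + 16, asks the
 * swapper to free roughly 25% more than the failed request plus a small
 * constant; the extra headroom makes it less likely that the retry
 * immediately loses the freed space to a concurrent allocator.
 */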

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}
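
/*
 * The pfn test above singles out pages beyond the 32-bit boundary: with
 * 4 kiB pages, pfn 0x00100000 corresponds to the 4 GiB mark (0x100000 *
 * 4096 bytes). Such pages cannot belong to the dma32 zone, so they are
 * accounted in the kernel zone only.
 */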

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

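/*
 * Round an allocation size up to a power of two for accounting
 * purposes. Examples: ttm_round_pot(3) == 4, ttm_round_pot(4096) ==
 * 4096, and any non-power-of-two size above PAGE_SIZE is rounded up to
 * a whole number of pages via PAGE_ALIGN().
 */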
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);