/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

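/* Bookkeeping helper: adjust the per-device GTT/VRAM usage counters by the
 * size of @bo. @sign > 0 accounts the BO, anything else un-accounts it.
 */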
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

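/* A TTM BO belongs to radeon iff its destroy callback is ours. */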
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

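/**
 * radeon_ttm_placement_from_domain - fill in TTM placements for a domain mask
 * @rbo: buffer object to set up
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM, _GTT and _CPU bits
 *
 * Translates the requested radeon domains into the BO's TTM placement list,
 * taking the caching flags (RADEON_GEM_GTT_UC/WC) and CPU-access hints into
 * account. If no domain bit is set, a system-memory placement is used as a
 * fallback.
 */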
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

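/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev: radeon device the BO belongs to
 * @size: size in bytes, rounded up to page granularity
 * @byte_align: requested alignment in bytes
 * @kernel: true for a kernel-internal (uninterruptible) allocation
 * @domain: initial placement domain(s)
 * @flags: RADEON_GEM_* creation flags
 * @sg: optional scatter/gather table for imported dma-bufs
 * @resv: optional reservation object to reuse
 * @bo_ptr: where to return the new BO on success
 *
 * Creates the GEM/TTM object, masks out the write-combining/uncached flags
 * on chips and configurations where they are known to misbehave, and places
 * the BO into @domain. Returns 0 on success or a negative error code.
 */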
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

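/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 * @bo: buffer object to map (must be reserved by the caller)
 * @ptr: optional location to store the kernel virtual address
 *
 * Reuses an existing kernel mapping if one is already present. Returns 0 on
 * success or a negative error code from ttm_bo_kmap().
 */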
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

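/**
 * radeon_bo_kunmap - drop the kernel mapping of a buffer object
 * @bo: buffer object to unmap
 */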
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

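/* Drops one TTM reference; clears *@bo once the reference has been dropped. */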
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

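/**
 * radeon_bo_pin_restricted - pin a buffer object within an address range
 * @bo: buffer object to pin (must be reserved by the caller)
 * @domain: domain to pin the BO into
 * @max_offset: highest acceptable GPU offset, or 0 for no restriction
 * @gpu_addr: optional location to store the resulting GPU address
 *
 * Increments the pin count if the BO is already pinned; otherwise validates
 * it into @domain with TTM_PL_FLAG_NO_EVICT set and updates the per-device
 * pin-size statistics. Returns 0 on success or a negative error code.
 */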
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

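/**
 * radeon_bo_unpin - decrease a buffer object's pin count
 * @bo: buffer object to unpin
 *
 * When the count reaches zero the NO_EVICT flag is removed again and the
 * pin-size statistics are updated. Returns the result of the revalidation.
 */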
int radeon_bo_unpin(struct radeon_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->gem_base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
#if 0
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
#endif
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

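/**
 * radeon_bo_list_validate - reserve and place all BOs of a command submission
 * @rdev: radeon device
 * @ticket: ww_mutex acquire context used for the reservation
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring index the command submission is targeted at
 *
 * Reserves every BO on @head and validates it into its preferred domain,
 * falling back to the allowed domains on failure and limiting the amount of
 * data moved per IB to the threshold computed above. On error the
 * reservation is backed off and the error code returned.
 */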
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

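/**
 * radeon_bo_get_surface_reg - bind a tiled BO to a hardware surface register
 * @bo: buffer object (must be reserved by the caller)
 *
 * Reuses the BO's existing surface register if it has one, otherwise grabs a
 * free register or steals one from an unpinned BO. Returns 0 on success or
 * -ENOMEM when all registers are in use by pinned BOs.
 */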
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

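/**
 * radeon_bo_check_tiling - keep the surface register in sync with placement
 * @bo: buffer object to check
 * @has_moved: true if the BO has just changed placement
 * @force_drop: true to unconditionally release the surface register
 *
 * For BOs with RADEON_TILING_SURFACE set, drops the surface register when
 * the BO leaves VRAM (or when @force_drop is set) and re-acquires one when
 * the BO resides in VRAM.
 */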
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

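/* TTM move notify callback: drops the surface register, invalidates VM
 * mappings and moves the usage statistics over to the new memory type.
 */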
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

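/* TTM fault callback: a CPU fault hit a BO that lies outside the CPU-visible
 * part of VRAM, so try to move it into the visible window (or to GTT as a
 * last resort) before the fault is served.
 */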
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}