/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"

#include "util/hash_table.h"
#include "util/os_time.h"
#include "util/u_hash_table.h"
#include "frontend/drm_driver.h"
#include "drm-uapi/amdgpu_drm.h"
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>

#ifndef AMDGPU_VA_RANGE_HIGH
#define AMDGPU_VA_RANGE_HIGH 0x2
#endif

/* Set to 1 for verbose output showing committed sparse buffer ranges. */
#define DEBUG_SPARSE_COMMITS 0

struct amdgpu_sparse_backing_chunk {
   uint32_t begin, end;
};

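/* Wait until the buffer is idle for the given usage, or until the timeout
 * expires. A timeout of 0 only queries the current state without blocking.
 */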
static bool amdgpu_bo_wait(struct radeon_winsys *rws,
                           struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int64_t abs_timeout = 0;

   if (timeout == 0) {
      if (p_atomic_read(&bo->num_active_ioctls))
         return false;

   } else {
      abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Wait if any ioctl is being submitted with this buffer. */
      if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
         return false;
   }

   if (bo->bo && bo->u.real.is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

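   /* Not a shared buffer: the fences attached to the buffer by this winsys
    * are sufficient to determine idleness.
    */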
   if (timeout == 0) {
      unsigned idle_fences;
      bool buffer_idle;

      simple_mtx_lock(&ws->bo_fence_lock);

      for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
         if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
            break;
      }

      /* Release the idle fences to avoid checking them again later. */
      for (unsigned i = 0; i < idle_fences; ++i)
         amdgpu_fence_reference(&bo->fences[i], NULL);

      memmove(&bo->fences[0], &bo->fences[idle_fences],
              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
      bo->num_fences -= idle_fences;

      buffer_idle = !bo->num_fences;
      simple_mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   } else {
      bool buffer_idle = true;

      simple_mtx_lock(&ws->bo_fence_lock);
      while (bo->num_fences && buffer_idle) {
         struct pipe_fence_handle *fence = NULL;
         bool fence_idle = false;

         amdgpu_fence_reference(&fence, bo->fences[0]);

         /* Wait for the fence. */
         simple_mtx_unlock(&ws->bo_fence_lock);
         if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
         simple_mtx_lock(&ws->bo_fence_lock);

         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
          */
         if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
            amdgpu_fence_reference(&bo->fences[0], NULL);
            memmove(&bo->fences[0], &bo->fences[1],
                    (bo->num_fences - 1) * sizeof(*bo->fences));
            bo->num_fences--;
         }

         amdgpu_fence_reference(&fence, NULL);
      }
      simple_mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->base.placement;
}

static enum radeon_bo_flag amdgpu_bo_get_flags(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->base.usage;
}

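/* Drop all fence references held by the buffer and free its fence array. */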
static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
   for (unsigned i = 0; i < bo->num_fences; ++i)
      amdgpu_fence_reference(&bo->fences[i], NULL);

   FREE(bo->fences);
   bo->num_fences = 0;
   bo->max_fences = 0;
}

void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_screen_winsys *sws_iter;

   assert(bo->bo && "must not be called for slab entries");

   if (!bo->u.real.is_user_ptr && bo->u.real.cpu_ptr) {
      bo->u.real.cpu_ptr = NULL;
      amdgpu_bo_unmap(&ws->dummy_ws.base, &bo->base);
   }
   assert(bo->u.real.is_user_ptr || bo->u.real.map_count == 0);

#if DEBUG
   if (ws->debug_all_bos) {
      simple_mtx_lock(&ws->global_bo_list_lock);
      list_del(&bo->u.real.global_list_item);
      ws->num_buffers--;
      simple_mtx_unlock(&ws->global_bo_list_lock);
   }
#endif

   /* Close all KMS handles retrieved for other DRM file descriptions */
   simple_mtx_lock(&ws->sws_list_lock);
   for (sws_iter = ws->sws_list; sws_iter; sws_iter = sws_iter->next) {
      struct hash_entry *entry;

      if (!sws_iter->kms_handles)
         continue;

      entry = _mesa_hash_table_search(sws_iter->kms_handles, bo);
      if (entry) {
         struct drm_gem_close args = { .handle = (uintptr_t)entry->data };

         drmIoctl(sws_iter->fd, DRM_IOCTL_GEM_CLOSE, &args);
         _mesa_hash_table_remove(sws_iter->kms_handles, entry);
      }
   }
   simple_mtx_unlock(&ws->sws_list_lock);

   simple_mtx_lock(&ws->bo_export_table_lock);
   _mesa_hash_table_remove_key(ws->bo_export_table, bo->bo);
   simple_mtx_unlock(&ws->bo_export_table_lock);

   if (bo->base.placement & RADEON_DOMAIN_VRAM_GTT) {
      amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
      amdgpu_va_range_free(bo->u.real.va_handle);
   }
   amdgpu_bo_free(bo->bo);

   amdgpu_bo_remove_fences(bo);

   if (bo->base.placement & RADEON_DOMAIN_VRAM)
      ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->base.placement & RADEON_DOMAIN_GTT)
      ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);

   simple_mtx_destroy(&bo->lock);
   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct radeon_winsys *rws, struct pb_buffer *_buf)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo); /* slab buffers have a separate vtbl */

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(bo->cache_entry);
   else
      amdgpu_bo_destroy(ws, _buf);
}

static void amdgpu_clean_up_buffer_managers(struct amdgpu_winsys *ws)
{
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      pb_slabs_reclaim(&ws->bo_slabs[i]);
      if (ws->info.has_tmz_support)
         pb_slabs_reclaim(&ws->bo_slabs_encrypted[i]);
   }

   pb_cache_release_all_buffers(&ws->bo_cache);
}

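/* CPU-map the real buffer and update the mapped-memory statistics.
 * If the first attempt fails, reclaim idle buffers from the slab and cache
 * managers and retry once.
 */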
static bool amdgpu_bo_do_map(struct radeon_winsys *rws, struct amdgpu_winsys_bo *bo, void **cpu)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   assert(!(bo->base.usage & RADEON_FLAG_SPARSE) && bo->bo && !bo->u.real.is_user_ptr);
   int r = amdgpu_bo_cpu_map(bo->bo, cpu);
   if (r) {
      /* Clean up buffer managers and try again. */
      amdgpu_clean_up_buffer_managers(ws);
      r = amdgpu_bo_cpu_map(bo->bo, cpu);
      if (r)
         return false;
   }

   if (p_atomic_inc_return(&bo->u.real.map_count) == 1) {
      if (bo->base.placement & RADEON_DOMAIN_VRAM)
         ws->mapped_vram += bo->base.size;
      else if (bo->base.placement & RADEON_DOMAIN_GTT)
         ws->mapped_gtt += bo->base.size;
      ws->num_mapped_buffers++;
   }

   return true;
}

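/* Map a buffer for CPU access, synchronizing against the GPU according to
 * "usage" (PIPE_MAP_UNSYNCHRONIZED, PIPE_MAP_DONTBLOCK, read vs. write).
 * Slab entries are mapped through their backing ("real") buffer.
 *
 * Illustrative usage only (not taken from this file):
 *    void *ptr = amdgpu_bo_map(rws, buf, NULL, PIPE_MAP_WRITE);
 *    if (ptr) {
 *       memcpy(ptr, data, size);
 *       amdgpu_bo_unmap(rws, buf);
 *    }
 */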
void *amdgpu_bo_map(struct radeon_winsys *rws,
                    struct pb_buffer *buf,
                    struct radeon_cmdbuf *rcs,
                    enum pipe_map_flags usage)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
   struct amdgpu_cs *cs = rcs ? amdgpu_cs(rcs) : NULL;

   assert(!(bo->base.usage & RADEON_FLAG_SPARSE));

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_MAP_DONTBLOCK) {
         if (!(usage & PIPE_MAP_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data,
                            RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data,
                            RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_MAP_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
                  cs->flush_cs(cs->flush_data,
                               RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data,
                               RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* Buffer synchronization has been checked, now actually map the buffer. */
   void *cpu = NULL;
   uint64_t offset = 0;

   if (bo->bo) {
      real = bo;
   } else {
      real = bo->u.slab.real;
      offset = bo->va - real->va;
   }

   if (usage & RADEON_MAP_TEMPORARY) {
      if (real->u.real.is_user_ptr) {
         cpu = real->u.real.cpu_ptr;
      } else {
         if (!amdgpu_bo_do_map(rws, real, &cpu))
            return NULL;
      }
   } else {
      cpu = p_atomic_read(&real->u.real.cpu_ptr);
      if (!cpu) {
         simple_mtx_lock(&real->lock);
         /* Must re-check due to the possibility of a race. Re-check need not
          * be atomic thanks to the lock. */
         cpu = real->u.real.cpu_ptr;
         if (!cpu) {
            if (!amdgpu_bo_do_map(rws, real, &cpu)) {
               simple_mtx_unlock(&real->lock);
               return NULL;
            }
            p_atomic_set(&real->u.real.cpu_ptr, cpu);
         }
         simple_mtx_unlock(&real->lock);
      }
   }

   return (uint8_t*)cpu + offset;
}

void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;

   assert(!(bo->base.usage & RADEON_FLAG_SPARSE));

   real = bo->bo ? bo : bo->u.slab.real;

   if (real->u.real.is_user_ptr)
      return;

   assert(real->u.real.map_count != 0 && "too many unmaps");
   if (p_atomic_dec_zero(&real->u.real.map_count)) {
      assert(!real->u.real.cpu_ptr &&
             "too many unmaps or forgot RADEON_MAP_TEMPORARY flag");

      if (real->base.placement & RADEON_DOMAIN_VRAM)
         ws->mapped_vram -= real->base.size;
      else if (real->base.placement & RADEON_DOMAIN_GTT)
         ws->mapped_gtt -= real->base.size;
      ws->num_mapped_buffers--;
   }

   amdgpu_bo_cpu_unmap(real->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   /* Cast to void* because one of the function parameters is a struct pointer instead of void*. */
   (void*)amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo)
{
#if DEBUG
   assert(bo->bo);

   if (ws->debug_all_bos) {
      simple_mtx_lock(&ws->global_bo_list_lock);
      list_addtail(&bo->u.real.global_list_item, &ws->global_bo_list);
      ws->num_buffers++;
      simple_mtx_unlock(&ws->global_bo_list_lock);
   }
#endif
}

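/* Round the alignment up for faster address translation: buffers at least as
 * large as a PTE fragment are aligned to the fragment size, and smaller
 * buffers to the largest power of two not exceeding their size
 * (e.g. a 24 KB buffer gets 16 KB alignment).
 */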
static unsigned amdgpu_get_optimal_alignment(struct amdgpu_winsys *ws,
                                             uint64_t size, unsigned alignment)
{
   /* Increase the alignment for faster address translation and better memory
    * access pattern.
    */
   if (size >= ws->info.pte_fragment_size) {
      alignment = MAX2(alignment, ws->info.pte_fragment_size);
   } else if (size) {
      unsigned msb = util_last_bit(size);

      alignment = MAX2(alignment, 1u << (msb - 1));
   }
   return alignment;
}

static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
                                                 int heap)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle = NULL;
   int r;
   bool init_pb_cache;

   /* VRAM or GTT must be specified, but not both at the same time. */
   assert(util_bitcount(initial_domain & (RADEON_DOMAIN_VRAM_GTT |
                                          RADEON_DOMAIN_GDS |
                                          RADEON_DOMAIN_OA)) == 1);

   alignment = amdgpu_get_optimal_alignment(ws, size, alignment);

   init_pb_cache = heap >= 0 && (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING);

   bo = CALLOC(1, sizeof(struct amdgpu_winsys_bo) +
                  init_pb_cache * sizeof(struct pb_cache_entry));
   if (!bo) {
      return NULL;
   }

   if (init_pb_cache) {
      bo->u.real.use_reusable_pool = true;
      pb_cache_init_entry(&ws->bo_cache, bo->cache_entry, &bo->base,
                          heap);
   }
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;

      /* Since VRAM and GTT have almost the same performance on APUs, we could
       * just set GTT. However, in order to decrease GTT(RAM) usage, which is
       * shared with the OS, allow VRAM placements too. The idea is not to use
       * VRAM usefully, but to use it so that it's not unused and wasted.
       */
      if (!ws->info.has_dedicated_vram)
         request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
   }

   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
   if (initial_domain & RADEON_DOMAIN_GDS)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GDS;
   if (initial_domain & RADEON_DOMAIN_OA)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_OA;

   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
   if (ws->zero_all_vram_allocs &&
       (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
      request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
   if ((flags & RADEON_FLAG_ENCRYPTED) &&
       ws->info.has_tmz_support) {
      request.flags |= AMDGPU_GEM_CREATE_ENCRYPTED;

      if (!(flags & RADEON_FLAG_DRIVER_INTERNAL)) {
         struct amdgpu_screen_winsys *sws_iter;
         simple_mtx_lock(&ws->sws_list_lock);
         for (sws_iter = ws->sws_list; sws_iter; sws_iter = sws_iter->next) {
            *((bool*) &sws_iter->base.uses_secure_bos) = true;
         }
         simple_mtx_unlock(&ws->sws_list_lock);
      }
   }

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      fprintf(stderr, "amdgpu:    flags     : %" PRIx64 "\n", request.flags);
      goto error_bo_alloc;
   }

   if (initial_domain & RADEON_DOMAIN_VRAM_GTT) {
      unsigned va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;

      r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                                size + va_gap_size, alignment,
                                0, &va, &va_handle,
                                (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
                                AMDGPU_VA_RANGE_HIGH);
      if (r)
         goto error_va_alloc;

      unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
                          AMDGPU_VM_PAGE_EXECUTABLE;

      if (!(flags & RADEON_FLAG_READ_ONLY))
         vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;

      if (flags & RADEON_FLAG_UNCACHED)
         vm_flags |= AMDGPU_VM_MTYPE_UC;

      r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
                              AMDGPU_VA_OP_MAP);
      if (r)
         goto error_va_map;
   }

   simple_mtx_init(&bo->lock, mtx_plain);
   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment_log2 = util_logbase2(alignment);
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->bo = buf_handle;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->base.placement = initial_domain;
   bo->base.usage = flags;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);

   amdgpu_add_buffer_to_global_list(ws, bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
{
   return amdgpu_bo_wait(&ws->dummy_ws.base, _buf, 0, RADEON_USAGE_READWRITE);
}

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct amdgpu_winsys_bo *bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);

   return amdgpu_bo_can_reclaim(priv, &bo->base);
}

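/* Return the slab allocator whose largest entry order can hold "size", using
 * the encrypted slab set when TMZ is requested and supported.
 */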
static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size,
                                  enum radeon_bo_flag flags)
{
   struct pb_slabs *bo_slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
      ws->bo_slabs_encrypted : ws->bo_slabs;
   /* Find the correct slab allocator for the given size. */
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      struct pb_slabs *slabs = &bo_slabs[i];

      if (size <= 1 << (slabs->min_order + slabs->num_orders - 1))
         return slabs;
   }

   assert(0);
   return NULL;
}

static unsigned get_slab_wasted_size(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo)
{
   assert(bo->base.size <= bo->u.slab.entry.entry_size);
   assert(bo->base.size < (1 << bo->base.alignment_log2) ||
          bo->base.size < 1 << ws->bo_slabs[0].min_order ||
          bo->base.size > bo->u.slab.entry.entry_size / 2);
   return bo->u.slab.entry.entry_size - bo->base.size;
}

static void amdgpu_bo_slab_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct pb_slabs *slabs;

   assert(!bo->bo);

   slabs = get_slabs(ws, bo->base.size, bo->base.usage & RADEON_FLAG_ENCRYPTED);

   if (bo->base.placement & RADEON_DOMAIN_VRAM)
      ws->slab_wasted_vram -= get_slab_wasted_size(ws, bo);
   else
      ws->slab_wasted_gtt -= get_slab_wasted_size(ws, bo);

   pb_slab_free(slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
   /* Cast to void* because one of the function parameters is a struct pointer instead of void*. */
   (void*)amdgpu_bo_slab_destroy
   /* other functions are never called */
};

/* Return the power of two size of a slab entry matching the input size. */
static unsigned get_slab_pot_entry_size(struct amdgpu_winsys *ws, unsigned size)
{
   unsigned entry_size = util_next_power_of_two(size);
   unsigned min_entry_size = 1 << ws->bo_slabs[0].min_order;

   return MAX2(entry_size, min_entry_size);
}

/* Return the slab entry alignment. */
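/* Entries no larger than 3/4 of their power-of-two entry size only need to be
 * aligned to a quarter of that power of two, e.g. a 96 KB entry (3/4 of
 * 128 KB) gets 32 KB alignment; everything else is aligned to the full
 * power-of-two entry size.
 */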
static unsigned get_slab_entry_alignment(struct amdgpu_winsys *ws, unsigned size)
{
   unsigned entry_size = get_slab_pot_entry_size(ws, size);

   if (size <= entry_size * 3 / 4)
      return entry_size / 4;

   return entry_size;
}

static struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                            unsigned entry_size,
                                            unsigned group_index,
                                            bool encrypted)
{
   struct amdgpu_winsys *ws = priv;
   struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
   enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
   enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
   uint32_t base_id;
   unsigned slab_size = 0;

   if (!slab)
      return NULL;

   if (encrypted)
      flags |= RADEON_FLAG_ENCRYPTED;

   struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
      ws->bo_slabs_encrypted : ws->bo_slabs;

   /* Determine the slab buffer size. */
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      unsigned max_entry_size = 1 << (slabs[i].min_order + slabs[i].num_orders - 1);

      if (entry_size <= max_entry_size) {
         /* The slab size is twice the size of the largest possible entry. */
         slab_size = max_entry_size * 2;

         if (!util_is_power_of_two_nonzero(entry_size)) {
            assert(util_is_power_of_two_nonzero(entry_size * 4 / 3));

            /* If the entry size is 3/4 of a power of two, we would waste space and not gain
             * anything if we allocated only twice the power of two for the backing buffer:
             *   2 * 3/4 = 1.5 usable with buffer size 2
             *
             * Allocating 5 times the entry size leads us to the next power of two and results
             * in a much better memory utilization:
             *   5 * 3/4 = 3.75 usable with buffer size 4
             */
            if (entry_size * 5 > slab_size)
               slab_size = util_next_power_of_two(entry_size * 5);
         }

         /* The largest slab should have the same size as the PTE fragment
          * size to get faster address translation.
          */
         if (i == NUM_SLAB_ALLOCATORS - 1 &&
             slab_size < ws->info.pte_fragment_size)
            slab_size = ws->info.pte_fragment_size;
         break;
      }
   }
   assert(slab_size != 0);

   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
                                                    slab_size, slab_size,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   slab_size = slab->buffer->base.size;

   slab->base.num_entries = slab_size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entry_size = entry_size;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   list_inithead(&slab->base.free);

   base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct amdgpu_winsys_bo *bo = &slab->entries[i];

      simple_mtx_init(&bo->lock, mtx_plain);
      bo->base.alignment_log2 = util_logbase2(get_slab_entry_alignment(ws, entry_size));
      bo->base.size = entry_size;
      bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
      bo->va = slab->buffer->va + i * entry_size;
      bo->base.placement = domains;
      bo->unique_id = base_id + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.entry.entry_size = entry_size;

      if (slab->buffer->bo) {
         /* The slab is not suballocated. */
         bo->u.slab.real = slab->buffer;
      } else {
         /* The slab is allocated out of a bigger slab. */
         bo->u.slab.real = slab->buffer->u.slab.real;
         assert(bo->u.slab.real->bo);
      }

      list_addtail(&bo->u.slab.entry.head, &slab->base.free);
   }

   /* Wasted alignment due to slabs with 3/4 allocations being aligned to a power of two. */
   assert(slab->base.num_entries * entry_size <= slab_size);
   if (domains & RADEON_DOMAIN_VRAM)
      ws->slab_wasted_vram += slab_size - slab->base.num_entries * entry_size;
   else
      ws->slab_wasted_gtt += slab_size - slab->base.num_entries * entry_size;

   return &slab->base;

fail_buffer:
   amdgpu_winsys_bo_reference(ws, &slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}

struct pb_slab *amdgpu_bo_slab_alloc_encrypted(void *priv, unsigned heap,
                                               unsigned entry_size,
                                               unsigned group_index)
{
   return amdgpu_bo_slab_alloc(priv, heap, entry_size, group_index, true);
}

struct pb_slab *amdgpu_bo_slab_alloc_normal(void *priv, unsigned heap,
                                            unsigned entry_size,
                                            unsigned group_index)
{
   return amdgpu_bo_slab_alloc(priv, heap, entry_size, group_index, false);
}

void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *pslab)
{
   struct amdgpu_slab *slab = amdgpu_slab(pslab);
   unsigned slab_size = slab->buffer->base.size;

   assert(slab->base.num_entries * slab->entry_size <= slab_size);
   if (slab->buffer->base.placement & RADEON_DOMAIN_VRAM)
      ws->slab_wasted_vram -= slab_size - slab->base.num_entries * slab->entry_size;
   else
      ws->slab_wasted_gtt -= slab_size - slab->base.num_entries * slab->entry_size;

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      amdgpu_bo_remove_fences(&slab->entries[i]);
      simple_mtx_destroy(&slab->entries[i].lock);
   }

   FREE(slab->entries);
   amdgpu_winsys_bo_reference(ws, &slab->buffer, NULL);
   FREE(slab);
}

#if DEBUG_SPARSE_COMMITS
static void
sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
{
   fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
                   "Commitments:\n",
           __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);

   struct amdgpu_sparse_backing *span_backing = NULL;
   uint32_t span_first_backing_page = 0;
   uint32_t span_first_va_page = 0;
   uint32_t va_page = 0;

   for (;;) {
      struct amdgpu_sparse_backing *backing = 0;
      uint32_t backing_page = 0;

      if (va_page < bo->u.sparse.num_va_pages) {
         backing = bo->u.sparse.commitments[va_page].backing;
         backing_page = bo->u.sparse.commitments[va_page].page;
      }

      if (span_backing &&
          (backing != span_backing ||
           backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
         fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
                 span_first_va_page, va_page - 1, span_backing,
                 span_first_backing_page,
                 span_first_backing_page + (va_page - span_first_va_page) - 1);

         span_backing = NULL;
      }

      if (va_page >= bo->u.sparse.num_va_pages)
         break;

      if (backing && !span_backing) {
         span_backing = backing;
         span_first_backing_page = backing_page;
         span_first_va_page = va_page;
      }

      va_page++;
   }

   fprintf(stderr, "Backing:\n");

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
      for (unsigned i = 0; i < backing->num_chunks; ++i)
         fprintf(stderr, "   %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
   }
}
#endif

/*
 * Attempt to allocate the given number of backing pages. Fewer pages may be
 * allocated (depending on the fragmentation of existing backing buffers),
 * which will be reflected by a change to *pnum_pages.
 */
static struct amdgpu_sparse_backing *
sparse_backing_alloc(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo,
                     uint32_t *pstart_page, uint32_t *pnum_pages)
{
   struct amdgpu_sparse_backing *best_backing;
   unsigned best_idx;
   uint32_t best_num_pages;

   best_backing = NULL;
   best_idx = 0;
   best_num_pages = 0;

   /* This is a very simple and inefficient best-fit algorithm. */
   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
         uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
         if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
            (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
            best_backing = backing;
            best_idx = idx;
            best_num_pages = cur_num_pages;
         }
      }
   }

   /* Allocate a new backing buffer if necessary. */
   if (!best_backing) {
      struct pb_buffer *buf;
      uint64_t size;
      uint32_t pages;

      best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
      if (!best_backing)
         return NULL;

      best_backing->max_chunks = 4;
      best_backing->chunks = CALLOC(best_backing->max_chunks,
                                    sizeof(*best_backing->chunks));
      if (!best_backing->chunks) {
         FREE(best_backing);
         return NULL;
      }

      assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));

      size = MIN3(bo->base.size / 16,
                  8 * 1024 * 1024,
                  bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
      size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);

      buf = amdgpu_bo_create(ws, size, RADEON_SPARSE_PAGE_SIZE,
                             bo->base.placement,
                             (bo->base.usage & ~RADEON_FLAG_SPARSE &
                              /* Set the interprocess sharing flag to disable pb_cache because
                               * amdgpu_bo_wait doesn't wait for active CS jobs.
                               */
                              ~RADEON_FLAG_NO_INTERPROCESS_SHARING) | RADEON_FLAG_NO_SUBALLOC);
      if (!buf) {
         FREE(best_backing->chunks);
         FREE(best_backing);
         return NULL;
      }

      /* We might have gotten a bigger buffer than requested via caching. */
      pages = buf->size / RADEON_SPARSE_PAGE_SIZE;

      best_backing->bo = amdgpu_winsys_bo(buf);
      best_backing->num_chunks = 1;
      best_backing->chunks[0].begin = 0;
      best_backing->chunks[0].end = pages;

      list_add(&best_backing->list, &bo->u.sparse.backing);
      bo->u.sparse.num_backing_pages += pages;

      best_idx = 0;
      best_num_pages = pages;
   }

   *pnum_pages = MIN2(*pnum_pages, best_num_pages);
   *pstart_page = best_backing->chunks[best_idx].begin;
   best_backing->chunks[best_idx].begin += *pnum_pages;

   if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
      memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
              sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
      best_backing->num_chunks--;
   }

   return best_backing;
}

static void
sparse_free_backing_buffer(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo,
                           struct amdgpu_sparse_backing *backing)
{
   bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;

   simple_mtx_lock(&ws->bo_fence_lock);
   amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
   simple_mtx_unlock(&ws->bo_fence_lock);

   list_del(&backing->list);
   amdgpu_winsys_bo_reference(ws, &backing->bo, NULL);
   FREE(backing->chunks);
   FREE(backing);
}

/*
 * Return a range of pages from the given backing buffer back into the
 * free structure.
 */
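/*
 * The chunk list is kept sorted by page index with no overlaps. The freed
 * range is merged with neighbouring free chunks where possible, and the
 * backing buffer is released once it becomes entirely free.
 */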
static bool
sparse_backing_free(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo,
                    struct amdgpu_sparse_backing *backing,
                    uint32_t start_page, uint32_t num_pages)
{
   uint32_t end_page = start_page + num_pages;
   unsigned low = 0;
   unsigned high = backing->num_chunks;

   /* Find the first chunk with begin >= start_page. */
   while (low < high) {
      unsigned mid = low + (high - low) / 2;

      if (backing->chunks[mid].begin >= start_page)
         high = mid;
      else
         low = mid + 1;
   }

   assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
   assert(low == 0 || backing->chunks[low - 1].end <= start_page);

   if (low > 0 && backing->chunks[low - 1].end == start_page) {
      backing->chunks[low - 1].end = end_page;

      if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
         backing->chunks[low - 1].end = backing->chunks[low].end;
         memmove(&backing->chunks[low], &backing->chunks[low + 1],
                 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
         backing->num_chunks--;
      }
   } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
      backing->chunks[low].begin = start_page;
   } else {
      if (backing->num_chunks >= backing->max_chunks) {
         unsigned new_max_chunks = 2 * backing->max_chunks;
         struct amdgpu_sparse_backing_chunk *new_chunks =
            REALLOC(backing->chunks,
                    sizeof(*backing->chunks) * backing->max_chunks,
                    sizeof(*backing->chunks) * new_max_chunks);
         if (!new_chunks)
            return false;

         backing->max_chunks = new_max_chunks;
         backing->chunks = new_chunks;
      }

      memmove(&backing->chunks[low + 1], &backing->chunks[low],
              sizeof(*backing->chunks) * (backing->num_chunks - low));
      backing->chunks[low].begin = start_page;
      backing->chunks[low].end = end_page;
      backing->num_chunks++;
   }

   if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
       backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
      sparse_free_backing_buffer(ws, bo, backing);

   return true;
}

static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int r;

   assert(!bo->bo && bo->base.usage & RADEON_FLAG_SPARSE);

   r = amdgpu_bo_va_op_raw(ws->dev, NULL, 0,
                           (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
                           bo->va, 0, AMDGPU_VA_OP_CLEAR);
   if (r) {
      fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
   }

   while (!list_is_empty(&bo->u.sparse.backing)) {
      sparse_free_backing_buffer(ws, bo,
                                 container_of(bo->u.sparse.backing.next,
                                              struct amdgpu_sparse_backing, list));
   }

   amdgpu_va_range_free(bo->u.sparse.va_handle);
   FREE(bo->u.sparse.commitments);
   simple_mtx_destroy(&bo->lock);
   FREE(bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
   /* Cast to void* because one of the function parameters is a struct pointer instead of void*. */
   (void*)amdgpu_bo_sparse_destroy
   /* other functions are never called */
};

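/* Create a sparse (PRT) buffer: only a virtual address range is reserved and
 * mapped as PRT; physical backing pages are attached later through
 * amdgpu_bo_sparse_commit.
 */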
static struct pb_buffer *
amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
   struct amdgpu_winsys_bo *bo;
   uint64_t map_size;
   uint64_t va_gap_size;
   int r;

   /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
    * that exceed this limit. This is not really a restriction: we don't have
    * that much virtual address space anyway.
    */
   if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
      return NULL;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   simple_mtx_init(&bo->lock, mtx_plain);
   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment_log2 = util_logbase2(RADEON_SPARSE_PAGE_SIZE);
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
   bo->base.placement = domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->base.usage = flags;

   bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
   bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
                                     sizeof(*bo->u.sparse.commitments));
   if (!bo->u.sparse.commitments)
      goto error_alloc_commitments;

   list_inithead(&bo->u.sparse.backing);

   /* For simplicity, we always map a multiple of the page size. */
   map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
   va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
                             0, &bo->va, &bo->u.sparse.va_handle,
                             AMDGPU_VA_RANGE_HIGH);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op_raw(ws->dev, NULL, 0, size, bo->va,
                           AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
   FREE(bo->u.sparse.commitments);
error_alloc_commitments:
   simple_mtx_destroy(&bo->lock);
   FREE(bo);
   return NULL;
}

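/* Commit or uncommit a page-aligned range of a sparse buffer. Committing
 * fills the range with pages from the backing buffers (allocating new backing
 * buffers as needed) and maps them with AMDGPU_VA_OP_REPLACE; uncommitting
 * remaps the range as PRT-only and returns the pages to the backing free
 * lists.
 */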
static bool
amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
                        uint64_t offset, uint64_t size, bool commit)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
   struct amdgpu_sparse_commitment *comm;
   uint32_t va_page, end_va_page;
   bool ok = true;
   int r;

   assert(bo->base.usage & RADEON_FLAG_SPARSE);
   assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
   assert(offset <= bo->base.size);
   assert(size <= bo->base.size - offset);
   assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);

   comm = bo->u.sparse.commitments;
   va_page = offset / RADEON_SPARSE_PAGE_SIZE;
   end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);

   simple_mtx_lock(&bo->lock);

#if DEBUG_SPARSE_COMMITS
   sparse_dump(bo, __func__);
#endif

   if (commit) {
      while (va_page < end_va_page) {
         uint32_t span_va_page;

         /* Skip pages that are already committed. */
         if (comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Determine length of uncommitted span. */
         span_va_page = va_page;
         while (va_page < end_va_page && !comm[va_page].backing)
            va_page++;

         /* Fill the uncommitted span with chunks of backing memory. */
         while (span_va_page < va_page) {
            struct amdgpu_sparse_backing *backing;
            uint32_t backing_start, backing_size;

            backing_size = va_page - span_va_page;
            backing = sparse_backing_alloc(ws, bo, &backing_start, &backing_size);
            if (!backing) {
               ok = false;
               goto out;
            }

            r = amdgpu_bo_va_op_raw(ws->dev, backing->bo->bo,
                                    (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
                                    (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
                                    bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
                                    AMDGPU_VM_PAGE_READABLE |
                                    AMDGPU_VM_PAGE_WRITEABLE |
                                    AMDGPU_VM_PAGE_EXECUTABLE,
                                    AMDGPU_VA_OP_REPLACE);
            if (r) {
               ok = sparse_backing_free(ws, bo, backing, backing_start, backing_size);
               assert(ok && "sufficient memory should already be allocated");

               ok = false;
               goto out;
            }

            while (backing_size) {
               comm[span_va_page].backing = backing;
               comm[span_va_page].page = backing_start;
               span_va_page++;
               backing_start++;
               backing_size--;
            }
         }
      }
   } else {
      r = amdgpu_bo_va_op_raw(ws->dev, NULL, 0,
                              (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
                              bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
                              AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
      if (r) {
         ok = false;
         goto out;
      }

      while (va_page < end_va_page) {
         struct amdgpu_sparse_backing *backing;
         uint32_t backing_start;
         uint32_t span_pages;

         /* Skip pages that are already uncommitted. */
         if (!comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Group contiguous spans of pages. */
         backing = comm[va_page].backing;
         backing_start = comm[va_page].page;
         comm[va_page].backing = NULL;

         span_pages = 1;
         va_page++;

         while (va_page < end_va_page &&
                comm[va_page].backing == backing &&
                comm[va_page].page == backing_start + span_pages) {
            comm[va_page].backing = NULL;
            va_page++;
            span_pages++;
         }

         if (!sparse_backing_free(ws, bo, backing, backing_start, span_pages)) {
            /* Couldn't allocate tracking data structures, so we have to leak */
            fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
            ok = false;
         }
      }
   }
out:

   simple_mtx_unlock(&bo->lock);

   return ok;
}

static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
                                       struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md,
                                       struct radeon_surf *surf)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   int r;

   assert(bo->bo && "must not be called for slab entries");

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   ac_surface_set_bo_metadata(&ws->info, surf, info.metadata.tiling_info,
                              &md->mode);

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
                                       struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md,
                                       struct radeon_surf *surf)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};

   assert(bo->bo && "must not be called for slab entries");

   ac_surface_get_bo_metadata(&ws->info, surf, &metadata.tiling_info);

   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

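/* Main buffer allocation entry point: small non-shared buffers are
 * sub-allocated from slabs, sparse buffers get a PRT VA range, and everything
 * else is taken from the reusable cache when possible or freshly allocated
 * through amdgpu_create_bo.
 */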
struct pb_buffer *
amdgpu_bo_create(struct amdgpu_winsys *ws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys_bo *bo;
   int heap = -1;

   if (domain & (RADEON_DOMAIN_GDS | RADEON_DOMAIN_OA))
      flags |= RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_SUBALLOC;

   /* VRAM implies WC. This is not optional. */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);

   /* NO_CPU_ACCESS is not valid with GTT. */
   assert(!(domain & RADEON_DOMAIN_GTT) || !(flags & RADEON_FLAG_NO_CPU_ACCESS));

   /* Sparse buffers must have NO_CPU_ACCESS set. */
   assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);

   struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
      ws->bo_slabs_encrypted : ws->bo_slabs;
   struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1];
   unsigned max_slab_entry_size = 1 << (last_slab->min_order + last_slab->num_orders - 1);

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
       size <= max_slab_entry_size) {
      struct pb_slab_entry *entry;
      int heap = radeon_get_heap_index(domain, flags);

      if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
         goto no_slab;

      unsigned alloc_size = size;

      /* Always use slabs for sizes less than 4 KB because the kernel aligns
       * everything to 4 KB.
       */
      if (size < alignment && alignment <= 4 * 1024)
         alloc_size = alignment;

      if (alignment > get_slab_entry_alignment(ws, alloc_size)) {
         /* 3/4-size slab entries can return too small an alignment.
          * Try again with a power-of-two allocation size.
          */
         unsigned pot_size = get_slab_pot_entry_size(ws, alloc_size);

         if (alignment <= pot_size) {
            /* This size works but wastes some memory to fulfil the alignment. */
            alloc_size = pot_size;
         } else {
            goto no_slab; /* can't fulfil alignment requirements */
         }
      }
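      /* Worked example (exact bucket sizes are an assumption): a 24 KiB
       * request can land in a 3/4-of-32-KiB slab entry whose guaranteed
       * alignment is below 32 KiB, so asking for 32 KiB alignment falls back
       * to the 32 KiB power-of-two entry size, trading some wasted memory for
       * the required alignment.
       */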

      struct pb_slabs *slabs = get_slabs(ws, alloc_size, flags);
      entry = pb_slab_alloc(slabs, alloc_size, heap);
      if (!entry) {
         /* Clean up buffer managers and try again. */
         amdgpu_clean_up_buffer_managers(ws);

         entry = pb_slab_alloc(slabs, alloc_size, heap);
      }
      if (!entry)
         return NULL;

      bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
      pipe_reference_init(&bo->base.reference, 1);
      bo->base.size = size;
      assert(alignment <= 1 << bo->base.alignment_log2);

      if (domain & RADEON_DOMAIN_VRAM)
         ws->slab_wasted_vram += get_slab_wasted_size(ws, bo);
      else
         ws->slab_wasted_gtt += get_slab_wasted_size(ws, bo);

      return &bo->base;
   }
no_slab:

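   /* Sparse buffers only reserve a GPU virtual address range here; physical
    * backing pages are committed and released later through the winsys
    * buffer_commit hook (amdgpu_bo_sparse_commit).
    */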
   if (flags & RADEON_FLAG_SPARSE) {
      assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);

      return amdgpu_bo_sparse_create(ws, size, domain, flags);
   }

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_NO_SUBALLOC;

   /* Align size to page size. This is the minimum alignment for normal BOs.
    * Aligning here helps the cached bufmgr: especially small BOs, like
    * constant/uniform buffers, benefit from better reuse.
    */
   if (domain & RADEON_DOMAIN_VRAM_GTT) {
      size = align64(size, ws->info.gart_page_size);
      alignment = align(alignment, ws->info.gart_page_size);
   }

   bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;

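   /* Only buffers that will never be shared with other processes are eligible
    * for the reusable cache; try to reclaim a matching cached buffer before
    * allocating a new one.
    */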
   if (use_reusable_pool) {
       heap = radeon_get_heap_index(domain, flags & ~RADEON_FLAG_ENCRYPTED);
       assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);

       /* Get a buffer from the cache. */
       bo = (struct amdgpu_winsys_bo*)
            pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
       if (bo)
          return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
   if (!bo) {
      /* Clean up buffer managers and try again. */
      amdgpu_clean_up_buffer_managers(ws);

      bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
      if (!bo)
         return NULL;
   }

   return &bo->base;
}

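/* Winsys vtable entry point: cast the generic winsys to amdgpu_winsys and
 * forward to amdgpu_bo_create().
 */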
static struct pb_buffer *
amdgpu_buffer_create(struct radeon_winsys *ws,
                     uint64_t size,
                     unsigned alignment,
                     enum radeon_bo_domain domain,
                     enum radeon_bo_flag flags)
{
   struct pb_buffer *res = amdgpu_bo_create(amdgpu_winsys(ws), size, alignment,
                                            domain, flags);
   return res;
}

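/* Import a buffer from a GEM flink name or dma-buf fd. If the kernel BO has
 * already been imported or exported by this winsys, the existing
 * amdgpu_winsys_bo is reused and its reference count is bumped.
 */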
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned vm_alignment)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = NULL;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle = NULL;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   enum radeon_bo_flag flags = 0;
   int r;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case WINSYS_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return NULL;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      return NULL;

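   /* The export-table lock is held across the lookup and, on the miss path,
    * until the new BO is inserted, so concurrent imports of the same handle
    * cannot race.
    */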
   simple_mtx_lock(&ws->bo_export_table_lock);
   bo = util_hash_table_get(ws->bo_export_table, result.buf_handle);

   /* If the amdgpu_winsys_bo instance already exists, bump the reference
    * counter and return it.
    */
   if (bo) {
      p_atomic_inc(&bo->base.reference.count);
      simple_mtx_unlock(&ws->bo_export_table_lock);

      /* Release the buffer handle, because we don't need it anymore.
       * This function is returning an existing buffer, which has its own
       * handle.
       */
      amdgpu_bo_free(result.buf_handle);
      return &bo->base;
   }

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error;

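   /* Reserve a GPU virtual address range for the imported buffer and map it. */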
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size,
                             amdgpu_get_optimal_alignment(ws, result.alloc_size,
                                                          vm_alignment),
                             0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
   if (r)
      goto error;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      goto error;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error;

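   /* Translate the kernel's heap and allocation flags into winsys domains
    * and flags.
    */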
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;
   if (info.alloc_flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
      flags |= RADEON_FLAG_NO_CPU_ACCESS;
   if (info.alloc_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
      flags |= RADEON_FLAG_GTT_WC;
   if (info.alloc_flags & AMDGPU_GEM_CREATE_ENCRYPTED) {
      /* Imports are always possible even if the importer isn't using TMZ.
       * For instance libweston needs to import the buffer to be able to determine
       * if it can be used for scanout.
       */
      flags |= RADEON_FLAG_ENCRYPTED;
   }

   /* Initialize the structure. */
   simple_mtx_init(&bo->lock, mtx_plain);
   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment_log2 = util_logbase2(info.phys_alignment);
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->base.placement = initial;
   bo->base.usage = flags;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->u.real.is_shared = true;

   if (bo->base.placement & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->base.placement & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);

   amdgpu_add_buffer_to_global_list(ws, bo);

   _mesa_hash_table_insert(ws->bo_export_table, bo->bo, bo);
   simple_mtx_unlock(&ws->bo_export_table_lock);

   return &bo->base;

error:
   simple_mtx_unlock(&ws->bo_export_table_lock);
   if (bo)
      FREE(bo);
   if (va_handle)
      amdgpu_va_range_free(va_handle);
   amdgpu_bo_free(result.buf_handle);
   return NULL;
}

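/* Export a buffer as a flink name, a dma-buf fd, or a KMS (GEM) handle for the
 * caller's DRM fd. Slab entries and sparse buffers are rejected because they
 * are not backed by a single kernel BO of their own.
 */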
static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
                                 struct pb_buffer *buffer,
                                 struct winsys_handle *whandle)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   struct hash_entry *entry;
   int r;

   /* Don't allow exports of slab entries and sparse buffers. */
   if (!bo->bo)
      return false;

   bo->u.real.use_reusable_pool = false;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case WINSYS_HANDLE_TYPE_KMS:
      if (sws->fd == ws->fd) {
         whandle->handle = bo->u.real.kms_handle;

         if (bo->u.real.is_shared)
            return true;

         goto hash_table_set;
      }

      simple_mtx_lock(&ws->sws_list_lock);
      entry = _mesa_hash_table_search(sws->kms_handles, bo);
      simple_mtx_unlock(&ws->sws_list_lock);
      if (entry) {
         whandle->handle = (uintptr_t)entry->data;
         return true;
      }
      FALLTHROUGH;
   case WINSYS_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return false;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return false;

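   /* A KMS handle was requested for a screen whose DRM fd differs from the
    * winsys device fd. Convert the freshly exported dma-buf fd into a GEM
    * handle on the screen's fd and cache the mapping in sws->kms_handles.
    */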
   if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      int dma_fd = whandle->handle;

      r = drmPrimeFDToHandle(sws->fd, dma_fd, &whandle->handle);
      close(dma_fd);

      if (r)
         return false;

      simple_mtx_lock(&ws->sws_list_lock);
      _mesa_hash_table_insert_pre_hashed(sws->kms_handles,
                                         bo->u.real.kms_handle, bo,
                                         (void*)(uintptr_t)whandle->handle);
      simple_mtx_unlock(&ws->sws_list_lock);
   }

 hash_table_set:
   simple_mtx_lock(&ws->bo_export_table_lock);
   _mesa_hash_table_insert(ws->bo_export_table, bo->bo, bo);
   simple_mtx_unlock(&ws->bo_export_table_lock);

   bo->u.real.is_shared = true;
   return true;
}

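/* Wrap an existing CPU allocation in a GTT buffer object (userptr) so the GPU
 * can access it directly. The GPU mapping covers the page-aligned size, while
 * the returned pb_buffer reports the caller's original size.
 */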
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;
   /* Avoid failure when the size is not page aligned */
   uint64_t aligned_size = align64(size, ws->info.gart_page_size);

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

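   /* Wrap the user allocation in a kernel BO, then reserve a GPU virtual
    * address range and map the BO into it.
    */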
   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
                                      aligned_size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             aligned_size,
                             amdgpu_get_optimal_alignment(ws, aligned_size,
                                                          ws->info.gart_page_size),
                             0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   bo->u.real.is_user_ptr = true;
   pipe_reference_init(&bo->base.reference, 1);
   simple_mtx_init(&bo->lock, mtx_plain);
   bo->bo = buf_handle;
   bo->base.alignment_log2 = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->u.real.cpu_ptr = pointer;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->base.placement = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += aligned_size;

   amdgpu_add_buffer_to_global_list(ws, bo);

   amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   return bo->bo ? bo->u.real.is_user_ptr : false;
}

static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   return !bo->bo && !(bo->base.usage & RADEON_FLAG_SPARSE);
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

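/* Plug the buffer-object implementation into the winsys vtable. */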
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_buffer_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_commit = amdgpu_bo_sparse_commit;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
   ws->base.buffer_get_flags = amdgpu_bo_get_flags;
}