/*
 * Copyright © 2021 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef X
#undef X
#endif

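/* X(name) token-pastes a _32 or _64 suffix onto "name", giving each helper
 * below a distinct symbol per GPU pointer size (presumably this file is
 * compiled once per PTRSZ value so that both variants exist).
 */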
#if PTRSZ == 32
#define X(n) n##_32
#else
#define X(n) n##_64
#endif

static void X(emit_reloc_common)(struct fd_ringbuffer *ring,
                                 const struct fd_reloc *reloc)
{
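   /* Emit the low 32 bits of the target address; 64-bit builds emit the
    * high dword as well.
    */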
   (*ring->cur++) = (uint32_t)reloc->iova;
#if PTRSZ == 64
   (*ring->cur++) = (uint32_t)(reloc->iova >> 32);
#endif
}

static void X(msm_ringbuffer_sp_emit_reloc_nonobj)(struct fd_ringbuffer *ring,
                                                   const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc);

   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);

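   /* Non-object rings belong to a submit, so the referenced BO is tracked
    * directly in the submit's BO list.
    */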
   msm_submit_append_bo(msm_submit, reloc->bo);
}

static void X(msm_ringbuffer_sp_emit_reloc_obj)(struct fd_ringbuffer *ring,
                                                const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc);

   assert(ring->flags & _FD_RINGBUFFER_OBJECT);

   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   /* Avoid emitting duplicate BO references into the list.  Ringbuffer
    * objects are long-lived, so this saves ongoing work at draw time in
    * exchange for a bit at context setup/first draw.  And the number of
    * relocs per ringbuffer object is fairly small, so the O(n^2) doesn't
    * hurt much.
    */
   if (!msm_ringbuffer_references_bo(ring, reloc->bo)) {
      APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));
   }
}

static uint32_t X(msm_ringbuffer_sp_emit_reloc_ring)(
   struct fd_ringbuffer *ring, struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct msm_ringbuffer_sp *msm_target = to_msm_ringbuffer_sp(target);
   struct fd_bo *bo;
   uint32_t size;

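   /* For growable rings, earlier chunks live in the cmds[] table; otherwise
    * the size is however much of the current ring BO has been written.
    */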
   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < msm_target->u.nr_cmds)) {
      bo = msm_target->u.cmds[cmd_idx].ring_bo;
      size = msm_target->u.cmds[cmd_idx].size;
   } else {
      bo = msm_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

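   /* Emit the target ring's GPU address into this ring, using the object or
    * non-object reloc path depending on how this ring tracks its BOs.
    */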
   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      X(msm_ringbuffer_sp_emit_reloc_obj)(ring, &(struct fd_reloc){
                .bo = bo,
                .iova = bo->iova + msm_target->offset,
                .offset = msm_target->offset,
             });
   } else {
      X(msm_ringbuffer_sp_emit_reloc_nonobj)(ring, &(struct fd_reloc){
                .bo = bo,
                .iova = bo->iova + msm_target->offset,
                .offset = msm_target->offset,
             });
   }

   if (!(target->flags & _FD_RINGBUFFER_OBJECT))
      return size;

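   /* The target is a long-lived ringbuffer object: propagate its reloc_bos
    * into this ring's list (if this ring is also an object) or into the
    * submit's BO list.
    */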
   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
         struct fd_bo *target_bo = msm_target->u.reloc_bos[i];
         if (!msm_ringbuffer_references_bo(ring, target_bo))
            APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(target_bo));
      }
   } else {
      // TODO it would be nice to know whether we have already
      // seen this target before.  But hopefully we hit the
      // append_bo() fast path enough for this to not matter:
      struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);

      for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
         msm_submit_append_bo(msm_submit, msm_target->u.reloc_bos[i]);
      }
   }

   return size;
}