/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vn_ring.h"

#include "vn_cs.h"
#include "vn_renderer.h"

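/* The ring is a command ring in memory shared between the driver and the
 * renderer.  The driver encodes commands into the buffer and advances the
 * tail; the renderer is expected to advance the head as it consumes
 * commands.  vn_ring_submit checks the status word for VN_RING_STATUS_IDLE
 * to decide whether the renderer has gone idle and needs to be notified of
 * a new submission.
 */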
enum vn_ring_status_flag {
   VN_RING_STATUS_IDLE = 1u << 0,
};

static uint32_t
vn_ring_load_head(const struct vn_ring *ring)
{
   /* the renderer is expected to store the head with memory_order_release,
    * forming a release-acquire ordering
    */
   return atomic_load_explicit(ring->shared.head, memory_order_acquire);
}

static void
vn_ring_store_tail(struct vn_ring *ring)
{
   /* the renderer is expected to load the tail with memory_order_acquire,
    * forming a release-acquire ordering
    */
   return atomic_store_explicit(ring->shared.tail, ring->cur,
                                memory_order_release);
}

static uint32_t
vn_ring_load_status(const struct vn_ring *ring)
{
   /* this must be called and ordered after vn_ring_store_tail */
   return atomic_load_explicit(ring->shared.status, memory_order_seq_cst);
}

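/* copy size bytes into the ring buffer at the current tail position,
 * wrapping around the end of the power-of-two buffer when necessary; the
 * caller must have reserved the space first (see vn_ring_wait_space)
 */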
static void
vn_ring_write_buffer(struct vn_ring *ring, const void *data, uint32_t size)
{
   assert(ring->cur + size - vn_ring_load_head(ring) <= ring->buffer_size);

   const uint32_t offset = ring->cur & ring->buffer_mask;
   if (offset + size <= ring->buffer_size) {
      memcpy(ring->shared.buffer + offset, data, size);
   } else {
      const uint32_t s = ring->buffer_size - offset;
      memcpy(ring->shared.buffer + offset, data, s);
      memcpy(ring->shared.buffer, data + s, size - s);
   }

   ring->cur += size;
}

static bool
vn_ring_ge_seqno(const struct vn_ring *ring, uint32_t a, uint32_t b)
{
   /* this can return a false negative when not called often enough (e.g.,
    * when called once every couple of hours), but subsequent calls with
    * larger a's will correct it
    *
    * TODO use real seqnos?
    */
   if (a >= b)
      return ring->cur >= a || ring->cur < b;
   else
      return ring->cur >= a && ring->cur < b;
}

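/* retire submits whose seqnos have been reached: drop their shmem
 * references and move them to the free list for reuse
 */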
static void
vn_ring_retire_submits(struct vn_ring *ring, uint32_t seqno)
{
   list_for_each_entry_safe(struct vn_ring_submit, submit, &ring->submits,
                            head) {
      if (!vn_ring_ge_seqno(ring, seqno, submit->seqno))
         break;

      for (uint32_t i = 0; i < submit->shmem_count; i++)
         vn_renderer_shmem_unref(ring->renderer, submit->shmems[i]);

      list_del(&submit->head);
      list_add(&submit->head, &ring->free_submits);
   }
}

static uint32_t
vn_ring_wait_seqno(const struct vn_ring *ring, uint32_t seqno)
{
   /* A renderer wait incurs several hops and the renderer might poll
    * repeatedly anyway.  Let's just poll here.
    */
   uint32_t iter = 0;
   do {
      const uint32_t head = vn_ring_load_head(ring);
      if (vn_ring_ge_seqno(ring, head, seqno))
         return head;
      vn_relax(&iter, "ring seqno");
   } while (true);
}

static uint32_t
vn_ring_wait_space(const struct vn_ring *ring, uint32_t size)
{
   assert(size <= ring->buffer_size);

   /* see the reasoning in vn_ring_wait_seqno */
   uint32_t iter = 0;
   do {
      const uint32_t head = vn_ring_load_head(ring);
      if (ring->cur + size - head <= ring->buffer_size)
         return head;
      vn_relax(&iter, "ring space");
   } while (true);
}

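/* compute the layout of the shared memory backing a ring: the head, tail,
 * and status words each get their own 64-byte-aligned slot (presumably to
 * keep them on separate cachelines), followed by the power-of-two command
 * buffer of buf_size bytes and an extra region of extra_size bytes
 */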
void
vn_ring_get_layout(size_t buf_size,
                   size_t extra_size,
                   struct vn_ring_layout *layout)
{
   /* this can be changed/extended quite freely */
   struct layout {
      uint32_t head __attribute__((aligned(64)));
      uint32_t tail __attribute__((aligned(64)));
      uint32_t status __attribute__((aligned(64)));

      uint8_t buffer[] __attribute__((aligned(64)));
   };

   assert(buf_size && util_is_power_of_two_or_zero(buf_size));

   layout->head_offset = offsetof(struct layout, head);
   layout->tail_offset = offsetof(struct layout, tail);
   layout->status_offset = offsetof(struct layout, status);

   layout->buffer_offset = offsetof(struct layout, buffer);
   layout->buffer_size = buf_size;

   layout->extra_offset = layout->buffer_offset + layout->buffer_size;
   layout->extra_size = extra_size;

   layout->shmem_size = layout->extra_offset + layout->extra_size;
}

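/* initialize a ring over shared memory laid out according to
 * vn_ring_get_layout; both the ring struct and the shared memory are
 * zeroed here
 */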
void
vn_ring_init(struct vn_ring *ring,
             struct vn_renderer *renderer,
             const struct vn_ring_layout *layout,
             void *shared)
{
   memset(ring, 0, sizeof(*ring));
   memset(shared, 0, layout->shmem_size);

   ring->renderer = renderer;

   assert(layout->buffer_size &&
          util_is_power_of_two_or_zero(layout->buffer_size));
   ring->buffer_size = layout->buffer_size;
   ring->buffer_mask = ring->buffer_size - 1;

   ring->shared.head = shared + layout->head_offset;
   ring->shared.tail = shared + layout->tail_offset;
   ring->shared.status = shared + layout->status_offset;
   ring->shared.buffer = shared + layout->buffer_offset;
   ring->shared.extra = shared + layout->extra_offset;

   list_inithead(&ring->submits);
   list_inithead(&ring->free_submits);
}

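/* retire all recorded submits, dropping their shmem references, and free
 * the cached submit objects
 */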
void
vn_ring_fini(struct vn_ring *ring)
{
   vn_ring_retire_submits(ring, ring->cur);
   assert(list_is_empty(&ring->submits));

   list_for_each_entry_safe(struct vn_ring_submit, submit,
                            &ring->free_submits, head)
      free(submit);
}

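/* return a vn_ring_submit with room for at least shmem_count shmems,
 * reusing one from the free list when possible; returns NULL when the
 * allocation fails
 */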
struct vn_ring_submit *
vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count)
{
   const uint32_t min_shmem_count = 2;
   struct vn_ring_submit *submit;

   /* TODO this could be simplified if we could omit shmem_count */
   if (shmem_count <= min_shmem_count &&
       !list_is_empty(&ring->free_submits)) {
      submit =
         list_first_entry(&ring->free_submits, struct vn_ring_submit, head);
      list_del(&submit->head);
   } else {
      shmem_count = MAX2(shmem_count, min_shmem_count);
      submit =
         malloc(sizeof(*submit) + sizeof(submit->shmems[0]) * shmem_count);
   }

   return submit;
}

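/* write the encoded commands in cs to the ring, record the submit for
 * later retirement, and return its seqno in *seqno; the return value
 * indicates whether the renderer reported itself idle and should be
 * notified
 */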
bool
vn_ring_submit(struct vn_ring *ring,
               struct vn_ring_submit *submit,
               const struct vn_cs_encoder *cs,
               uint32_t *seqno)
{
   /* write cs to the ring */
   assert(!vn_cs_encoder_is_empty(cs));
   uint32_t cur_seqno;
   for (uint32_t i = 0; i < cs->buffer_count; i++) {
      const struct vn_cs_encoder_buffer *buf = &cs->buffers[i];
      cur_seqno = vn_ring_wait_space(ring, buf->committed_size);
      vn_ring_write_buffer(ring, buf->base, buf->committed_size);
   }

   vn_ring_store_tail(ring);
   const bool notify = vn_ring_load_status(ring) & VN_RING_STATUS_IDLE;

   vn_ring_retire_submits(ring, cur_seqno);

   submit->seqno = ring->cur;
   list_addtail(&submit->head, &ring->submits);

   *seqno = submit->seqno;
   return notify;
}

/**
 * This is thread-safe.
 */
void
vn_ring_wait(const struct vn_ring *ring, uint32_t seqno)
{
   vn_ring_wait_seqno(ring, seqno);
}