/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   struct vn_refcount refcount;

   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;

   struct list_head cache_head;
   int64_t cache_timestamp;
};

struct vn_renderer_bo {
   struct vn_refcount refcount;

   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter.  The counter can be updated by the
 * CPU or by the GPU.  It can also be waited on by the CPU or by the GPU
 * until it reaches certain values.
 *
 * This is modeled after the timeline VkSemaphore rather than the timeline
 * drm_syncobj.  The main difference is that a drm_syncobj can have the
 * unsignaled value 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};
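
/*
 * Usage sketch (illustrative only, not part of the interface): create a
 * timeline sync with an initial value of 0, advance the counter from the
 * CPU, and read it back.  The vn_renderer_sync_* wrappers used here are
 * defined near the end of this header.
 *
 *    struct vn_renderer_sync *sync;
 *    VkResult result = vn_renderer_sync_create(
 *       renderer, 0, VN_RENDERER_SYNC_SHAREABLE, &sync);
 *    if (result == VK_SUCCESS) {
 *       uint64_t val;
 *       vn_renderer_sync_write(renderer, sync, 1);
 *       vn_renderer_sync_read(renderer, sync, &val);
 *       vn_renderer_sync_destroy(renderer, sync);
 *    }
 */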

struct vn_renderer_info {
   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_cache_management;
   bool has_external_sync;
   bool has_implicit_fencing;
   bool has_guest_vram;

   uint32_t max_sync_queue_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
   uint32_t supports_blob_id_0;
};

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the virtual sync queue identified by sync_queue_index.
    * The virtual queue is assumed to be associated with the physical
    * VkQueue identified by vk_queue_id.  After the execution completes on
    * the VkQueue, the virtual sync queue is signaled.
    *
    * sync_queue_index must be less than max_sync_queue_count.
    *
    * vk_queue_id specifies the object id of a VkQueue.
    *
    * When sync_queue_cpu is true, it specifies the special CPU sync queue,
    * and sync_queue_index/vk_queue_id are ignored.  TODO revisit this later
    */
   uint32_t sync_queue_index;
   bool sync_queue_cpu;
   vn_object_id vk_queue_id;

   /* syncs to update when the virtual sync queue is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them.  We don't do this yet
    * because each vn_command_buffer owns a bo.  We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};
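
/*
 * Usage sketch (illustrative only; cs_data, cs_size, queue_id, sync,
 * signal_val, bos, and bo_count are assumed to exist): submit one batch
 * that signals one sync when execution completes.
 *
 *    const struct vn_renderer_submit_batch batch = {
 *       .cs_data = cs_data,
 *       .cs_size = cs_size,
 *       .sync_queue_index = 0,
 *       .vk_queue_id = queue_id,
 *       .syncs = &sync,
 *       .sync_values = &(const uint64_t){ signal_val },
 *       .sync_count = 1,
 *    };
 *    const struct vn_renderer_submit submit = {
 *       .bos = bos,
 *       .bo_count = bo_count,
 *       .batches = &batch,
 *       .batch_count = 1,
 *    };
 *    VkResult result = vn_renderer_submit(renderer, &submit);
 */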

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
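
/*
 * Usage sketch (illustrative only, assuming the timeout is in nanoseconds
 * as in Vulkan): wait up to one second for a single sync to reach wait_val.
 *
 *    const struct vn_renderer_wait wait = {
 *       .wait_any = false,
 *       .timeout = 1000ull * 1000 * 1000,
 *       .syncs = &sync,
 *       .sync_values = &(const uint64_t){ wait_val },
 *       .sync_count = 1,
 *    };
 *    VkResult result = vn_renderer_wait(renderer, &wait);
 */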

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT.  On failure, returns
    * VK_ERROR_DEVICE_LOST or an out-of-device/host-memory error.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                        size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_info info;
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}
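
/*
 * Usage sketch (illustrative only): the vtest renderer is preferred when
 * VN_DEBUG(VTEST) is set, with virtgpu as the fallback.  A renderer created
 * here must be destroyed with vn_renderer_destroy below.
 *
 *    struct vn_renderer *renderer;
 *    VkResult result = vn_renderer_create(instance, alloc, &renderer);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ...
 *    vn_renderer_destroy(renderer, alloc);
 */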

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   VN_TRACE_FUNC();
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(vn_refcount_is_valid(&shmem->refcount));
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   vn_refcount_inc(&shmem->refcount);
   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   if (vn_refcount_dec(&shmem->refcount))
      renderer->shmem_ops.destroy(renderer, shmem);
}
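
/*
 * Usage sketch (illustrative only; data and data_size are assumed to
 * exist): a shmem is created mapped, can be shared via its refcount, and is
 * unmapped and destroyed when the last reference is dropped.
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_create(renderer, data_size);
 *    if (shmem) {
 *       memcpy(shmem->mmap_ptr, data, data_size);
 *       vn_renderer_shmem_unref(renderer, shmem);
 *    }
 */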

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   vn_refcount_inc(&bo->refcount);
   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   if (vn_refcount_dec(&bo->refcount))
      return renderer->bo_ops.destroy(renderer, bo);
   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize size)
{
   renderer->bo_ops.flush(renderer, bo, offset, size);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}
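
/*
 * Usage sketch (illustrative only; mem_flags is an assumed variable holding
 * the bo's VkMemoryPropertyFlags): CPU writes through a non-coherent
 * mapping must be flushed before GPU access; similarly, CPU reads after GPU
 * writes must be preceded by vn_renderer_bo_invalidate.  Remember that map
 * is not thread-safe.
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);
 *    if (ptr) {
 *       memcpy(ptr, data, data_size);
 *       if (!(mem_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
 *          vn_renderer_bo_flush(renderer, bo, 0, data_size);
 *    }
 */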

static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

#endif /* VN_RENDERER_H */