/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#ifndef VN_COMMON_H
#define VN_COMMON_H

#include <assert.h>
#include <inttypes.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>

#include "c11/threads.h"
#include "util/bitscan.h"
#include "util/compiler.h"
#include "util/list.h"
#include "util/macros.h"
#include "util/os_time.h"
#include "util/u_math.h"
#include "util/xmlconfig.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
#include "vk_device.h"
#include "vk_instance.h"
#include "vk_object.h"
#include "vk_physical_device.h"
#include "vk_util.h"

#include "vn_entrypoints.h"

#define VN_DEFAULT_ALIGN 8

#define VN_DEBUG(category) (unlikely(vn_debug & VN_DEBUG_##category))

#define vn_error(instance, error)                                            \
   (VN_DEBUG(RESULT) ? vn_log_result((instance), (error), __func__) : (error))
#define vn_result(instance, result)                                          \
   ((result) >= VK_SUCCESS ? (result) : vn_error((instance), (result)))
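
/*
 * A hypothetical usage sketch (not part of this header): vn_result() passes
 * success codes through untouched and routes errors to vn_error(), which
 * logs them only when the RESULT debug category is enabled.
 *
 *    VkResult
 *    vn_example_call(struct vn_instance *instance)
 *    {
 *       VkResult result = some_renderer_call(instance);   // hypothetical call
 *       return vn_result(instance, result);
 *    }
 */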

#ifdef ANDROID

#include <cutils/trace.h>

#define VN_TRACE_BEGIN(name) atrace_begin(ATRACE_TAG_GRAPHICS, name)
#define VN_TRACE_END() atrace_end(ATRACE_TAG_GRAPHICS)

#else

/* XXX we would like to use perfetto, but it lacks a C header */
#define VN_TRACE_BEGIN(name)
#define VN_TRACE_END()

#endif /* ANDROID */

#if __has_attribute(cleanup) && __has_attribute(unused)

#define VN_TRACE_SCOPE(name)                                                 \
   int _vn_trace_scope_##__LINE__                                            \
      __attribute__((cleanup(vn_trace_scope_end), unused)) =                 \
         vn_trace_scope_begin(name)

static inline int
vn_trace_scope_begin(const char *name)
{
   VN_TRACE_BEGIN(name);
   return 0;
}

static inline void
vn_trace_scope_end(int *scope)
{
   VN_TRACE_END();
}

#else

#define VN_TRACE_SCOPE(name)

#endif /* __has_attribute(cleanup) && __has_attribute(unused) */

#define VN_TRACE_FUNC() VN_TRACE_SCOPE(__func__)
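
/*
 * VN_TRACE_SCOPE relies on the cleanup attribute: the otherwise-unused local
 * variable has vn_trace_scope_end() called on it when it goes out of scope,
 * so the trace event automatically spans the enclosing block.  A minimal
 * usage sketch (the function below is hypothetical):
 *
 *    void
 *    vn_example_work(void)
 *    {
 *       VN_TRACE_FUNC();
 *       // ... traced until the end of this function ...
 *    }
 */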

struct vn_instance;
struct vn_physical_device;
struct vn_device;
struct vn_queue;
struct vn_fence;
struct vn_semaphore;
struct vn_device_memory;
struct vn_buffer;
struct vn_buffer_view;
struct vn_image;
struct vn_image_view;
struct vn_sampler;
struct vn_sampler_ycbcr_conversion;
struct vn_descriptor_set_layout;
struct vn_descriptor_pool;
struct vn_descriptor_set;
struct vn_descriptor_update_template;
struct vn_render_pass;
struct vn_framebuffer;
struct vn_event;
struct vn_query_pool;
struct vn_shader_module;
struct vn_pipeline_layout;
struct vn_pipeline_cache;
struct vn_pipeline;
struct vn_command_pool;
struct vn_command_buffer;

struct vn_cs_encoder;
struct vn_cs_decoder;

struct vn_renderer;
struct vn_renderer_shmem;
struct vn_renderer_bo;
struct vn_renderer_sync;

enum vn_debug {
   VN_DEBUG_INIT = 1ull << 0,
   VN_DEBUG_RESULT = 1ull << 1,
   VN_DEBUG_VTEST = 1ull << 2,
   VN_DEBUG_WSI = 1ull << 3,
};

typedef uint64_t vn_object_id;

/* base class of vn_instance */
struct vn_instance_base {
   struct vk_instance base;
   vn_object_id id;
};

/* base class of vn_physical_device */
struct vn_physical_device_base {
   struct vk_physical_device base;
   vn_object_id id;
};

/* base class of vn_device */
struct vn_device_base {
   struct vk_device base;
   vn_object_id id;
};

/* base class of other driver objects */
struct vn_object_base {
   struct vk_object_base base;
   vn_object_id id;
};

struct vn_refcount {
   atomic_int count;
};

extern uint64_t vn_debug;

void
vn_debug_init(void);

void
vn_trace_init(void);

void
vn_log(struct vn_instance *instance, const char *format, ...)
   PRINTFLIKE(2, 3);

VkResult
vn_log_result(struct vn_instance *instance,
              VkResult result,
              const char *where);

#define VN_REFCOUNT_INIT(val)                                                \
   (struct vn_refcount) { .count = (val) }

static inline int
vn_refcount_load_relaxed(const struct vn_refcount *ref)
{
   return atomic_load_explicit(&ref->count, memory_order_relaxed);
}

static inline int
vn_refcount_fetch_add_relaxed(struct vn_refcount *ref, int val)
{
   return atomic_fetch_add_explicit(&ref->count, val, memory_order_relaxed);
}

static inline int
vn_refcount_fetch_sub_release(struct vn_refcount *ref, int val)
{
   return atomic_fetch_sub_explicit(&ref->count, val, memory_order_release);
}

static inline bool
vn_refcount_is_valid(const struct vn_refcount *ref)
{
   return vn_refcount_load_relaxed(ref) > 0;
}

static inline void
vn_refcount_inc(struct vn_refcount *ref)
{
   /* no ordering imposed */
   ASSERTED const int old = vn_refcount_fetch_add_relaxed(ref, 1);
   assert(old >= 1);
}

static inline bool
vn_refcount_dec(struct vn_refcount *ref)
{
   /* prior reads/writes cannot be reordered after this */
   const int old = vn_refcount_fetch_sub_release(ref, 1);
   assert(old >= 1);

   /* subsequent free cannot be reordered before this */
   if (old == 1)
      atomic_thread_fence(memory_order_acquire);

   return old == 1;
}
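
/*
 * Typical reference-counting pattern built on the helpers above.  The
 * release ordering in vn_refcount_dec() makes earlier writes to the object
 * visible before the count drops, and the acquire fence taken by the last
 * reference orders the subsequent free after those writes.  A hypothetical
 * sketch (struct vn_example and its allocation are illustrative only):
 *
 *    struct vn_example {
 *       struct vn_refcount refcount;
 *       // ...
 *    };
 *
 *    // at creation time
 *    example->refcount = VN_REFCOUNT_INIT(1);
 *
 *    // sharing and releasing
 *    vn_refcount_inc(&example->refcount);
 *    if (vn_refcount_dec(&example->refcount))
 *       free(example);
 */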

void
vn_relax(uint32_t *iter, const char *reason);

static_assert(sizeof(vn_object_id) >= sizeof(uintptr_t), "");

static inline VkResult
vn_instance_base_init(
   struct vn_instance_base *instance,
   const struct vk_instance_extension_table *supported_extensions,
   const struct vk_instance_dispatch_table *dispatch_table,
   const VkInstanceCreateInfo *info,
   const VkAllocationCallbacks *alloc)
{
   VkResult result = vk_instance_init(&instance->base, supported_extensions,
                                      dispatch_table, info, alloc);
   instance->id = (uintptr_t)instance;
   return result;
}

static inline void
vn_instance_base_fini(struct vn_instance_base *instance)
{
   vk_instance_finish(&instance->base);
}

static inline VkResult
vn_physical_device_base_init(
   struct vn_physical_device_base *physical_dev,
   struct vn_instance_base *instance,
   const struct vk_device_extension_table *supported_extensions,
   const struct vk_physical_device_dispatch_table *dispatch_table)
{
   VkResult result =
      vk_physical_device_init(&physical_dev->base, &instance->base,
                              supported_extensions, dispatch_table);
   physical_dev->id = (uintptr_t)physical_dev;
   return result;
}

static inline void
vn_physical_device_base_fini(struct vn_physical_device_base *physical_dev)
{
   vk_physical_device_finish(&physical_dev->base);
}

static inline VkResult
vn_device_base_init(struct vn_device_base *dev,
                    struct vn_physical_device_base *physical_dev,
                    const struct vk_device_dispatch_table *dispatch_table,
                    const VkDeviceCreateInfo *info,
                    const VkAllocationCallbacks *alloc)
{
   VkResult result = vk_device_init(&dev->base, &physical_dev->base,
                                    dispatch_table, info, alloc);
   dev->id = (uintptr_t)dev;
   return result;
}

static inline void
vn_device_base_fini(struct vn_device_base *dev)
{
   vk_device_finish(&dev->base);
}

static inline void
vn_object_base_init(struct vn_object_base *obj,
                    VkObjectType type,
                    struct vn_device_base *dev)
{
   vk_object_base_init(&dev->base, &obj->base, type);
   obj->id = (uintptr_t)obj;
}

static inline void
vn_object_base_fini(struct vn_object_base *obj)
{
   vk_object_base_finish(&obj->base);
}

static inline void
vn_object_set_id(void *obj, vn_object_id id, VkObjectType type)
{
   assert(((const struct vk_object_base *)obj)->type == type);
   switch (type) {
   case VK_OBJECT_TYPE_INSTANCE:
      ((struct vn_instance_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
      ((struct vn_physical_device_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_DEVICE:
      ((struct vn_device_base *)obj)->id = id;
      break;
   default:
      ((struct vn_object_base *)obj)->id = id;
      break;
   }
}
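
/*
 * vn_object_set_id() and vn_object_get_id() dispatch on VkObjectType because
 * instances, physical devices, and devices embed their ids in dedicated base
 * structs rather than in vn_object_base.  A hypothetical sketch (the "base"
 * member name and the external_id value are illustrative only):
 *
 *    vn_object_base_init(&img->base, VK_OBJECT_TYPE_IMAGE, &dev->base);
 *
 *    // replace the default pointer-derived id with an externally assigned one
 *    vn_object_set_id(img, external_id, VK_OBJECT_TYPE_IMAGE);
 *    assert(vn_object_get_id(img, VK_OBJECT_TYPE_IMAGE) == external_id);
 */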

static inline vn_object_id
vn_object_get_id(const void *obj, VkObjectType type)
{
   assert(((const struct vk_object_base *)obj)->type == type);
   switch (type) {
   case VK_OBJECT_TYPE_INSTANCE:
      return ((struct vn_instance_base *)obj)->id;
   case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
      return ((struct vn_physical_device_base *)obj)->id;
   case VK_OBJECT_TYPE_DEVICE:
      return ((struct vn_device_base *)obj)->id;
   default:
      return ((struct vn_object_base *)obj)->id;
   }
}

#endif /* VN_COMMON_H */