#include "gpu_tests.h"
#include "vulkan/command.h"
#include "vulkan/gpu.h"
#include <vulkan/vulkan.h>

static void vulkan_interop_tests(pl_vulkan pl_vk,
                                 enum pl_handle_type handle_type)
{
    pl_gpu gpu = pl_vk->gpu;
    printf("testing vulkan interop for handle type 0x%x\n", handle_type);

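    // Test exporting a buffer's backing memory through this handle type, if supported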
    if (gpu->export_caps.buf & handle_type) {
        pl_buf buf = pl_buf_create(gpu, &(struct pl_buf_params) {
            .size = 1024,
            .export_handle = handle_type,
        });

        REQUIRE(buf);
        REQUIRE_HANDLE(buf->shared_mem, handle_type);
        REQUIRE(buf->shared_mem.size >= buf->params.size);
        REQUIRE(pl_buf_export(gpu, buf));
        pl_buf_destroy(gpu, &buf);
    }

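    // Find a single-component blittable format for the texture used below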
    pl_fmt fmt = pl_find_fmt(gpu, PL_FMT_UNORM, 1, 0, 0, PL_FMT_CAP_BLITTABLE);
    if (!fmt)
        return;

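    // Test exporting a synchronization object and using it to guard texture access, if supported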
    if (gpu->export_caps.sync & handle_type) {
        pl_sync sync = pl_sync_create(gpu, handle_type);
        pl_tex tex = pl_tex_create(gpu, &(struct pl_tex_params) {
            .w = 32,
            .h = 32,
            .format = fmt,
            .blit_dst = true,
        });

        REQUIRE(sync);
        REQUIRE(tex);

        // Note: For testing purposes, we have to fool pl_tex_export into
        // thinking this texture is actually exportable. Just hack it in
        // horribly.
        ((struct pl_tex_params *) &tex->params)->export_handle = PL_HANDLE_DMA_BUF;

        REQUIRE(pl_tex_export(gpu, tex, sync));

        // Re-use our internal helpers to signal this VkSemaphore
        struct vk_ctx *vk = PL_PRIV(pl_vk);
        struct vk_cmd *cmd = vk_cmd_begin(vk, vk->pool_graphics);
        REQUIRE(cmd);
        struct pl_sync_vk *sync_vk = PL_PRIV(sync);
        vk_cmd_sig(cmd, sync_vk->signal);
        vk_cmd_queue(vk, &cmd);
        REQUIRE(vk_flush_commands(vk));

        // Do something with the image again to "import" it
        pl_tex_clear(gpu, tex, (float[4]){0});
        pl_gpu_finish(gpu);
        REQUIRE(!pl_tex_poll(gpu, tex, 0));

        pl_sync_destroy(gpu, &sync);
        pl_tex_destroy(gpu, &tex);
    }
}

static void vulkan_swapchain_tests(pl_vulkan vk, VkSurfaceKHR surf)
{
    if (!surf)
        return;

    printf("testing vulkan swapchain\n");
    pl_gpu gpu = vk->gpu;
    pl_swapchain sw;
    sw = pl_vulkan_create_swapchain(vk, &(struct pl_vulkan_swapchain_params) {
        .surface = surf,
    });
    REQUIRE(sw);

    // Attempt actually initializing the swapchain
    int w = 640, h = 480;
    REQUIRE(pl_swapchain_resize(sw, &w, &h));

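    // Render a handful of dummy frames, clearing the FBO when possible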
    for (int i = 0; i < 10; i++) {
        struct pl_swapchain_frame frame;
        REQUIRE(pl_swapchain_start_frame(sw, &frame));
        if (frame.fbo->params.blit_dst)
            pl_tex_clear(gpu, frame.fbo, (float[4]){0});

        // TODO: test this with an actual pl_renderer instance
        struct pl_frame target;
        pl_frame_from_swapchain(&target, &frame);

        REQUIRE(pl_swapchain_submit_frame(sw));
        pl_swapchain_swap_buffers(sw);

        // Try resizing the swapchain in the middle of rendering
        if (i == 5) {
            w = 320;
            h = 240;
            REQUIRE(pl_swapchain_resize(sw, &w, &h));
        }
    }

    pl_swapchain_destroy(&sw);
}

int main()
{
    pl_log log = pl_test_logger();
    pl_vk_inst inst = pl_vk_inst_create(log, &(struct pl_vk_inst_params) {
        .debug = true,
        .debug_extra = true,
        .get_proc_addr = vkGetInstanceProcAddr,
        .opt_extensions = (const char *[]){
            VK_KHR_SURFACE_EXTENSION_NAME,
            "VK_EXT_headless_surface", // in case it isn't defined
        },
        .num_opt_extensions = 2,
    });

    if (!inst)
        return SKIP;

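    // Load the instance-level functions needed to enumerate and inspect devices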
    PL_VK_LOAD_FUN(inst->instance, EnumeratePhysicalDevices, inst->get_proc_addr);
    PL_VK_LOAD_FUN(inst->instance, GetPhysicalDeviceProperties, inst->get_proc_addr);

    uint32_t num = 0;
    EnumeratePhysicalDevices(inst->instance, &num, NULL);
    if (!num)
        return SKIP;

    VkPhysicalDevice *devices = calloc(num, sizeof(*devices));
    if (!devices)
        return 1;
    EnumeratePhysicalDevices(inst->instance, &num, devices);

    VkSurfaceKHR surf = NULL;

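    // Create a headless surface, if the extension is available, so the
    // swapchain tests can run without a window system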
#ifdef VK_EXT_headless_surface
    PL_VK_LOAD_FUN(inst->instance, CreateHeadlessSurfaceEXT, inst->get_proc_addr);
    if (CreateHeadlessSurfaceEXT) {
        VkHeadlessSurfaceCreateInfoEXT info = {
            .sType = VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT,
        };

        VkResult res = CreateHeadlessSurfaceEXT(inst->instance, &info, NULL, &surf);
        REQUIRE(res == VK_SUCCESS);
    }
#endif // VK_EXT_headless_surface

    // Make sure choosing any device works
    VkPhysicalDevice dev;
    dev = pl_vulkan_choose_device(log, &(struct pl_vulkan_device_params) {
        .instance = inst->instance,
        .get_proc_addr = inst->get_proc_addr,
        .allow_software = true,
        .surface = surf,
    });
    REQUIRE(dev);

    // Test all attached devices
    for (int i = 0; i < num; i++) {
        VkPhysicalDeviceProperties props = {0};
        GetPhysicalDeviceProperties(devices[i], &props);
#ifndef CI_ALLOW_SW
        if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU) {
            printf("Skipping device %d: %s\n", i, props.deviceName);
            continue;
        }
#endif
        printf("Testing device %d: %s\n", i, props.deviceName);

        // Make sure we can choose this device by name
        dev = pl_vulkan_choose_device(log, &(struct pl_vulkan_device_params) {
            .instance = inst->instance,
            .get_proc_addr = inst->get_proc_addr,
            .device_name = props.deviceName,
        });
        REQUIRE(dev == devices[i]);

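        // Create a full Vulkan context on this device, reusing the instance and test surface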
        struct pl_vulkan_params params = pl_vulkan_default_params;
        params.instance = inst->instance;
        params.get_proc_addr = inst->get_proc_addr;
        params.device = devices[i];
        params.queue_count = 8; // test inter-queue stuff
        params.surface = surf;

        pl_vulkan vk = pl_vulkan_create(log, &params);
        if (!vk)
            continue;

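        // Run the generic GPU shader tests and the swapchain tests on this device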
        gpu_shader_tests(vk->gpu);
        vulkan_swapchain_tests(vk, surf);

        // Print heap statistics
        pl_vk_print_heap(vk->gpu, PL_LOG_DEBUG);

        // Test importing this context via the vulkan interop API
        struct pl_vulkan_import_params iparams = {
            .instance = vk->instance,
            .get_proc_addr = inst->get_proc_addr,
            .phys_device = vk->phys_device,
            .device = vk->device,

            .extensions = vk->extensions,
            .num_extensions = vk->num_extensions,
            .features = vk->features,
            .queue_graphics = vk->queue_graphics,
            .queue_compute = vk->queue_compute,
            .queue_transfer = vk->queue_transfer,
        };
        pl_vulkan vk2 = pl_vulkan_import(log, &iparams);
        REQUIRE(vk2);
        pl_vulkan_destroy(&vk2);

        // Run these tests last because they disable some validation layers
#ifdef PL_HAVE_UNIX
        vulkan_interop_tests(vk, PL_HANDLE_FD);
        vulkan_interop_tests(vk, PL_HANDLE_DMA_BUF);
#endif
#ifdef PL_HAVE_WIN32
        vulkan_interop_tests(vk, PL_HANDLE_WIN32);
        vulkan_interop_tests(vk, PL_HANDLE_WIN32_KMT);
#endif
        gpu_interop_tests(vk->gpu);
        pl_vulkan_destroy(&vk);

        // Re-run the same export/import tests with async queues disabled
        params.async_compute = false;
        params.async_transfer = false;
        vk = pl_vulkan_create(log, &params);
        REQUIRE(vk); // it succeeded the first time

#ifdef PL_HAVE_UNIX
        vulkan_interop_tests(vk, PL_HANDLE_FD);
        vulkan_interop_tests(vk, PL_HANDLE_DMA_BUF);
#endif
#ifdef PL_HAVE_WIN32
        vulkan_interop_tests(vk, PL_HANDLE_WIN32);
        vulkan_interop_tests(vk, PL_HANDLE_WIN32_KMT);
#endif
        gpu_interop_tests(vk->gpu);
        pl_vulkan_destroy(&vk);

        // Reduce log spam after first tested device
        pl_log_level_update(log, PL_LOG_INFO);
    }

    vkDestroySurfaceKHR(inst->instance, surf, NULL);
    pl_vk_inst_destroy(&inst);
    pl_log_destroy(&log);
    free(devices);
}