1 /**************************************************************************
2  *
3  * Copyright (C) 2019 Chromium.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included
13  * in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  **************************************************************************/
24 
25 #ifndef _GNU_SOURCE
26 #define _GNU_SOURCE 1
27 #endif
28 
29 #include <stdio.h>
30 #include <dirent.h>
31 #include <fcntl.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <xf86drm.h>
35 #include <unistd.h>
36 
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "pipe/p_state.h"
40 
41 #include "virgl_gbm.h"
42 #include "virgl_hw.h"
43 #include "vrend_debug.h"
44 
/* Describes how a pixel format is laid out across planes: the chroma
 * subsampling divisor and bytes-per-pixel for each plane. */
struct planar_layout {
    size_t num_planes;                                /* number of planes used (1-3) */
    int horizontal_subsampling[VIRGL_GBM_MAX_PLANES]; /* width divisor per plane */
    int vertical_subsampling[VIRGL_GBM_MAX_PLANES];   /* height divisor per plane */
    int bytes_per_pixel[VIRGL_GBM_MAX_PLANES];        /* bytes per (subsampled) pixel */
};
51 
/* One entry of the GBM fourcc <-> virgl format translation table. */
struct format_conversion {
    uint32_t gbm_format;
    uint32_t virgl_format;
};
56 
/* Single plane, 1 byte per pixel (e.g. R8). */
static const struct planar_layout packed_1bpp_layout = {
    .num_planes = 1,
    .horizontal_subsampling = { 1 },
    .vertical_subsampling = { 1 },
    .bytes_per_pixel = { 1 }
};

/* Single plane, 2 bytes per pixel (e.g. RGB565). */
static const struct planar_layout packed_2bpp_layout = {
    .num_planes = 1,
    .horizontal_subsampling = { 1 },
    .vertical_subsampling = { 1 },
    .bytes_per_pixel = { 2 }
};

/* Single plane, 4 bytes per pixel (ARGB8888 family). */
static const struct planar_layout packed_4bpp_layout = {
    .num_planes = 1,
    .horizontal_subsampling = { 1 },
    .vertical_subsampling = { 1 },
    .bytes_per_pixel = { 4 }
};

/* Two planes: full-resolution Y, half-resolution 2-byte interleaved CbCr (NV12). */
static const struct planar_layout biplanar_yuv_420_layout = {
    .num_planes = 2,
    .horizontal_subsampling = { 1, 2 },
    .vertical_subsampling = { 1, 2 },
    .bytes_per_pixel = { 1, 2 }
};

/* Three planes: full-resolution Y plus two half-resolution chroma planes (YVU420). */
static const struct planar_layout triplanar_yuv_420_layout = {
    .num_planes = 3,
    .horizontal_subsampling = { 1, 2, 2 },
    .vertical_subsampling = { 1, 2, 2 },
    .bytes_per_pixel = { 1, 1, 1 }
};
91 
/*
 * GBM fourcc <-> virgl format pairs, searched front to back by
 * virgl_gbm_convert_format() (first match wins). ABGR8888/XBGR8888 appear
 * twice: a GBM-side lookup resolves to the non-emulated virgl formats, while
 * the *_EMULATED rows are only reachable via a virgl-side lookup.
 */
static const struct format_conversion conversions[] = {
    { GBM_FORMAT_RGB565, VIRGL_FORMAT_B5G6R5_UNORM },
    { GBM_FORMAT_ARGB8888, VIRGL_FORMAT_B8G8R8A8_UNORM },
    { GBM_FORMAT_XRGB8888, VIRGL_FORMAT_B8G8R8X8_UNORM },
    { GBM_FORMAT_NV12, VIRGL_FORMAT_NV12 },
    { GBM_FORMAT_ABGR8888, VIRGL_FORMAT_R8G8B8A8_UNORM},
    { GBM_FORMAT_XBGR8888, VIRGL_FORMAT_R8G8B8X8_UNORM},
    { GBM_FORMAT_R8, VIRGL_FORMAT_R8_UNORM},
    { GBM_FORMAT_YVU420, VIRGL_FORMAT_YV12},
    { GBM_FORMAT_ABGR8888, VIRGL_FORMAT_B8G8R8A8_UNORM_EMULATED},
    { GBM_FORMAT_XBGR8888, VIRGL_FORMAT_B8G8R8X8_UNORM_EMULATED},
};
104 
/*
 * Scans /dev/dri for a usable DRM render node (renderD*), skipping drivers
 * on the undesired list (e.g. vgem, which cannot render). Returns an open
 * file descriptor on success, or -1 if no acceptable node is found.
 */
static int rendernode_open(void)
{
   DIR *dir;
   int ret, fd;
   bool undesired_found;
   drmVersionPtr version;
   char *rendernode_name;
   struct dirent *dir_ent;
   const char *undesired[3] = { "vgem", "pvr", NULL };

   dir = opendir("/dev/dri");
   if (!dir)
      return -1;

   fd = -1;
   while ((dir_ent = readdir(dir))) {
      /* Only consider character devices named renderD* (render nodes). */
      if (dir_ent->d_type != DT_CHR)
         continue;

      if (strncmp(dir_ent->d_name, "renderD", 7))
         continue;

      ret = asprintf(&rendernode_name, "/dev/dri/%s", dir_ent->d_name);
      if (ret < 0)
         goto out;   /* fd is still -1 here, so this reports failure */

      fd = open(rendernode_name, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
      free(rendernode_name);

      if (fd < 0)
         continue;

      /* Ask the kernel for the driver name so we can filter by it. */
      version = drmGetVersion(fd);
      if (!version) {
         close(fd);
         fd = -1;
         continue;
      }

      undesired_found = false;
      for (uint32_t i = 0; i < ARRAY_SIZE(undesired); i++) {
         if (undesired[i] && !strcmp(version->name, undesired[i]))
            undesired_found = true;
      }

      drmFreeVersion(version);
      if (undesired_found) {
         close(fd);
         fd = -1;
         continue;
      }

      /* Acceptable node found; keep fd open and stop scanning. */
      break;
   }

out:
   closedir(dir);
   return fd;
}
164 
layout_from_format(uint32_t format)165 static const struct planar_layout *layout_from_format(uint32_t format)
166 {
167    switch (format) {
168    case GBM_FORMAT_R8:
169       return &packed_1bpp_layout;
170    case GBM_FORMAT_YVU420:
171       return &triplanar_yuv_420_layout;
172    case GBM_FORMAT_NV12:
173       return &biplanar_yuv_420_layout;
174    case GBM_FORMAT_RGB565:
175       return &packed_2bpp_layout;
176    case GBM_FORMAT_ARGB8888:
177    case GBM_FORMAT_XRGB8888:
178    case GBM_FORMAT_ABGR8888:
179    case GBM_FORMAT_XBGR8888:
180       return &packed_4bpp_layout;
181    default:
182       return NULL;
183    }
184 }
185 
/*
 * Copies the sub-rectangle of one image plane between a guest iovec list and
 * a linearly-mapped host plane.
 *
 * Guest data is addressed by a running byte offset across the iovecs. Each
 * iteration intersects the current iovec's byte range with the current image
 * row's byte range; any overlap is memcpy'd in the requested direction. The
 * range that ends first decides whether we advance to the next iovec or the
 * next row, so rows that straddle iovec boundaries are copied piecewise.
 */
static void virgl_gbm_transfer_internal(uint32_t planar_bytes_per_pixel,
                                        uint32_t subsampled_width,
                                        uint32_t subsampled_height,
                                        uint32_t guest_plane_stride,
                                        uint32_t guest_resource_offset,
                                        uint32_t host_plane_stride, uint8_t *host_address,
                                        struct iovec *iovecs, uint32_t num_iovecs,
                                        uint32_t direction)
{
   bool next_iovec, next_line;
   uint32_t current_height, current_iovec, iovec_start_offset;
   current_height = current_iovec = iovec_start_offset = 0;

   while (current_height < subsampled_height && current_iovec < num_iovecs) {
      uint32_t iovec_size = iovecs[current_iovec].iov_len;
      uint32_t iovec_end_offset = iovec_start_offset + iovec_size;

      /* Byte range of the current row of the transfer box in guest space. */
      uint32_t box_start_offset = guest_resource_offset + current_height * guest_plane_stride;
      uint32_t box_end_offset = box_start_offset + subsampled_width * planar_bytes_per_pixel;

      /* Intersection of the iovec range and the row range. */
      uint32_t max_start = MAX2(iovec_start_offset, box_start_offset);
      uint32_t min_end = MIN2(iovec_end_offset, box_end_offset);

      if (max_start < min_end) {
         uint32_t offset_in_iovec = (max_start > iovec_start_offset) ?
                                    (max_start - iovec_start_offset) : 0;

         uint32_t copy_iovec_size = min_end - max_start;
         /* Whichever range was exhausted determines what to advance next. */
         if (min_end >= iovec_end_offset) {
            next_iovec = true;
            next_line = false;
         } else {
            next_iovec = false;
            next_line = true;
         }

         uint8_t *guest_start = (uint8_t*)iovecs[current_iovec].iov_base + offset_in_iovec;
         uint8_t *host_start = host_address + (current_height * host_plane_stride) +
                               (max_start - box_start_offset);

         if (direction == VIRGL_TRANSFER_TO_HOST)
            memcpy(host_start, guest_start, copy_iovec_size);
         else
            memcpy(guest_start, host_start, copy_iovec_size);
      } else {
         /* No overlap: skip whichever range lies entirely before the other. */
         if (box_start_offset >= iovec_start_offset) {
            next_iovec = true;
            next_line = false;
         } else {
            next_iovec = false;
            next_line = true;
         }
      }

      if (next_iovec) {
         iovec_start_offset += iovec_size;
         current_iovec++;
      }

      if (next_line)
         current_height++;
   }
}
249 
virgl_gbm_init(int fd)250 struct virgl_gbm *virgl_gbm_init(int fd)
251 {
252    struct virgl_gbm *gbm = calloc(1, sizeof(struct virgl_gbm));
253    if (!gbm)
254       return NULL;
255 
256    gbm->fd = -1;
257    if (fd < 0) {
258       gbm->fd = rendernode_open();
259       if (gbm->fd < 0)
260          goto out_error;
261 
262       gbm->device = gbm_create_device(gbm->fd);
263       if (!gbm->device) {
264          close(gbm->fd);
265          goto out_error;
266       }
267    } else {
268       gbm->device = gbm_create_device(fd);
269       if (!gbm->device)
270          goto out_error;
271    }
272 
273    return gbm;
274 
275 out_error:
276    free(gbm);
277    return NULL;
278 }
279 
virgl_gbm_fini(struct virgl_gbm * gbm)280 void virgl_gbm_fini(struct virgl_gbm *gbm)
281 {
282    gbm_device_destroy(gbm->device);
283    if (gbm->fd >= 0)
284       close(gbm->fd);
285    free(gbm);
286 }
287 
virgl_gbm_convert_format(uint32_t * virgl_format,uint32_t * gbm_format)288 int virgl_gbm_convert_format(uint32_t *virgl_format, uint32_t *gbm_format)
289 {
290 
291     if (!virgl_format || !gbm_format)
292       return -1;
293 
294     if (*virgl_format != 0 && *gbm_format != 0)
295       return -1;
296 
297     for (uint32_t i = 0; i < ARRAY_SIZE(conversions); i++) {
298       if (conversions[i].gbm_format == *gbm_format ||
299           conversions[i].virgl_format == *virgl_format) {
300          *gbm_format = conversions[i].gbm_format;
301          *virgl_format = conversions[i].virgl_format;
302          return 0;
303       }
304     }
305 
306     return -1;
307 }
308 
309 #ifdef ENABLE_GBM_ALLOCATION
virgl_gbm_transfer(struct gbm_bo * bo,uint32_t direction,struct iovec * iovecs,uint32_t num_iovecs,const struct vrend_transfer_info * info)310 int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, struct iovec *iovecs,
311                        uint32_t num_iovecs, const struct vrend_transfer_info *info)
312 {
313    void *map_data;
314    uint32_t guest_plane_offset, guest_stride0, calc_stride0, host_map_stride0;
315 
316    uint32_t width = gbm_bo_get_width(bo);
317    uint32_t height = gbm_bo_get_height(bo);
318    uint32_t format = gbm_bo_get_format(bo);
319    int plane_count = gbm_bo_get_plane_count(bo);
320    const struct planar_layout *layout = layout_from_format(format);
321    if (!layout)
322       return -1;
323 
324    guest_plane_offset = host_map_stride0 = guest_stride0 = 0;
325    uint32_t map_flags = (direction == VIRGL_TRANSFER_TO_HOST) ? GBM_BO_TRANSFER_WRITE :
326                                                                 GBM_BO_TRANSFER_READ;
327    void *addr = gbm_bo_map(bo, 0, 0, width, height, map_flags, &host_map_stride0, &map_data);
328    if (!addr)
329       return -1;
330 
331    /*
332     * Unfortunately, the kernel doesn't actually pass the guest layer_stride and
333     * guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
334     * the level (always zero for 2D images) to work around this.
335     */
336    guest_stride0 = info->stride;
337    calc_stride0 = width * layout->bytes_per_pixel[0];
338    if (!guest_stride0)
339       guest_stride0 = (info->level > 0) ? (uint32_t)info->level : calc_stride0;
340 
341    if (guest_stride0 < calc_stride0)
342       return -1;
343 
344    if (guest_stride0 > host_map_stride0)
345       return -1;
346 
347    for (int plane = 0; plane < plane_count; plane++) {
348       uint32_t host_plane_offset = gbm_bo_get_offset(bo, plane);
349 
350       uint32_t subsampled_x = info->box->x / layout->horizontal_subsampling[plane];
351       uint32_t subsampled_y = info->box->y / layout->vertical_subsampling[plane];
352       uint32_t subsampled_width = info->box->width / layout->horizontal_subsampling[plane];
353       uint32_t subsampled_height = info->box->height / layout->vertical_subsampling[plane];
354       uint32_t plane_height = height / layout->vertical_subsampling[plane];
355 
356       uint32_t plane_byte_ratio = layout->bytes_per_pixel[plane] / layout->bytes_per_pixel[0];
357       uint32_t guest_plane_stride = (guest_stride0 * plane_byte_ratio)
358             / layout->horizontal_subsampling[plane];
359       uint32_t host_plane_stride = plane == 0
360             ? host_map_stride0 : gbm_bo_get_stride_for_plane(bo, plane);
361 
362       uint32_t guest_resource_offset = guest_plane_offset + (subsampled_y * guest_plane_stride)
363                                        + subsampled_x * layout->bytes_per_pixel[plane];
364       uint32_t host_resource_offset = host_plane_offset + (subsampled_y * host_plane_stride)
365                                        + subsampled_x * layout->bytes_per_pixel[plane];
366 
367       uint8_t *host_address = (uint8_t*)addr + host_resource_offset;
368 
369       virgl_gbm_transfer_internal(layout->bytes_per_pixel[plane], subsampled_width,
370                                   subsampled_height, guest_plane_stride, guest_resource_offset,
371                                   host_plane_stride, host_address, iovecs, num_iovecs, direction);
372 
373       guest_plane_offset += plane_height * guest_plane_stride;
374    }
375 
376    gbm_bo_unmap(bo, map_data);
377    return 0;
378 }
379 
virgl_gbm_convert_flags(uint32_t virgl_bind_flags)380 uint32_t virgl_gbm_convert_flags(uint32_t virgl_bind_flags)
381 {
382    uint32_t flags = 0;
383    if (virgl_bind_flags & VIRGL_BIND_RENDER_TARGET)
384       flags |= GBM_BO_USE_RENDERING;
385    if (virgl_bind_flags & VIRGL_BIND_SCANOUT)
386       flags |= GBM_BO_USE_SCANOUT;
387    if (virgl_bind_flags & VIRGL_BIND_CURSOR)
388       flags |= GBM_BO_USE_CURSOR;
389    if (virgl_bind_flags & VIRGL_BIND_LINEAR)
390       flags |= GBM_BO_USE_LINEAR;
391 
392    return flags;
393 }
394 
395 
/*
 * Fills *query with the dma-buf layout of bo: per-plane strides and offsets,
 * fourcc, modifier, and — when query->in_export_fds is set — one exported
 * prime fd per distinct GEM handle. Returns 0 on success; on export failure
 * all fds opened so far are closed, the output fields are reset, and the
 * export error code is returned.
 */
int virgl_gbm_export_query(struct gbm_bo *bo, struct virgl_renderer_export_query *query)
{
   int ret = -1;
   uint32_t handles[VIRGL_GBM_MAX_PLANES] = { 0 };
   struct gbm_device *gbm = gbm_bo_get_device(bo);
   int num_planes = gbm_bo_get_plane_count(bo);
   if (num_planes < 0 || num_planes > VIRGL_GBM_MAX_PLANES)
      return ret;

   /* Reset all outputs first so the error path can clean up uniformly. */
   query->out_num_fds = 0;
   query->out_fourcc = 0;
   query->out_modifier = 0;
   for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
      query->out_fds[plane] = -1;
      query->out_strides[plane] = 0;
      query->out_offsets[plane] = 0;
   }

   for (int plane = 0; plane < num_planes; plane++) {
      uint32_t i, handle;
      query->out_strides[plane] = gbm_bo_get_stride_for_plane(bo, plane);
      query->out_offsets[plane] = gbm_bo_get_offset(bo, plane);
      handle = gbm_bo_get_handle_for_plane(bo, plane).u32;

      /* Planes may share a GEM handle; export each distinct handle only once. */
      for (i = 0; i < query->out_num_fds; i++) {
         if (handles[i] == handle)
            break;
      }

      if (i == query->out_num_fds) {
         if (query->in_export_fds) {
            ret = virgl_gbm_export_fd(gbm, handle, &query->out_fds[query->out_num_fds]);
            if (ret)
               goto err_close;
         }
         handles[query->out_num_fds] = handle;
         query->out_num_fds++;
      }
   }

   query->out_modifier = gbm_bo_get_modifier(bo);
   query->out_fourcc = gbm_bo_get_format(bo);
   return 0;

err_close:
   /* Close any fds already exported and restore a clean output state. */
   for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
      if (query->out_fds[plane] >= 0) {
         close(query->out_fds[plane]);
         query->out_fds[plane] = -1;
      }

      query->out_strides[plane] = 0;
      query->out_offsets[plane] = 0;
   }

   query->out_num_fds = 0;
   return ret;
}
454 #endif
455 
virgl_gbm_export_fd(struct gbm_device * gbm,uint32_t handle,int32_t * out_fd)456 int virgl_gbm_export_fd(struct gbm_device *gbm, uint32_t handle, int32_t *out_fd)
457 {
458    int ret;
459    ret = drmPrimeHandleToFD(gbm_device_get_fd(gbm), handle, DRM_CLOEXEC | DRM_RDWR, out_fd);
460    // Kernels with older DRM core versions block DRM_RDWR but give a
461    // read/write mapping anyway.
462    if (ret)
463       ret = drmPrimeHandleToFD(gbm_device_get_fd(gbm), handle, DRM_CLOEXEC, out_fd);
464 
465    return ret;
466 }
467 
virgl_gbm_get_plane_width(struct gbm_bo * bo,int plane)468 int virgl_gbm_get_plane_width(struct gbm_bo *bo, int plane) {
469    uint32_t format = gbm_bo_get_format(bo);
470    const struct planar_layout *layout = layout_from_format(format);
471    if (!layout)
472       return -1;
473    return gbm_bo_get_width(bo) / layout->horizontal_subsampling[plane];
474 }
475 
virgl_gbm_get_plane_height(struct gbm_bo * bo,int plane)476 int virgl_gbm_get_plane_height(struct gbm_bo *bo, int plane) {
477    uint32_t format = gbm_bo_get_format(bo);
478    const struct planar_layout *layout = layout_from_format(format);
479    if (!layout)
480       return -1;
481    return gbm_bo_get_height(bo) / layout->vertical_subsampling[plane];
482 }
483 
virgl_gbm_get_plane_bytes_per_pixel(struct gbm_bo * bo,int plane)484 int virgl_gbm_get_plane_bytes_per_pixel(struct gbm_bo *bo, int plane) {
485    uint32_t format = gbm_bo_get_format(bo);
486    const struct planar_layout *layout = layout_from_format(format);
487    if (!layout)
488       return -1;
489    return layout->bytes_per_pixel[plane];
490 }
491 
virgl_gbm_external_allocation_preferred(uint32_t flags)492 bool virgl_gbm_external_allocation_preferred(uint32_t flags) {
493    return (flags & (VIRGL_RES_BIND_SCANOUT | VIRGL_RES_BIND_SHARED)) != 0;
494 }
495 
virgl_gbm_gpu_import_required(uint32_t flags)496 bool virgl_gbm_gpu_import_required(uint32_t flags) {
497    return !virgl_gbm_external_allocation_preferred(flags) ||
498           (flags & (VIRGL_BIND_RENDER_TARGET | VIRGL_BIND_SAMPLER_VIEW)) != 0;
499 }
500