1 /* Cairo - a vector graphics library with display and print output
2 *
3 * Copyright © 2009 Chris Wilson
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it either under the terms of the GNU Lesser General Public
7 * License version 2.1 as published by the Free Software Foundation
8 * (the "LGPL") or, at your option, under the terms of the Mozilla
9 * Public License Version 1.1 (the "MPL"). If you do not alter this
10 * notice, a recipient may use your version of this file under either
11 * the MPL or the LGPL.
12 *
13 * You should have received a copy of the LGPL along with this library
14 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
16 * You should have received a copy of the MPL along with this library
17 * in the file COPYING-MPL-1.1
18 *
19 * The contents of this file are subject to the Mozilla Public License
20 * Version 1.1 (the "License"); you may not use this file except in
21 * compliance with the License. You may obtain a copy of the License at
22 * http://www.mozilla.org/MPL/
23 *
24 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
25 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
26 * the specific language governing rights and limitations.
27 *
28 */
29
30 #include "cairoint.h"
31
32 #include "cairo-drm-private.h"
33 #include "cairo-drm-radeon-private.h"
34
35 #include "cairo-error-private.h"
36 #include "cairo-image-surface-private.h"
37
38 #include <sys/ioctl.h>
39 #include <sys/mman.h>
40 #include <errno.h>
41 #include <drm/radeon_drm.h>
42
43 cairo_bool_t
radeon_info(int fd,uint64_t * gart_size,uint64_t * vram_size)44 radeon_info (int fd,
45 uint64_t *gart_size,
46 uint64_t *vram_size)
47 {
48 struct drm_radeon_gem_info info;
49 int ret;
50
51 ret = ioctl (fd, DRM_IOCTL_RADEON_GEM_INFO, &info);
52 if (ret == -1)
53 return FALSE;
54
55 if (gart_size != NULL)
56 *gart_size = info.gart_size;
57
58 if (vram_size != NULL)
59 *vram_size = info.vram_size;
60
61 return TRUE;
62 }
63
64 void
radeon_bo_write(const radeon_device_t * device,radeon_bo_t * bo,unsigned long offset,unsigned long size,const void * data)65 radeon_bo_write (const radeon_device_t *device,
66 radeon_bo_t *bo,
67 unsigned long offset,
68 unsigned long size,
69 const void *data)
70 {
71 struct drm_radeon_gem_pwrite pwrite;
72 int ret;
73
74 memset (&pwrite, 0, sizeof (pwrite));
75 pwrite.handle = bo->base.handle;
76 pwrite.offset = offset;
77 pwrite.size = size;
78 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
79 do {
80 ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_PWRITE, &pwrite);
81 } while (ret == -1 && errno == EINTR);
82
83 /* XXX temporary workaround */
84 if (ret == -1 && errno == ENOSYS) {
85 uint8_t *ptr;
86
87 ptr = radeon_bo_map (device, bo);
88 if (ptr != NULL) {
89 memcpy (ptr + offset, data, size);
90 radeon_bo_unmap (bo);
91 }
92 }
93 }
94
95 void
radeon_bo_read(const radeon_device_t * device,radeon_bo_t * bo,unsigned long offset,unsigned long size,void * data)96 radeon_bo_read (const radeon_device_t *device,
97 radeon_bo_t *bo,
98 unsigned long offset,
99 unsigned long size,
100 void *data)
101 {
102 struct drm_radeon_gem_pread pread;
103 int ret;
104
105 memset (&pread, 0, sizeof (pread));
106 pread.handle = bo->base.handle;
107 pread.offset = offset;
108 pread.size = size;
109 pread.data_ptr = (uint64_t) (uintptr_t) data;
110 do {
111 ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_PREAD, &pread);
112 } while (ret == -1 && errno == EINTR);
113
114 /* XXX temporary workaround */
115 if (ret == -1 && errno == ENOSYS) {
116 uint8_t *ptr;
117
118 ptr = radeon_bo_map (device, bo);
119 if (ptr != NULL) {
120 memcpy (data, ptr + offset, size);
121 radeon_bo_unmap (bo);
122 }
123 }
124
125 VG (VALGRIND_MAKE_MEM_DEFINED (data, size));
126 }
127
128 void
radeon_bo_wait(const radeon_device_t * device,radeon_bo_t * bo)129 radeon_bo_wait (const radeon_device_t *device, radeon_bo_t *bo)
130 {
131 struct drm_radeon_gem_wait_idle wait;
132 int ret;
133
134 wait.handle = bo->base.handle;
135 do {
136 ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_WAIT_IDLE, &wait);
137 } while (ret == -1 && (errno == EINTR || errno == EBUSY));
138 }
139
140 void *
radeon_bo_map(const radeon_device_t * device,radeon_bo_t * bo)141 radeon_bo_map (const radeon_device_t *device, radeon_bo_t *bo)
142 {
143 struct drm_radeon_gem_mmap mmap_arg;
144 void *ptr;
145 int ret;
146
147 assert (bo->virtual == NULL);
148
149 memset (&mmap_arg, 0, sizeof (mmap_arg));
150 mmap_arg.handle = bo->base.handle;
151 mmap_arg.offset = 0;
152 mmap_arg.size = bo->base.size;
153
154 do {
155 ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_MMAP, &mmap_arg);
156 } while (ret == -1 && errno == EINTR);
157 if (unlikely (ret != 0)) {
158 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
159 return NULL;
160 }
161
162 VG (VALGRIND_MAKE_MEM_DEFINED (&mmap_arg, sizeof (mmap_arg)));
163
164 /* and mmap it */
165 ptr = mmap (0, bo->base.size, PROT_READ | PROT_WRITE,
166 MAP_SHARED, device->base.fd,
167 mmap_arg.addr_ptr);
168 if (unlikely (ptr == MAP_FAILED)) {
169 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
170 return NULL;
171 }
172
173 bo->virtual = ptr;
174
175 /* XXX set_domain? */
176 return bo->virtual;
177 }
178
179 void
radeon_bo_unmap(radeon_bo_t * bo)180 radeon_bo_unmap (radeon_bo_t *bo)
181 {
182 assert (bo->virtual != NULL);
183
184 munmap (bo->virtual, bo->base.size);
185 bo->virtual = NULL;
186 }
187
188 cairo_drm_bo_t *
radeon_bo_create(radeon_device_t * device,uint32_t size,uint32_t initial_domain)189 radeon_bo_create (radeon_device_t *device,
190 uint32_t size,
191 uint32_t initial_domain)
192 {
193 struct drm_radeon_gem_create create;
194 radeon_bo_t *bo;
195 int ret;
196
197 bo = _cairo_freepool_alloc (&device->bo_pool);
198 if (unlikely (bo == NULL))
199 return NULL;
200
201 create.size = size;
202 create.alignment = 0;
203 create.initial_domain = initial_domain;
204 create.flags = 0;
205 create.handle = 0;
206
207 do {
208 ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_CREATE, &create);
209 } while (ret == -1 && errno == EINTR);
210 if (ret == -1) {
211 _cairo_freepool_free (&device->bo_pool, bo);
212 return NULL;
213 }
214
215 bo->base.handle = create.handle;
216 bo->base.size = size;
217
218 bo->virtual = NULL;
219
220 bo->in_batch = FALSE;
221 bo->read_domains = 0;
222 bo->write_domain = 0;
223
224 CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
225 return &bo->base;
226 }
227
228 cairo_drm_bo_t *
radeon_bo_create_for_name(radeon_device_t * device,uint32_t name)229 radeon_bo_create_for_name (radeon_device_t *device,
230 uint32_t name)
231 {
232 radeon_bo_t *bo;
233 cairo_status_t status;
234
235 bo = _cairo_freepool_alloc (&device->bo_pool);
236 if (unlikely (bo == NULL))
237 return NULL;
238
239 status = _cairo_drm_bo_open_for_name (&device->base, &bo->base, name);
240 if (unlikely (status)) {
241 _cairo_freepool_free (&device->bo_pool, bo);
242 return NULL;
243 }
244
245 bo->virtual = NULL;
246
247 bo->in_batch = FALSE;
248 bo->read_domains = 0;
249 bo->write_domain = 0;
250
251 CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
252 return &bo->base;
253 }
254
255 static void
radeon_bo_release(void * _dev,void * _bo)256 radeon_bo_release (void *_dev, void *_bo)
257 {
258 radeon_device_t *device = _dev;
259 radeon_bo_t *bo = _bo;
260
261 _cairo_drm_bo_close (&device->base, &bo->base);
262 _cairo_freepool_free (&device->bo_pool, bo);
263 }
264
265 cairo_surface_t *
radeon_bo_get_image(const radeon_device_t * device,radeon_bo_t * bo,const cairo_drm_surface_t * surface)266 radeon_bo_get_image (const radeon_device_t *device,
267 radeon_bo_t *bo,
268 const cairo_drm_surface_t *surface)
269 {
270 cairo_image_surface_t *image;
271 uint8_t *dst;
272 int size, row;
273
274 image = (cairo_image_surface_t *)
275 cairo_image_surface_create (surface->format,
276 surface->width,
277 surface->height);
278 if (unlikely (image->base.status))
279 return &image->base;
280
281 if (image->stride == surface->stride) {
282 size = surface->stride * surface->height;
283 radeon_bo_read (device, bo, 0, size, image->data);
284 } else {
285 int offset;
286
287 size = surface->width;
288 if (surface->format != CAIRO_FORMAT_A8)
289 size *= 4;
290
291 offset = 0;
292 row = surface->height;
293 dst = image->data;
294 while (row--) {
295 radeon_bo_read (device, bo, offset, size, dst);
296 offset += surface->stride;
297 dst += image->stride;
298 }
299 }
300
301 return &image->base;
302 }
303
/* Set up the freepool from which radeon_bo_t bookkeeping structs are
 * allocated; torn down by _radeon_bo_cache_fini(). */
static void
_radeon_device_init_bo_cache (radeon_device_t *device)
{
    _cairo_freepool_init (&device->bo_pool, sizeof (radeon_bo_t));
}
309
310 cairo_status_t
radeon_device_init(radeon_device_t * device,int fd)311 radeon_device_init (radeon_device_t *device, int fd)
312 {
313 _radeon_device_init_bo_cache (device);
314
315 device->base.bo.release = radeon_bo_release;
316
317 return CAIRO_STATUS_SUCCESS;
318 }
319
/* Release the bo freepool created by _radeon_device_init_bo_cache(). */
static void
_radeon_bo_cache_fini (radeon_device_t *device)
{
    _cairo_freepool_fini (&device->bo_pool);
}
325
/* Tear down @device: free the bo pool first (it belongs to this backend),
 * then let the generic DRM layer release its own state. */
void
radeon_device_fini (radeon_device_t *device)
{
    _radeon_bo_cache_fini (device);
    _cairo_drm_device_fini (&device->base);
}
332