/* Cairo - a vector graphics library with display and print output
 *
 * Copyright © 2009 Chris Wilson
 *
 * This library is free software; you can redistribute it and/or
 * modify it either under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation
 * (the "LGPL") or, at your option, under the terms of the Mozilla
 * Public License Version 1.1 (the "MPL"). If you do not alter this
 * notice, a recipient may use your version of this file under either
 * the MPL or the LGPL.
 *
 * You should have received a copy of the LGPL along with this library
 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
 * You should have received a copy of the MPL along with this library
 * in the file COPYING-MPL-1.1
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
 * the specific language governing rights and limitations.
 *
 */

#include "cairoint.h"

#include "cairo-drm-private.h"
#include "cairo-drm-intel-private.h"
#include "cairo-drm-intel-ioctl-private.h"

#include "cairo-error-private.h"
#include "cairo-freelist-private.h"
#include "cairo-pattern-private.h"
#include "cairo-image-surface-private.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <errno.h>
#include <drm/i915_drm.h>

#define GLYPH_CACHE_WIDTH 1024
#define GLYPH_CACHE_HEIGHT 1024
#define GLYPH_CACHE_MIN_SIZE 1
#define GLYPH_CACHE_MAX_SIZE 128

#define IMAGE_CACHE_WIDTH 1024
#define IMAGE_CACHE_HEIGHT 1024

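/* Fetch a single i915 driver parameter via DRM_IOCTL_I915_GETPARAM,
 * e.g. intel_get (fd, I915_PARAM_HAS_GEM). Returns the value, or 0 if
 * the ioctl fails, so callers can treat "unsupported" and "error"
 * alike. */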
int
intel_get (int fd, int param)
{
    struct drm_i915_getparam gp;
    int value;

    gp.param = param;
    gp.value = &value;
    if (ioctl (fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
        return 0;

    VG (VALGRIND_MAKE_MEM_DEFINED (&value, sizeof (value)));

    return value;
}

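/* Check that the device is usable by this backend: both GEM and
 * execbuf2 must be supported. On success, optionally report the total
 * GTT aperture size to the caller. */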
cairo_bool_t
intel_info (int fd, uint64_t *gtt_size)
{
    struct drm_i915_gem_get_aperture info;

    if (! intel_get (fd, I915_PARAM_HAS_GEM))
        return FALSE;

    if (! intel_get (fd, I915_PARAM_HAS_EXECBUF2))
        return FALSE;

    if (ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info) < 0)
        return FALSE;

    VG (VALGRIND_MAKE_MEM_DEFINED (&info, sizeof (info)));

    if (gtt_size != NULL)
        *gtt_size = info.aper_size;

    return TRUE;
}

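/* Linear transfers between user memory and a bo go through the
 * kernel's pwrite/pread ioctls; both require the bo to be untiled.
 * Note the retry loops: these ioctls can fail with EINTR when
 * interrupted by a signal and must be restarted. */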
void
intel_bo_write (const intel_device_t *device,
                intel_bo_t *bo,
                unsigned long offset,
                unsigned long size,
                const void *data)
{
    struct drm_i915_gem_pwrite pwrite;
    int ret;

    assert (bo->tiling == I915_TILING_NONE);
    assert (size);
    assert (offset < bo->base.size);
    assert (size+offset <= bo->base.size);

    intel_bo_set_tiling (device, bo);

    assert (bo->_tiling == I915_TILING_NONE);

    memset (&pwrite, 0, sizeof (pwrite));
    pwrite.handle = bo->base.handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uint64_t) (uintptr_t) data;
    do {
        ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    } while (ret == -1 && errno == EINTR);
    assert (ret == 0);

    bo->busy = FALSE;
}

void
intel_bo_read (const intel_device_t *device,
               intel_bo_t *bo,
               unsigned long offset,
               unsigned long size,
               void *data)
{
    struct drm_i915_gem_pread pread;
    int ret;

    assert (bo->tiling == I915_TILING_NONE);
    assert (size);
    assert (offset < bo->base.size);
    assert (size+offset <= bo->base.size);

    intel_bo_set_tiling (device, bo);

    assert (bo->_tiling == I915_TILING_NONE);

    memset (&pread, 0, sizeof (pread));
    pread.handle = bo->base.handle;
    pread.offset = offset;
    pread.size = size;
    pread.data_ptr = (uint64_t) (uintptr_t) data;
    do {
        ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    } while (ret == -1 && errno == EINTR);
    assert (ret == 0);

    bo->cpu = TRUE;
    bo->busy = FALSE;
}

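/* Map a bo into the caller's address space. Untiled buffers that are
 * already coherent with the CPU get a direct CPU mmap; everything else
 * is mapped through the GTT so the hardware handles (de)tiling. Either
 * way, the bo is then moved into the matching domain so the kernel can
 * manage cache flushing. */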
void *
intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
{
    struct drm_i915_gem_set_domain set_domain;
    uint32_t domain;
    int ret;

    intel_bo_set_tiling (device, bo);

    if (bo->virtual != NULL)
        return bo->virtual;

    if (bo->cpu && bo->tiling == I915_TILING_NONE) {
        struct drm_i915_gem_mmap mmap_arg;

        mmap_arg.handle = bo->base.handle;
        mmap_arg.offset = 0;
        mmap_arg.size = bo->base.size;
        mmap_arg.addr_ptr = 0;

        do {
            ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
        } while (ret == -1 && errno == EINTR);
        if (unlikely (ret != 0)) {
            _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
            return NULL;
        }

        bo->virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
        domain = I915_GEM_DOMAIN_CPU;
    } else {
        struct drm_i915_gem_mmap_gtt mmap_arg;
        void *ptr;

        /* Get the fake offset back... */
        mmap_arg.handle = bo->base.handle;
        do {
            ret = ioctl (device->base.fd,
                         DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
        } while (ret == -1 && errno == EINTR);
        if (unlikely (ret != 0)) {
            _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
            return NULL;
        }

        /* and mmap it */
        ptr = mmap (0, bo->base.size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, device->base.fd,
                    mmap_arg.offset);
        if (unlikely (ptr == MAP_FAILED)) {
            _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
            return NULL;
        }

        bo->virtual = ptr;
        domain = I915_GEM_DOMAIN_GTT;
    }

    VG (VALGRIND_MAKE_MEM_DEFINED (bo->virtual, bo->base.size));

    set_domain.handle = bo->base.handle;
    set_domain.read_domains = domain;
    set_domain.write_domain = domain;

    do {
        ret = ioctl (device->base.fd,
                     DRM_IOCTL_I915_GEM_SET_DOMAIN,
                     &set_domain);
    } while (ret == -1 && errno == EINTR);

    if (ret != 0) {
        intel_bo_unmap (bo);
        _cairo_error_throw (CAIRO_STATUS_DEVICE_ERROR);
        return NULL;
    }

    bo->busy = FALSE;
    return bo->virtual;
}

void
intel_bo_unmap (intel_bo_t *bo)
{
    munmap (bo->virtual, bo->base.size);
    bo->virtual = NULL;
}

cairo_bool_t
intel_bo_is_inactive (const intel_device_t *device, intel_bo_t *bo)
{
    struct drm_i915_gem_busy busy;

    if (! bo->busy)
        return TRUE;

    /* Is this buffer busy for our intended usage pattern? */
    busy.handle = bo->base.handle;
    busy.busy = 1;
    ioctl (device->base.fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

    bo->busy = busy.busy;
    return ! busy.busy;
}

cairo_bool_t
intel_bo_wait (const intel_device_t *device, const intel_bo_t *bo)
{
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    set_domain.handle = bo->base.handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = 0;

    do {
        ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
    } while (ret == -1 && errno == EINTR);

    return ret == 0;
}

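/* Round up to the next power of two by smearing the highest set bit
 * into every lower position, e.g. pot(5000) == 8192 and
 * pot(4096) == 4096. */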
static inline int
pot (int v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}

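/* Hint to the kernel that a bo's backing pages may be discarded
 * (I915_MADV_DONTNEED) or are needed again (I915_MADV_WILLNEED).
 * Returns whether the pages are still retained; a purged bo must be
 * treated as having undefined contents. */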
cairo_bool_t
intel_bo_madvise (intel_device_t *device,
                  intel_bo_t *bo,
                  int advice)
{
    struct drm_i915_gem_madvise madv;

    madv.handle = bo->base.handle;
    madv.madv = advice;
    madv.retained = TRUE;
    ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
    return madv.retained;
}

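/* NOTE: DRM_IOCTL_I915_GEM_REAL_SIZE is an experimental ioctl (defined
 * locally in cairo-drm-intel-ioctl-private.h) that appears never to
 * have become part of the mainline kernel ABI; the unconditional
 * return below keeps this path disabled while preserving the code. */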
static void
intel_bo_set_real_size (intel_device_t *device,
                        intel_bo_t *bo,
                        size_t size)
{
    struct drm_i915_gem_real_size arg;
    int ret;

    return; /* XXX disabled: experimental REAL_SIZE ioctl */

    if (size == bo->base.size)
        return;

    arg.handle = bo->base.handle;
    arg.size = size;
    do {
        ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_REAL_SIZE, &arg);
    } while (ret == -1 && errno == EINTR);

    if (ret == 0) {
        if (size > bo->base.size) {
            assert (bo->exec == NULL);
            bo->cpu = TRUE;
            bo->busy = FALSE;
        }

        bo->base.size = size;
    }
}

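/* Allocate a bo, preferring reuse over fresh allocation. Sizes are
 * rounded up to whole 4 KiB pages and binned to power-of-two bucket
 * sizes to improve recycling. For GPU render targets we first try to
 * recycle a buffer still in flight: ideally one no larger than twice
 * the request, else the first one that is big enough, provided its
 * tiling is compatible. */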
intel_bo_t *
intel_bo_create (intel_device_t *device,
                 uint32_t max_size,
                 uint32_t real_size,
                 cairo_bool_t gpu_target,
                 uint32_t tiling,
                 uint32_t stride)
{
    intel_bo_t *bo;
    uint32_t cache_size;
    struct drm_i915_gem_create create;
    int bucket;
    int ret;

    max_size = (max_size + 4095) & -4096;
    real_size = (real_size + 4095) & -4096;
    cache_size = pot (max_size);
    bucket = ffs (cache_size / 4096) - 1;
    if (bucket >= INTEL_BO_CACHE_BUCKETS)
        cache_size = max_size;

    if (gpu_target) {
        intel_bo_t *first = NULL;

        cairo_list_foreach_entry (bo, intel_bo_t,
                                  &device->bo_in_flight,
                                  cache_list)
        {
            assert (bo->exec != NULL);
            if (tiling && bo->_tiling &&
                (bo->_tiling != tiling || bo->_stride != stride))
            {
                continue;
            }

            if (real_size <= bo->base.size) {
                if (real_size >= bo->base.size/2) {
                    cairo_list_del (&bo->cache_list);
                    bo = intel_bo_reference (bo);
                    goto DONE;
                }

                if (first == NULL)
                    first = bo;
            }
        }

        if (first != NULL) {
            cairo_list_del (&first->cache_list);
            bo = intel_bo_reference (first);
            goto DONE;
        }
    }

    /* no cached buffer available, allocate fresh */
    bo = _cairo_freepool_alloc (&device->bo_pool);
    if (unlikely (bo == NULL)) {
        _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
        return NULL;
    }

    cairo_list_init (&bo->cache_list);

    bo->base.name = 0;

    bo->offset = 0;
    bo->virtual = NULL;
    bo->cpu = TRUE;

    bo->_tiling = I915_TILING_NONE;
    bo->_stride = 0;
    bo->purgeable = 0;
    bo->busy = FALSE;

    bo->opaque0 = 0;
    bo->opaque1 = 0;

    bo->exec = NULL;
    bo->batch_read_domains = 0;
    bo->batch_write_domain = 0;
    cairo_list_init (&bo->link);

    create.size = cache_size;
    create.handle = 0;
    ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_CREATE, &create);
    if (unlikely (ret != 0)) {
        _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
        _cairo_freepool_free (&device->bo_pool, bo);
        return NULL;
    }

    bo->base.handle = create.handle;
    bo->full_size = bo->base.size = create.size;

    intel_bo_set_real_size (device, bo, real_size);
    CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
DONE:
    bo->tiling = tiling;
    bo->stride = stride;
    return bo;
}

intel_bo_t *
intel_bo_create_for_name (intel_device_t *device, uint32_t name)
{
    struct drm_i915_gem_get_tiling get_tiling;
    cairo_status_t status;
    intel_bo_t *bo;
    int ret;

    bo = _cairo_freepool_alloc (&device->bo_pool);
    if (unlikely (bo == NULL)) {
        _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
        return NULL;
    }

    status = _cairo_drm_bo_open_for_name (&device->base, &bo->base, name);
    if (unlikely (status))
        goto FAIL;

    CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
    cairo_list_init (&bo->cache_list);

    bo->full_size = bo->base.size;
    bo->offset = 0;
    bo->virtual = NULL;
    bo->purgeable = 0;
    bo->busy = TRUE;
    bo->cpu = FALSE;

    bo->opaque0 = 0;
    bo->opaque1 = 0;

    bo->exec = NULL;
    bo->batch_read_domains = 0;
    bo->batch_write_domain = 0;
    cairo_list_init (&bo->link);

    memset (&get_tiling, 0, sizeof (get_tiling));
    get_tiling.handle = bo->base.handle;

    ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
    if (unlikely (ret != 0)) {
        _cairo_error_throw (CAIRO_STATUS_DEVICE_ERROR);
        _cairo_drm_bo_close (&device->base, &bo->base);
        goto FAIL;
    }

    bo->_tiling = bo->tiling = get_tiling.tiling_mode;
    /* bo->stride = get_tiling.stride; XXX not available from get_tiling */

    return bo;

FAIL:
    _cairo_freepool_free (&device->bo_pool, bo);
    return NULL;
}

static void
intel_bo_release (void *_dev, void *_bo)
{
    intel_device_t *device = _dev;
    intel_bo_t *bo = _bo;

    if (bo->virtual != NULL)
        intel_bo_unmap (bo);

    assert (bo->exec == NULL);
    assert (cairo_list_is_empty (&bo->cache_list));

    _cairo_drm_bo_close (&device->base, &bo->base);
    _cairo_freepool_free (&device->bo_pool, bo);
}

void
intel_bo_set_tiling (const intel_device_t *device,
                     intel_bo_t *bo)
{
    struct drm_i915_gem_set_tiling set_tiling;
    int ret;

    if (bo->tiling == bo->_tiling &&
        (bo->tiling == I915_TILING_NONE || bo->stride == bo->_stride))
        return;

    do {
        set_tiling.handle = bo->base.handle;
        set_tiling.tiling_mode = bo->tiling;
        set_tiling.stride = bo->stride;

        ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    } while (ret == -1 && errno == EINTR);

    assert (ret == 0);
    bo->_tiling = bo->tiling;
    bo->_stride = bo->stride;
}

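/* Upload a 1-bpp A1 image by expanding each bit into a full 0x00/0xff
 * byte, as the hardware consumes the data as A8. The expansion uses a
 * stack buffer when the data fits and falls back to a heap allocation
 * otherwise. */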
static cairo_status_t
_intel_bo_put_a1_image (intel_device_t *device,
                        intel_bo_t *bo,
                        cairo_image_surface_t *src,
                        int src_x, int src_y,
                        int width, int height,
                        int dst_x, int dst_y)
{
    uint8_t buf[CAIRO_STACK_BUFFER_SIZE];
    uint8_t *a8 = buf;
    uint8_t *data;
    int x;

    data = src->data + src_y * src->stride;

    if (bo->tiling == I915_TILING_NONE && width == bo->stride) {
        uint8_t *p;
        int size;

        size = bo->stride * height;
        if (size > (int) sizeof (buf)) {
            a8 = _cairo_malloc_ab (bo->stride, height);
            if (a8 == NULL)
                return _cairo_error (CAIRO_STATUS_NO_MEMORY);
        }

        p = a8;
        while (height--) {
            for (x = 0; x < width; x++) {
                int i = src_x + x;
                int byte = i / 8;
                int bit = i % 8;
                p[x] = data[byte] & (1 << bit) ? 0xff : 0x00;
            }

            data += src->stride;
            p += bo->stride;
        }

        intel_bo_write (device, bo,
                        dst_y * bo->stride + dst_x, /* XXX  bo_offset */
                        size, a8);
    } else {
        uint8_t *dst;

        if (width > (int) sizeof (buf)) {
            a8 = _cairo_malloc (width);
            if (a8 == NULL)
                return _cairo_error (CAIRO_STATUS_NO_MEMORY);
        }

        dst = intel_bo_map (device, bo);
        if (dst == NULL) {
            if (a8 != buf)
                free (a8);
            return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
        }

        dst += dst_y * bo->stride + dst_x; /* XXX  bo_offset */
        while (height--) {
            for (x = 0; x < width; x++) {
                int i = src_x + x;
                int byte = i / 8;
                int bit = i % 8;
                a8[x] = data[byte] & (1 << bit) ? 0xff : 0x00;
            }

            memcpy (dst, a8, width);
            dst  += bo->stride;
            data += src->stride;
        }
    }

    if (a8 != buf)
        free (a8);

    return CAIRO_STATUS_SUCCESS;
}

cairo_status_t
intel_bo_put_image (intel_device_t *device,
                    intel_bo_t *bo,
                    cairo_image_surface_t *src,
                    int src_x, int src_y,
                    int width, int height,
                    int dst_x, int dst_y)
{
    uint8_t *data;
    int size;
    int offset;

    intel_bo_set_tiling (device, bo);

    offset = dst_y * bo->stride;
    data = src->data + src_y * src->stride;
    switch (src->format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB24:
        offset += 4 * dst_x;
        data   += 4 * src_x;
        size    = 4 * width;
        break;
    case CAIRO_FORMAT_RGB16_565:
        offset += 2 * dst_x;
        data   += 2 * src_x;
        size    = 2 * width;
        break;
    case CAIRO_FORMAT_A8:
        offset += dst_x;
        data   += src_x;
        size    = width;
        break;
    case CAIRO_FORMAT_A1:
        return _intel_bo_put_a1_image (device, bo, src,
                                       src_x, src_y,
                                       width, height,
                                       dst_x, dst_y);
    default:
    case CAIRO_FORMAT_INVALID:
        return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
    }

    if (bo->tiling == I915_TILING_NONE && src->stride == bo->stride) {
        intel_bo_write (device, bo, offset, bo->stride * height, data);
    } else {
        uint8_t *dst;

        dst = intel_bo_map (device, bo);
        if (unlikely (dst == NULL))
            return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);

        dst += offset;
        while (height--) {
            memcpy (dst, data, size);
            dst  += bo->stride;
            data += src->stride;
        }
    }

    return CAIRO_STATUS_SUCCESS;
}

static cairo_bool_t
_intel_snapshot_cache_entry_can_remove (const void *closure)
{
    return TRUE;
}

static void
_intel_snapshot_cache_entry_destroy (void *closure)
{
    intel_surface_t *surface = cairo_container_of (closure,
                                                   intel_surface_t,
                                                   snapshot_cache_entry);

    surface->snapshot_cache_entry.hash = 0;
}

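/* One-time device setup: size the caches from the GTT aperture.
 * Roughly 1/32 of the available aperture is held back as headroom; the
 * snapshot cache is capped at an eighth of the total aperture, with
 * individual entries limited to a quarter of that. */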
cairo_status_t
intel_device_init (intel_device_t *device, int fd)
{
    struct drm_i915_gem_get_aperture aperture;
    cairo_status_t status;
    size_t size;
    int ret;
    int n;

    ret = ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
    if (ret != 0)
        return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);

    CAIRO_MUTEX_INIT (device->mutex);

    device->gtt_max_size = aperture.aper_size;
    device->gtt_avail_size = aperture.aper_available_size;
    device->gtt_avail_size -= device->gtt_avail_size >> 5;

    size = aperture.aper_size / 8;
    device->snapshot_cache_max_size = size / 4;
    status = _cairo_cache_init (&device->snapshot_cache,
                                NULL,
                                _intel_snapshot_cache_entry_can_remove,
                                _intel_snapshot_cache_entry_destroy,
                                size);
    if (unlikely (status))
        return status;

    for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++) {
        device->glyph_cache[n].buffer.bo = NULL;
        cairo_list_init (&device->glyph_cache[n].rtree.pinned);
    }
    cairo_list_init (&device->fonts);

    device->gradient_cache.size = 0;

    device->base.bo.release = intel_bo_release;

    return CAIRO_STATUS_SUCCESS;
}

static void
_intel_gradient_cache_fini (intel_device_t *device)
{
    unsigned int n;

    for (n = 0; n < device->gradient_cache.size; n++) {
        _cairo_pattern_fini (&device->gradient_cache.cache[n].pattern.base);
        if (device->gradient_cache.cache[n].buffer.bo != NULL)
            cairo_drm_bo_destroy (&device->base.base,
                                  &device->gradient_cache.cache[n].buffer.bo->base);
    }
}

static void
_intel_glyph_cache_fini (intel_device_t *device, intel_buffer_cache_t *cache)
{
    if (cache->buffer.bo == NULL)
        return;

    intel_bo_destroy (device, cache->buffer.bo);
    _cairo_rtree_fini (&cache->rtree);
}

void
intel_device_fini (intel_device_t *device)
{
    cairo_scaled_font_t *scaled_font, *next_scaled_font;
    int n;

    cairo_list_foreach_entry_safe (scaled_font,
                                   next_scaled_font,
                                   cairo_scaled_font_t,
                                   &device->fonts,
                                   link)
    {
        _cairo_scaled_font_revoke_ownership (scaled_font);
    }

    for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++)
        _intel_glyph_cache_fini (device, &device->glyph_cache[n]);

    _cairo_cache_fini (&device->snapshot_cache);

    _intel_gradient_cache_fini (device);
    _cairo_freepool_fini (&device->bo_pool);

    _cairo_drm_device_fini (&device->base);
}

void
intel_throttle (intel_device_t *device)
{
    ioctl (device->base.fd, DRM_IOCTL_I915_GEM_THROTTLE);
}

void
intel_glyph_cache_unpin (intel_device_t *device)
{
    int n;

    for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++)
        _cairo_rtree_unpin (&device->glyph_cache[n].rtree);
}

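/* Copy a rendered glyph into the cache surface. A free slot is taken
 * from the rtree if possible; failing that, a random unpinned slot is
 * evicted. The glyph's texture coordinates within the cache are
 * precomputed here for later compositing. */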
static cairo_status_t
intel_glyph_cache_add_glyph (intel_device_t *device,
                             intel_buffer_cache_t *cache,
                             cairo_scaled_glyph_t *scaled_glyph)
{
    cairo_image_surface_t *glyph_surface = scaled_glyph->surface;
    intel_glyph_t *glyph;
    cairo_rtree_node_t *node = NULL;
    double sf_x, sf_y;
    cairo_status_t status;
    uint8_t *dst, *src;
    int width, height;

    width = glyph_surface->width;
    if (width < GLYPH_CACHE_MIN_SIZE)
        width = GLYPH_CACHE_MIN_SIZE;
    height = glyph_surface->height;
    if (height < GLYPH_CACHE_MIN_SIZE)
        height = GLYPH_CACHE_MIN_SIZE;

    /* search for an available slot */
    status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
    /* search for an unpinned slot */
    if (status == CAIRO_INT_STATUS_UNSUPPORTED) {
        status = _cairo_rtree_evict_random (&cache->rtree, width, height, &node);
        if (status == CAIRO_STATUS_SUCCESS)
            status = _cairo_rtree_node_insert (&cache->rtree, node, width, height, &node);
    }
    if (unlikely (status))
        return status;

    /* XXX streaming upload? */

    height = glyph_surface->height;
    src = glyph_surface->data;
    dst = cache->buffer.bo->virtual;
    if (dst == NULL) {
        dst = intel_bo_map (device, cache->buffer.bo);
        if (unlikely (dst == NULL))
            return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
    }

    dst += node->y * cache->buffer.stride;
    switch (glyph_surface->format) {
    case CAIRO_FORMAT_A1: {
        uint8_t buf[CAIRO_STACK_BUFFER_SIZE];
        uint8_t *a8 = buf;
        int x;

        if (width > (int) sizeof (buf)) {
            a8 = _cairo_malloc (width);
            if (unlikely (a8 == NULL))
                return _cairo_error (CAIRO_STATUS_NO_MEMORY);
        }

        dst += node->x;
        width = glyph_surface->width;
        while (height--) {
            for (x = 0; x < width; x++)
                a8[x] = src[x>>3] & (1 << (x&7)) ? 0xff : 0x00;

            memcpy (dst, a8, width);
            dst += cache->buffer.stride;
            src += glyph_surface->stride;
        }

        if (a8 != buf)
            free (a8);
        break;
    }

    case CAIRO_FORMAT_A8:
        dst  += node->x;
        width = glyph_surface->width;
        while (height--) {
            memcpy (dst, src, width);
            dst += cache->buffer.stride;
            src += glyph_surface->stride;
        }
        break;

    case CAIRO_FORMAT_ARGB32:
        dst  += 4*node->x;
        width = 4*glyph_surface->width;
        while (height--) {
            memcpy (dst, src, width);
            dst += cache->buffer.stride;
            src += glyph_surface->stride;
        }
        break;
    default:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_INVALID:
        ASSERT_NOT_REACHED;
        return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
    }

    scaled_glyph->surface_private = node;

    glyph = (intel_glyph_t *) node;
    glyph->node.owner = &scaled_glyph->surface_private;
    glyph->cache = cache;

    /* compute tex coords: bottom-right, bottom-left, top-left */
    sf_x = 1. / cache->buffer.width;
    sf_y = 1. / cache->buffer.height;
    glyph->texcoord[0] =
        texcoord_2d_16 (sf_x * (node->x + glyph_surface->width),
                        sf_y * (node->y + glyph_surface->height));
    glyph->texcoord[1] =
        texcoord_2d_16 (sf_x * node->x,
                        sf_y * (node->y + glyph_surface->height));
    glyph->texcoord[2] =
        texcoord_2d_16 (sf_x * node->x,
                        sf_y * node->y);

    glyph->width  = glyph_surface->width;
    glyph->height = glyph_surface->height;

    return CAIRO_STATUS_SUCCESS;
}

void
intel_scaled_glyph_fini (cairo_scaled_glyph_t *scaled_glyph,
                         cairo_scaled_font_t *scaled_font)
{
    intel_glyph_t *glyph;

    glyph = scaled_glyph->surface_private;
    if (glyph != NULL) {
        /* XXX thread-safety? Probably ok due to the frozen scaled-font. */
        glyph->node.owner = NULL;
        if (! glyph->node.pinned)
            _cairo_rtree_node_remove (&glyph->cache->rtree, &glyph->node);
    }
}

void
intel_scaled_font_fini (cairo_scaled_font_t *scaled_font)
{
    cairo_list_del (&scaled_font->link);
}

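/* Two shared glyph caches are kept per device: one ARGB32 surface for
 * color/subpixel glyphs, and one A8 surface that also serves A1 masks
 * (expanded on upload). */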
static cairo_status_t
intel_get_glyph_cache (intel_device_t *device,
                       cairo_format_t format,
                       intel_buffer_cache_t **out)
{
    intel_buffer_cache_t *cache;
    cairo_status_t status;

    switch (format) {
    case CAIRO_FORMAT_ARGB32:
        cache = &device->glyph_cache[0];
        format = CAIRO_FORMAT_ARGB32;
        break;
    case CAIRO_FORMAT_A8:
    case CAIRO_FORMAT_A1:
        cache = &device->glyph_cache[1];
        format = CAIRO_FORMAT_A8;
        break;
    default:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_INVALID:
        ASSERT_NOT_REACHED;
        return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
    }

    if (unlikely (cache->buffer.bo == NULL)) {
        status = intel_buffer_cache_init (cache, device, format,
                                          GLYPH_CACHE_WIDTH,
                                          GLYPH_CACHE_HEIGHT);
        if (unlikely (status))
            return status;

        _cairo_rtree_init (&cache->rtree,
                           GLYPH_CACHE_WIDTH,
                           GLYPH_CACHE_HEIGHT,
                           0, sizeof (intel_glyph_t));
    }

    *out = cache;
    return CAIRO_STATUS_SUCCESS;
}

cairo_int_status_t
intel_get_glyph (intel_device_t *device,
                 cairo_scaled_font_t *scaled_font,
                 cairo_scaled_glyph_t *scaled_glyph)
{
    cairo_bool_t own_surface = FALSE;
    intel_buffer_cache_t *cache;
    cairo_status_t status;

    if (scaled_glyph->surface == NULL) {
        status =
            scaled_font->backend->scaled_glyph_init (scaled_font,
                                                     scaled_glyph,
                                                     CAIRO_SCALED_GLYPH_INFO_SURFACE);
        if (unlikely (status))
            return status;

        if (unlikely (scaled_glyph->surface == NULL))
            return CAIRO_INT_STATUS_UNSUPPORTED;

        own_surface = TRUE;
    }

    if (unlikely (scaled_glyph->surface->width == 0 ||
                  scaled_glyph->surface->height == 0))
    {
        return CAIRO_INT_STATUS_NOTHING_TO_DO;
    }

    if (unlikely (scaled_glyph->surface->width  > GLYPH_CACHE_MAX_SIZE ||
                  scaled_glyph->surface->height > GLYPH_CACHE_MAX_SIZE))
    {
        return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    status = intel_get_glyph_cache (device,
                                    scaled_glyph->surface->format,
                                    &cache);
    if (unlikely (status))
        return status;

    status = intel_glyph_cache_add_glyph (device, cache, scaled_glyph);
    if (unlikely (_cairo_status_is_error (status)))
        return status;

    if (unlikely (status == CAIRO_INT_STATUS_UNSUPPORTED)) {
        /* no room, replace entire cache */

        assert (cache->buffer.bo->exec != NULL);

        _cairo_rtree_reset (&cache->rtree);
        intel_bo_destroy (device, cache->buffer.bo);
        cache->buffer.bo = NULL;

        status = intel_buffer_cache_init (cache, device,
                                          scaled_glyph->surface->format,
                                          GLYPH_CACHE_WIDTH,
                                          GLYPH_CACHE_HEIGHT);
        if (unlikely (status))
            return status;

        status = intel_glyph_cache_add_glyph (device, cache, scaled_glyph);
        if (unlikely (status))
            return status;
    }

    if (own_surface) {
        /* and release the copy of the image from system memory */
        cairo_surface_destroy (&scaled_glyph->surface->base);
        scaled_glyph->surface = NULL;
    }

    return CAIRO_STATUS_SUCCESS;
}

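/* Create the backing bo for a cache surface and encode its sampler
 * state: map0/map1 hold the i915 texture-map description (MAPSURF
 * format plus the MS3 tiling/size and MS4 pitch fields), so the cache
 * can later be bound as a texture without recomputing them per draw. */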
cairo_status_t
intel_buffer_cache_init (intel_buffer_cache_t *cache,
                         intel_device_t *device,
                         cairo_format_t format,
                         int width, int height)
{
    const uint32_t tiling = I915_TILING_Y;
    uint32_t stride, size;

    assert ((width & 3) == 0);
    assert ((height & 1) == 0);
    cache->buffer.format = format;
    cache->buffer.width = width;
    cache->buffer.height = height;

    switch (format) {
    default:
    case CAIRO_FORMAT_A1:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_INVALID:
        ASSERT_NOT_REACHED;
        return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
    case CAIRO_FORMAT_ARGB32:
        cache->buffer.map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
        stride = width * 4;
        break;
    case CAIRO_FORMAT_A8:
        cache->buffer.map0 = MAPSURF_8BIT | MT_8BIT_I8;
        stride = width;
        break;
    }

    size = height * stride;
    cache->buffer.bo = intel_bo_create (device,
                                        size, size,
                                        FALSE, tiling, stride);
    if (unlikely (cache->buffer.bo == NULL))
        return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    cache->buffer.stride = stride;

    cache->buffer.offset = 0;
    cache->buffer.map0 |= MS3_tiling (tiling);
    cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
                          ((width - 1)  << MS3_WIDTH_SHIFT);
    cache->buffer.map1 = ((stride / 4) - 1) << MS4_PITCH_SHIFT;

    cache->ref_count = 0;
    cairo_list_init (&cache->link);

    return CAIRO_STATUS_SUCCESS;
}

cairo_status_t
intel_snapshot_cache_insert (intel_device_t *device,
                             intel_surface_t *surface)
{
    cairo_status_t status;

    surface->snapshot_cache_entry.size = surface->drm.bo->size;
    if (surface->snapshot_cache_entry.size >
        device->snapshot_cache_max_size)
    {
        return CAIRO_STATUS_SUCCESS;
    }

    if (device->snapshot_cache.freeze_count == 0)
        _cairo_cache_freeze (&device->snapshot_cache);

    surface->snapshot_cache_entry.hash = (unsigned long) surface;
    status = _cairo_cache_insert (&device->snapshot_cache,
                                  &surface->snapshot_cache_entry);
    if (unlikely (status)) {
        surface->snapshot_cache_entry.hash = 0;
        return status;
    }

    return CAIRO_STATUS_SUCCESS;
}

void
intel_surface_detach_snapshot (cairo_surface_t *abstract_surface)
{
    intel_surface_t *surface = (intel_surface_t *) abstract_surface;

    if (surface->snapshot_cache_entry.hash) {
        intel_device_t *device;

        device = (intel_device_t *) surface->drm.base.device;
        _cairo_cache_remove (&device->snapshot_cache,
                             &surface->snapshot_cache_entry);
        assert (surface->snapshot_cache_entry.hash == 0);
    }
}

void
intel_snapshot_cache_thaw (intel_device_t *device)
{
    if (device->snapshot_cache.freeze_count)
        _cairo_cache_thaw (&device->snapshot_cache);
}

static cairo_bool_t
_gradient_color_stops_equal (const cairo_gradient_pattern_t *a,
                             const cairo_gradient_pattern_t *b)
{
    unsigned int n;

    if (a->n_stops != b->n_stops)
        return FALSE;

    for (n = 0; n < a->n_stops; n++) {
        if (_cairo_fixed_from_double (a->stops[n].offset) !=
            _cairo_fixed_from_double (b->stops[n].offset))
        {
            return FALSE;
        }

        if (! _cairo_color_stop_equal (&a->stops[n].color, &b->stops[n].color))
            return FALSE;
    }

    return TRUE;
}

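/* A tiny xorshift-style PRNG (after Hars & Petruska, hence the name),
 * used only to pick random cache entries for eviction; it needs no
 * seeding and makes no cryptographic claims. */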
static uint32_t
hars_petruska_f54_1_random (void)
{
#define rol(x,k) ((x << k) | (x >> (32-k)))
    static uint32_t x;
    return x = (x ^ rol (x, 5) ^ rol (x, 24)) + 0x37798849;
#undef rol
}

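/* Choose how many texels to use for a 1-D gradient ramp: enough that
 * the steepest color channel changes by no more than roughly 1/128 per
 * texel between adjacent stops, clamped to [8, 1024] and rounded up to
 * a multiple of 8. */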
static int
intel_gradient_sample_width (const cairo_gradient_pattern_t *gradient)
{
    unsigned int n;
    int width;

    width = 8;
    for (n = 1; n < gradient->n_stops; n++) {
        double dx = gradient->stops[n].offset - gradient->stops[n-1].offset;
        double delta, max;
        int ramp;

        if (dx == 0)
            continue;

        max = gradient->stops[n].color.red -
              gradient->stops[n-1].color.red;

        delta = gradient->stops[n].color.green -
                gradient->stops[n-1].color.green;
        if (delta > max)
            max = delta;

        delta = gradient->stops[n].color.blue -
                gradient->stops[n-1].color.blue;
        if (delta > max)
            max = delta;

        delta = gradient->stops[n].color.alpha -
                gradient->stops[n-1].color.alpha;
        if (delta > max)
            max = delta;

        ramp = 128 * max / dx;
        if (ramp > width)
            width = ramp;
    }

    width = (width + 7) & -8;
    return MIN (width, 1024);
}

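/* Render a linear gradient into a width x 1 ARGB32 strip with pixman
 * and upload it as an untiled bo, so the shader can sample the ramp as
 * a 1-D texture. Results are cached per color-stop list; when the
 * cache is full, a random entry is evicted. */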
cairo_status_t
intel_gradient_render (intel_device_t *device,
                       const cairo_gradient_pattern_t *pattern,
                       intel_buffer_t *buffer)
{
    pixman_image_t *gradient, *image;
    pixman_gradient_stop_t pixman_stops_stack[32];
    pixman_gradient_stop_t *pixman_stops;
    pixman_point_fixed_t p1, p2;
    int width;
    unsigned int i;
    cairo_status_t status;

    for (i = 0; i < device->gradient_cache.size; i++) {
        if (_gradient_color_stops_equal (pattern,
                                         &device->gradient_cache.cache[i].pattern.gradient.base)) {
            *buffer = device->gradient_cache.cache[i].buffer;
            return CAIRO_STATUS_SUCCESS;
        }
    }

    pixman_stops = pixman_stops_stack;
    if (unlikely (pattern->n_stops > ARRAY_LENGTH (pixman_stops_stack))) {
        pixman_stops = _cairo_malloc_ab (pattern->n_stops,
                                         sizeof (pixman_gradient_stop_t));
        if (unlikely (pixman_stops == NULL))
            return _cairo_error (CAIRO_STATUS_NO_MEMORY);
    }

    for (i = 0; i < pattern->n_stops; i++) {
        pixman_stops[i].x = _cairo_fixed_16_16_from_double (pattern->stops[i].offset);
        pixman_stops[i].color.red   = pattern->stops[i].color.red_short;
        pixman_stops[i].color.green = pattern->stops[i].color.green_short;
        pixman_stops[i].color.blue  = pattern->stops[i].color.blue_short;
        pixman_stops[i].color.alpha = pattern->stops[i].color.alpha_short;
    }

    width = intel_gradient_sample_width (pattern);

    p1.x = 0;
    p1.y = 0;
    p2.x = width << 16;
    p2.y = 0;

    gradient = pixman_image_create_linear_gradient (&p1, &p2,
                                                    pixman_stops,
                                                    pattern->n_stops);
    if (pixman_stops != pixman_stops_stack)
        free (pixman_stops);

    if (unlikely (gradient == NULL))
        return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    pixman_image_set_filter (gradient, PIXMAN_FILTER_BILINEAR, NULL, 0);
    pixman_image_set_repeat (gradient, PIXMAN_REPEAT_PAD);

    image = pixman_image_create_bits (PIXMAN_a8r8g8b8, width, 1, NULL, 0);
    if (unlikely (image == NULL)) {
        pixman_image_unref (gradient);
        return _cairo_error (CAIRO_STATUS_NO_MEMORY);
    }

    pixman_image_composite32 (PIXMAN_OP_SRC,
                              gradient, NULL, image,
                              0, 0,
                              0, 0,
                              0, 0,
                              width, 1);

    pixman_image_unref (gradient);

    buffer->bo = intel_bo_create (device,
                                  4*width, 4*width,
                                  FALSE, I915_TILING_NONE, 4*width);
    if (unlikely (buffer->bo == NULL)) {
        pixman_image_unref (image);
        return _cairo_error (CAIRO_STATUS_NO_MEMORY);
    }

    intel_bo_write (device, buffer->bo, 0, 4*width, pixman_image_get_data (image));
    pixman_image_unref (image);

    buffer->offset = 0;
    buffer->width  = width;
    buffer->height = 1;
    buffer->stride = 4*width;
    buffer->format = CAIRO_FORMAT_ARGB32;
    buffer->map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
    buffer->map0 |= ((width - 1) << MS3_WIDTH_SHIFT);
    buffer->map1 = (width - 1) << MS4_PITCH_SHIFT;

    if (device->gradient_cache.size < GRADIENT_CACHE_SIZE) {
        i = device->gradient_cache.size++;
    } else {
        i = hars_petruska_f54_1_random () % GRADIENT_CACHE_SIZE;
        _cairo_pattern_fini (&device->gradient_cache.cache[i].pattern.base);
        intel_bo_destroy (device, device->gradient_cache.cache[i].buffer.bo);
    }

    status = _cairo_pattern_init_copy (&device->gradient_cache.cache[i].pattern.base,
                                       &pattern->base);
    if (unlikely (status)) {
        intel_bo_destroy (device, buffer->bo);
        /* Ensure the cache is correctly initialised for i965_device_destroy */
        _cairo_pattern_init_solid (&device->gradient_cache.cache[i].pattern.solid,
                                   CAIRO_COLOR_TRANSPARENT);
        return status;
    }

    device->gradient_cache.cache[i].buffer = *buffer;
    return CAIRO_STATUS_SUCCESS;
}