1 /* Cairo - a vector graphics library with display and print output
2  *
3  * Copyright © 2009 Chris Wilson
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it either under the terms of the GNU Lesser General Public
7  * License version 2.1 as published by the Free Software Foundation
8  * (the "LGPL") or, at your option, under the terms of the Mozilla
9  * Public License Version 1.1 (the "MPL"). If you do not alter this
10  * notice, a recipient may use your version of this file under either
11  * the MPL or the LGPL.
12  *
13  * You should have received a copy of the LGPL along with this library
14  * in the file COPYING-LGPL-2.1; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
16  * You should have received a copy of the MPL along with this library
17  * in the file COPYING-MPL-1.1
18  *
19  * The contents of this file are subject to the Mozilla Public License
20  * Version 1.1 (the "License"); you may not use this file except in
21  * compliance with the License. You may obtain a copy of the License at
22  * http://www.mozilla.org/MPL/
23  *
24  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
25  * OF ANY KIND, either express or implied. See the LGPL or the MPL for
26  * the specific language governing rights and limitations.
27  *
28  * **************************************************************************
29  * This work was initially based upon xf86-video-intel/src/i915_render.c:
30  * Copyright © 2006 Intel Corporation
31  *
32  * Permission is hereby granted, free of charge, to any person obtaining a
33  * copy of this software and associated documentation files (the "Software"),
34  * to deal in the Software without restriction, including without limitation
35  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
36  * and/or sell copies of the Software, and to permit persons to whom the
37  * Software is furnished to do so, subject to the following conditions:
38  *
39  * The above copyright notice and this permission notice (including the next
40  * paragraph) shall be included in all copies or substantial portions of the
41  * Software.
42  *
43  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
44  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
45  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
46  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
47  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
48  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
49  * SOFTWARE.
50  *
51  * Authors:
52  *    Wang Zhenyu <zhenyu.z.wang@intel.com>
53  *    Eric Anholt <eric@anholt.net>
54  *
55  * **************************************************************************
56  * and also upon libdrm/intel/intel_bufmgr_gem.c:
57  * Copyright © 2007 Red Hat Inc.
58  * Copyright © 2007 Intel Corporation
59  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
60  * All Rights Reserved.
61  *
62  * Permission is hereby granted, free of charge, to any person obtaining a
63  * copy of this software and associated documentation files (the
64  * "Software"), to deal in the Software without restriction, including
65  * without limitation the rights to use, copy, modify, merge, publish,
66  * distribute, sub license, and/or sell copies of the Software, and to
67  * permit persons to whom the Software is furnished to do so, subject to
68  * the following conditions:
69  *
70  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
71  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
72  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
73  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
74  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
75  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
76  * USE OR OTHER DEALINGS IN THE SOFTWARE.
77  *
78  * The above copyright notice and this permission notice (including the
79  * next paragraph) shall be included in all copies or substantial portions
80  * of the Software.
81  *
82  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
83  *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
84  *          Eric Anholt <eric@anholt.net>
85  *          Dave Airlie <airlied@linux.ie>
86  */
87 
88 /* XXX
89  *
90  * - Per thread context? Would it actually avoid many locks?
91  *
92  */
93 
94 #include "cairoint.h"
95 
96 #include "cairo-drm-private.h"
97 #include "cairo-drm-intel-private.h"
98 #include "cairo-drm-intel-command-private.h"
99 #include "cairo-drm-intel-ioctl-private.h"
100 #include "cairo-drm-i915-private.h"
101 
102 #include "cairo-boxes-private.h"
103 #include "cairo-cache-private.h"
104 #include "cairo-composite-rectangles-private.h"
105 #include "cairo-default-context-private.h"
106 #include "cairo-error-private.h"
107 #include "cairo-freelist-private.h"
108 #include "cairo-list-private.h"
109 #include "cairo-path-fixed-private.h"
110 #include "cairo-region-private.h"
111 #include "cairo-surface-offset-private.h"
112 #include "cairo-image-surface-private.h"
113 
114 #include <sys/ioctl.h>
115 #include <sys/mman.h>
116 #include <errno.h>
117 
/* Invariant 3D state emitted at the head of every batch buffer: it disables
 * the fixed-function features cairo never uses, so only the per-operation
 * state needs to be emitted later. */
static const uint32_t i915_batch_setup[] = {
    /* Disable line anti-aliasing */
    _3DSTATE_AA_CMD,

    /* Disable independent alpha blend */
    _3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
	IAB_MODIFY_ENABLE |
	IAB_MODIFY_FUNC | (BLENDFUNC_ADD << IAB_FUNC_SHIFT) |
	IAB_MODIFY_SRC_FACTOR | (BLENDFACT_ONE << IAB_SRC_FACTOR_SHIFT) |
	IAB_MODIFY_DST_FACTOR | (BLENDFACT_ZERO << IAB_DST_FACTOR_SHIFT),

    /* Disable texture crossbar: bind each coordinate set to itself */
    _3DSTATE_COORD_SET_BINDINGS |
	CSB_TCB (0, 0) |
	CSB_TCB (1, 1) |
	CSB_TCB (2, 2) |
	CSB_TCB (3, 3) |
	CSB_TCB (4, 4) |
	CSB_TCB (5, 5) |
	CSB_TCB (6, 6) |
	CSB_TCB (7, 7),

    _3DSTATE_MODES_4_CMD | ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC (LOGICOP_COPY),

    /* Load immediate state S2..S6; the five state dwords follow below. */
    _3DSTATE_LOAD_STATE_IMMEDIATE_1 |
	I1_LOAD_S (2) |
	I1_LOAD_S (3) |
	I1_LOAD_S (4) |
	I1_LOAD_S (5) |
	I1_LOAD_S (6) |
	4,
    S2_TEXCOORD_NONE,			/* S2: no texture coordinates */
    0, /* S3: disable texture coordinate wrap-shortest */
    (1 << S4_POINT_WIDTH_SHIFT) |	/* S4: rasteriser state */
	S4_LINE_WIDTH_ONE |
	S4_FLATSHADE_ALPHA |
	S4_FLATSHADE_FOG |
	S4_FLATSHADE_SPECULAR |
	S4_FLATSHADE_COLOR |
	S4_CULLMODE_NONE |
	S4_VFMT_XY,
    0, /* S5: disable stencil buffer */
    S6_COLOR_WRITE_ENABLE,		/* S6 */

    _3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT,

    /* disable indirect state */
    _3DSTATE_LOAD_INDIRECT,
    0,
};
168 
169 static const cairo_surface_backend_t i915_surface_backend;
170 
171 static cairo_surface_t *
172 i915_surface_create_from_cacheable_image (cairo_drm_device_t *base_dev,
173 	                                   cairo_surface_t *source);
174 
/* Submit the accumulated batch (held in @bo, starting at @offset) to the
 * kernel via DRM_IOCTL_I915_GEM_EXECBUFFER2, then retire every buffer
 * referenced by the batch and reset the per-batch bookkeeping.
 *
 * Returns CAIRO_STATUS_SUCCESS on success; any submission failure is
 * reported as CAIRO_STATUS_NO_MEMORY. */
static cairo_status_t
i915_bo_exec (i915_device_t *device, intel_bo_t *bo, uint32_t offset)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    int ret, cnt, i;

    /* Add the batch buffer to the validation list.  If it already sits in
     * the final slot (e.g. a resubmission), reuse that slot rather than
     * growing the list. */
    cnt = device->batch.exec_count;
    if (cnt > 0 && bo->base.handle == device->batch.exec[cnt-1].handle)
	i = cnt - 1;
    else
	i = device->batch.exec_count++;
    device->batch.exec[i].handle = bo->base.handle;
    device->batch.exec[i].relocation_count = device->batch.reloc_count;
    device->batch.exec[i].relocs_ptr = (uintptr_t) device->batch.reloc;
    device->batch.exec[i].alignment = 0;
    device->batch.exec[i].offset = 0;
    device->batch.exec[i].flags = 0;
    device->batch.exec[i].rsvd1 = 0;
    device->batch.exec[i].rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t) device->batch.exec;
    execbuf.buffer_count = device->batch.exec_count;
    execbuf.batch_start_offset = offset;
    execbuf.batch_len = (device->batch.used << 2) + sizeof (device->batch_header);
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.num_cliprects = 0;
    execbuf.cliprects_ptr = 0;
    execbuf.flags = 0;
    execbuf.rsvd1 = 0;
    execbuf.rsvd2 = 0;

    /* Submit, retrying if the ioctl is interrupted by a signal. */
    do {
	ret = ioctl (device->intel.base.fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    } while (ret != 0 && errno == EINTR);

    /* In sync-debug mode, block until the GPU has finished this batch. */
    if (device->debug & I915_DEBUG_SYNC && ret == 0)
	ret = ! intel_bo_wait (&device->intel, bo);

    /* Dead diagnostic code ("if (0 && ...)"): dumps the validation and
     * relocation lists plus the decoded batch on submission failure.
     * Flip the 0 to 1 when debugging execbuffer errors. */
    if (0 && ret) {
	int n, m;

	fprintf (stderr, "Batch submission failed: %d\n", errno);
	fprintf (stderr, "   relocation entries: %d/%d\n",
		 device->batch.reloc_count, I915_MAX_RELOCS);
	fprintf (stderr, "   gtt size: (%zd/%zd), (%zd/%zd)\n",
		 device->batch.est_gtt_size, device->batch.gtt_avail_size,
		 device->batch.total_gtt_size, device->intel.gtt_avail_size);

	fprintf (stderr, "   buffers:\n");
	for (n = 0; n < device->batch.exec_count; n++) {
	    fprintf (stderr, "  exec[%d] = %d, %d/%d bytes, gtt = %qx\n",
		    n,
		    device->batch.exec[n].handle,
		    n == device->batch.exec_count - 1 ? bo->base.size : device->batch.target_bo[n]->base.size,
		    n == device->batch.exec_count - 1 ? bo->full_size : device->batch.target_bo[n]->full_size,
		    device->batch.exec[n].offset);
	}
	for (n = 0; n < device->batch.reloc_count; n++) {
	    for (m = 0; m < device->batch.exec_count; m++)
		if (device->batch.exec[m].handle == device->batch.reloc[n].target_handle)
		    break;

	    fprintf (stderr, "  reloc[%d] = %d @ %qx -> %qx + %qx\n", n,
		     device->batch.reloc[n].target_handle,
		     device->batch.reloc[n].offset,
		     (unsigned long long) device->batch.exec[m].offset,
		     (unsigned long long) device->batch.reloc[n].delta);

	    device->batch_base[(device->batch.reloc[n].offset - sizeof (device->batch_header)) / 4] =
		device->batch.exec[m].offset + device->batch.reloc[n].delta;
	}

	intel_dump_batchbuffer (device->batch_header,
				execbuf.batch_len,
				device->intel.base.chip_id);
    }
    assert (ret == 0);

    VG (VALGRIND_MAKE_MEM_DEFINED (device->batch.exec, sizeof (device->batch.exec[0]) * i));

    /* The kernel wrote back the final offset of the batch bo; record it. */
    bo->offset = device->batch.exec[i].offset;
    bo->busy = TRUE;
    if (bo->virtual)
	intel_bo_unmap (bo);
    bo->cpu = FALSE;

    /* Retire every target bo referenced by this batch (cnt was sampled
     * before the batch bo itself was appended): record its new offset,
     * clear its per-batch state and drop the reference taken in
     * i915_batch_add_reloc().  Note the loop variable shadows the batch
     * bo parameter. */
    while (cnt--) {
	intel_bo_t *bo = device->batch.target_bo[cnt];

	bo->offset = device->batch.exec[cnt].offset;
	bo->exec = NULL;
	bo->busy = TRUE;
	bo->batch_read_domains = 0;
	bo->batch_write_domain = 0;
	cairo_list_del (&bo->cache_list);

	if (bo->virtual)
	    intel_bo_unmap (bo);
	bo->cpu = FALSE;

	intel_bo_destroy (&device->intel, bo);
    }
    assert (cairo_list_is_empty (&device->intel.bo_in_flight));

    /* Reset the bookkeeping for the next batch. */
    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
    device->batch.fences = 0;

    device->batch.est_gtt_size = I915_BATCH_SIZE;
    device->batch.total_gtt_size = I915_BATCH_SIZE;

    return ret == 0 ? CAIRO_STATUS_SUCCESS : _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
290 
/* Record a relocation: the dword at batch position @pos must be patched by
 * the kernel to the final GPU address of @bo plus @offset.  On the first
 * reference to @bo within the current batch, the bo is also appended to
 * the execbuffer validation list, taking a reference that is dropped when
 * the batch is submitted (see i915_bo_exec()). */
void
i915_batch_add_reloc (i915_device_t *device,
		      uint32_t pos,
		      intel_bo_t *bo,
		      uint32_t offset,
		      uint32_t read_domains,
		      uint32_t write_domain,
		      cairo_bool_t needs_fence)
{
    int index;

    assert (offset < bo->base.size);

    if (bo->exec == NULL) {
	/* First use of this bo in the batch: account for its aperture
	 * usage and add it to the validation list. */
	device->batch.total_gtt_size += bo->base.size;

	/* An idle bo is presumably not yet resident, so it adds to the
	 * estimated (new) GTT footprint of the batch. */
	if (! bo->busy)
	    device->batch.est_gtt_size += bo->base.size;

	assert (device->batch.exec_count < ARRAY_LENGTH (device->batch.exec));

	index = device->batch.exec_count++;
	device->batch.exec[index].handle = bo->base.handle;
	device->batch.exec[index].relocation_count = 0;
	device->batch.exec[index].relocs_ptr = 0;
	device->batch.exec[index].alignment = 0;
	device->batch.exec[index].offset = 0;
	device->batch.exec[index].flags = 0;
	device->batch.exec[index].rsvd1 = 0;
	device->batch.exec[index].rsvd2 = 0;

	device->batch.target_bo[index] = intel_bo_reference (bo);

	bo->exec = &device->batch.exec[index];
    }

    if (bo->tiling != I915_TILING_NONE) {
	uint32_t alignment;

#if 0
	/* We presume that we will want to use a fence with X tiled objects... */
	if (needs_fence || bo->tiling == I915_TILING_X)
	    alignment = bo->full_size;
	else
	    alignment = 2*((bo->stride + 4095) & -4096);
#else
	alignment = bo->full_size;
#endif
	if (bo->exec->alignment < alignment)
	    bo->exec->alignment = alignment;

	/* Count fenced objects once per batch so the aperture check can
	 * compare against the hardware fence-register limit. */
	if (needs_fence && (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
	    bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
	    device->batch.fences++;

	    intel_bo_set_tiling (&device->intel, bo);
	}
    }

    assert (device->batch.reloc_count < ARRAY_LENGTH (device->batch.reloc));

    /* Reloc offsets are in bytes from the start of the submitted buffer,
     * which begins with batch_header. */
    index = device->batch.reloc_count++;
    device->batch.reloc[index].offset = (pos << 2) + sizeof (device->batch_header);
    device->batch.reloc[index].delta = offset;
    device->batch.reloc[index].target_handle = bo->base.handle;
    device->batch.reloc[index].read_domains = read_domains;
    device->batch.reloc[index].write_domain = write_domain;
    device->batch.reloc[index].presumed_offset = bo->offset;

    /* A bo may be written through at most one domain per batch. */
    assert (write_domain == 0 || bo->batch_write_domain == 0 || bo->batch_write_domain == write_domain);
    bo->batch_read_domains |= read_domains;
    bo->batch_write_domain |= write_domain;
}
364 
/* Finish the in-flight vertex data: emit the pending RECTLIST primitive,
 * upload the CPU staging buffer into a fresh vertex bo and hook it into
 * the batch via a relocation.  The 8-byte-aligned tail of the new bo is
 * remembered (last_vbo_*) so a subsequent batch flush can reuse it as
 * batch space.  May flush the batch when the aperture check fails; errors
 * unwind non-locally through the active shader's longjmp buffer. */
void
i915_vbo_finish (i915_device_t *device)
{
    intel_bo_t *vbo;

    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));
    assert (device->vbo_used);

    if (device->vertex_count) {
	if (device->vbo == 0) {
	    /* First primitive from this vbo: emit S0/S1, recording in
	     * device->vbo the dword index of S0 (the buffer address slot)
	     * so the relocation can patch it later. */
	    OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
		       I1_LOAD_S (0) |
		       I1_LOAD_S (1) |
		       1);
	    device->vbo = device->batch.used++;
	    device->vbo_max_index = device->batch.used;
	    OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
		       (device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT));
	}

	OUT_DWORD (PRIM3D_RECTLIST |
		   PRIM3D_INDIRECT_SEQUENTIAL |
		   device->vertex_count);
	OUT_DWORD (device->vertex_index);
    }

    if (device->last_vbo != NULL) {
	/* The previous vbo may still be read by the GPU; keep it alive
	 * until the batch completes before dropping our reference. */
	intel_bo_in_flight_add (&device->intel, device->last_vbo);
	intel_bo_destroy (&device->intel, device->last_vbo);
    }

    /* OR the final vertex count into the dword recorded in vbo_max_index
     * (the S1 state emitted above). */
    device->batch_base[device->vbo_max_index] |= device->vertex_index + device->vertex_count;

    /* will include a few bytes of inter-array padding */
    vbo = intel_bo_create (&device->intel,
			   device->vbo_used, device->vbo_used,
			   FALSE, I915_TILING_NONE, 0);
    i915_batch_fill_reloc (device, device->vbo, vbo, 0,
			   I915_GEM_DOMAIN_VERTEX, 0);
    intel_bo_write (&device->intel, vbo, 0, device->vbo_used, device->vbo_base);
    device->last_vbo = vbo;
    /* Remember the 8-byte-aligned tail of the bo for later reuse. */
    device->last_vbo_offset = (device->vbo_used+7)&-8;
    device->last_vbo_space = vbo->base.size - device->last_vbo_offset;

    device->vbo = 0;

    device->vbo_used = device->vbo_offset = 0;
    device->vertex_index = device->vertex_count = 0;

    if (! i915_check_aperture_size (device, 1, I915_VBO_SIZE, I915_VBO_SIZE)) {
	cairo_status_t status;

	/* Not enough aperture space for another vbo: submit the batch now
	 * and re-emit the current shader state into the fresh batch. */
	status = i915_batch_flush (device);
	if (unlikely (status))
	    longjmp (device->shader->unwind, status);

	status = i915_shader_commit (device->shader, device);
	if (unlikely (status))
	    longjmp (device->shader->unwind, status);
    }
}
426 
427 /* XXX improve state tracker/difference and flush state on vertex emission */
428 static void
i915_device_reset(i915_device_t * device)429 i915_device_reset (i915_device_t *device)
430 {
431     if (device->current_source != NULL)
432 	*device->current_source = 0;
433     if (device->current_mask != NULL)
434 	*device->current_mask = 0;
435     if (device->current_clip != NULL)
436 	*device->current_clip = 0;
437 
438     device->current_target = NULL;
439     device->current_size = 0;
440     device->current_source = NULL;
441     device->current_mask = NULL;
442     device->current_clip = NULL;
443     device->current_texcoords = ~0;
444     device->current_blend = 0;
445     device->current_n_constants = 0;
446     device->current_n_samplers = 0;
447     device->current_n_maps = 0;
448     device->current_colorbuf = 0;
449     device->current_diffuse = 0;
450     device->current_program = ~0;
451     device->clear_alpha = ~0;
452 
453     device->last_source_fragment = ~0;
454 }
455 
456 static void
i915_batch_cleanup(i915_device_t * device)457 i915_batch_cleanup (i915_device_t *device)
458 {
459     int i;
460 
461     for (i = 0; i < device->batch.exec_count; i++) {
462 	intel_bo_t *bo = device->batch.target_bo[i];
463 
464 	bo->exec = NULL;
465 	bo->batch_read_domains = 0;
466 	bo->batch_write_domain = 0;
467 	cairo_list_del (&bo->cache_list);
468 
469 	intel_bo_destroy (&device->intel, bo);
470     }
471 
472     device->batch.exec_count = 0;
473     device->batch.reloc_count = 0;
474 }
475 
/* Counterpart of i915_vbo_finish() used at batch-submission time: either
 * spill the staged vertices into their own bo (when a vbo is already
 * referenced or the batch cannot hold them inline), or append them
 * directly to the batch as an immediate-mode RECTLIST. */
static void
i915_batch_vbo_finish (i915_device_t *device)
{
    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));

    if (device->vbo || i915_batch_space (device) < (int32_t) device->vbo_used) {
	intel_bo_t *vbo;

	if (device->vertex_count) {
	    if (device->vbo == 0) {
		/* Emit S0/S1; device->vbo records the S0 dword index so
		 * the relocation can patch in the buffer address. */
		OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
			   I1_LOAD_S (0) |
			   I1_LOAD_S (1) |
			   1);
		device->vbo = device->batch.used++;
		device->vbo_max_index = device->batch.used;
		OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
			(device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT));
	    }

	    OUT_DWORD (PRIM3D_RECTLIST |
		       PRIM3D_INDIRECT_SEQUENTIAL |
		       device->vertex_count);
	    OUT_DWORD (device->vertex_index);
	}

	/* Unlike i915_vbo_finish(), the previous vbo is released without
	 * being marked in-flight, as the batch is about to be submitted. */
	if (device->last_vbo != NULL)
	    intel_bo_destroy (&device->intel, device->last_vbo);

	/* OR the final vertex count into the S1 dword recorded above. */
	device->batch_base[device->vbo_max_index] |= device->vertex_index + device->vertex_count;

	/* will include a few bytes of inter-array padding */
	vbo = intel_bo_create (&device->intel,
			       device->vbo_used, device->vbo_used,
			       FALSE, I915_TILING_NONE, 0);
	i915_batch_fill_reloc (device, device->vbo,
			       vbo, 0,
			       I915_GEM_DOMAIN_VERTEX, 0);
	intel_bo_write (&device->intel, vbo, 0, device->vbo_used, device->vbo_base);
	device->last_vbo = vbo;
	/* Remember the 8-byte-aligned tail for reuse as batch space. */
	device->last_vbo_offset = (device->vbo_used+7)&-8;
	device->last_vbo_space = vbo->base.size - device->last_vbo_offset;

	device->vbo = 0;
    }
    else
    {
	/* Only a single rectlist in this batch, and no active vertex buffer. */
	OUT_DWORD (PRIM3D_RECTLIST | (device->vbo_used / 4 - 1));

	memcpy (BATCH_PTR (device), device->vbo_base, device->vbo_used);
	device->batch.used += device->vbo_used >> 2;
    }

    device->vbo_used = device->vbo_offset = 0;
    device->vertex_index = device->vertex_count = 0;
}
533 
/* Flush the accumulated batch: finalize any pending vertex data, terminate
 * the batch, copy it into a bo — reusing the tail of the last vertex
 * buffer when it fits — and submit it to the kernel.  Always resets the
 * batch and cached device state, even on failure. */
cairo_status_t
i915_batch_flush (i915_device_t *device)
{
    intel_bo_t *batch;
    cairo_status_t status;
    uint32_t length, offset;
    int n;

    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));

    if (device->vbo_used)
	i915_batch_vbo_finish (device);

    /* Nothing was emitted since the last flush. */
    if (device->batch.used == 0)
	return CAIRO_STATUS_SUCCESS;

    /* Terminate the batch, padding with a NOOP so that the total length
     * (header + dwords) stays qword aligned. */
    i915_batch_emit_dword (device, MI_BATCH_BUFFER_END);
    if ((device->batch.used & 1) != ((sizeof (device->batch_header)>>2) & 1))
	i915_batch_emit_dword (device, MI_NOOP);

    length = (device->batch.used << 2) + sizeof (device->batch_header);

    /* NB: it is faster to copy the data then map/unmap the batch,
     * presumably because we frequently only use a small part of the buffer.
     */
    batch = NULL;
    if (device->last_vbo) {
	/* Reuse the slack at the end of the last vertex buffer for the
	 * batch itself, when it fits. */
	if (length <= device->last_vbo_space) {
	    batch = device->last_vbo;
	    offset = device->last_vbo_offset;

	    /* fixup the relocations to account for the batch offset */
	    for (n = 0; n < device->batch.reloc_count; n++)
		device->batch.reloc[n].offset += offset;
	} else
	    intel_bo_destroy (&device->intel, device->last_vbo);
	device->last_vbo = NULL;
    }
    if (batch == NULL) {
	batch = intel_bo_create (&device->intel,
				 length, length,
				 FALSE, I915_TILING_NONE, 0);
	if (unlikely (batch == NULL)) {
	    status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	    i915_batch_cleanup (device);
	    goto BAIL;
	}

	offset = 0;
    }
    intel_bo_write (&device->intel, batch, offset, length, device->batch_header);
    status = i915_bo_exec (device, batch, offset);
    intel_bo_destroy (&device->intel, batch);

BAIL:
    /* Reset the batch and thaw the caches regardless of the outcome. */
    device->batch.used = 0;

    intel_glyph_cache_unpin (&device->intel);
    intel_snapshot_cache_thaw (&device->intel);

    i915_device_reset (device);

    return status;
}
598 
599 #if 0
/* (Compiled out by the surrounding #if 0.)
 * Reserve staging space for up to @num_rects rectangles, flushing the
 * current vbo first if it cannot hold even one.  Returns a pointer to the
 * reserved vertices and stores in *count how many rectangles actually fit
 * (three vertices each, as consumed by RECTLIST). */
static float *
i915_add_rectangles (i915_device_t *device, int num_rects, int *count)
{
    float *vertices;
    uint32_t size;
    int cnt;

    assert (device->floats_per_vertex);

    size = device->rectangle_size;
    if (unlikely (device->vbo_offset + size > I915_VBO_SIZE))
	i915_vbo_finish (device);

    vertices = (float *) (device->vbo_base + device->vbo_offset);
    cnt = (I915_VBO_SIZE - device->vbo_offset) / size;
    if (cnt > num_rects)
	cnt = num_rects;
    device->vbo_used = device->vbo_offset += size * cnt;
    device->vertex_count += 3 * cnt;
    *count = cnt;
    return vertices;
}
622 #endif
623 
624 static cairo_surface_t *
i915_surface_create_similar(void * abstract_other,cairo_content_t content,int width,int height)625 i915_surface_create_similar (void *abstract_other,
626 			     cairo_content_t content,
627 			     int width, int height)
628 {
629     i915_surface_t *other;
630     cairo_format_t format;
631     uint32_t tiling = I915_TILING_DEFAULT;
632 
633     other = abstract_other;
634     if (content == other->intel.drm.base.content)
635 	format = other->intel.drm.format;
636     else
637 	format = _cairo_format_from_content (content);
638 
639     if (width * _cairo_format_bits_per_pixel (format) > 8 * 32*1024 || height > 64*1024)
640 	return NULL;
641 
642     /* we presume that a similar surface will be used for blitting */
643     if (i915_surface_needs_tiling (other))
644 	tiling = I915_TILING_X;
645 
646     return i915_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
647 					 format,
648 					 width, height,
649 					 tiling, TRUE);
650 }
651 
/* Surface destructor: release the ancillary stencil bo, detach the surface
 * from any cached device bindings, and drop its slot in the shared image
 * cache (freeing the cache when the last reference goes away). */
static cairo_status_t
i915_surface_finish (void *abstract_surface)
{
    i915_surface_t *surface = abstract_surface;
    i915_device_t *device = i915_device (surface);

    if (surface->stencil != NULL) {
	/* The stencil may still be in use by the GPU; defer the release
	 * until the current batch completes. */
	intel_bo_in_flight_add (&device->intel, surface->stencil);
	intel_bo_destroy (&device->intel, surface->stencil);
    }

    if (surface->is_current_texture) {
	/* Invalidate any cached sampler bindings pointing at us. */
	if (surface->is_current_texture & CURRENT_SOURCE)
	    device->current_source = NULL;
	if (surface->is_current_texture & CURRENT_MASK)
	    device->current_mask = NULL;
	if (surface->is_current_texture & CURRENT_CLIP)
	    device->current_clip = NULL;
	device->current_n_samplers = 0;
    }

    if (surface == device->current_target)
	device->current_target = NULL;

    if (surface->cache != NULL) {
	i915_image_private_t *node = surface->cache;
	intel_buffer_cache_t *cache = node->container;

	if (--cache->ref_count == 0) {
	    /* Last user: tear down the whole cache buffer. */
	    intel_bo_in_flight_add (&device->intel, cache->buffer.bo);
	    intel_bo_destroy (&device->intel, cache->buffer.bo);
	    _cairo_rtree_fini (&cache->rtree);
	    cairo_list_del (&cache->link);
	    free (cache);
	} else {
	    /* Return this node's area to the rtree for reuse. */
	    node->node.state = CAIRO_RTREE_NODE_AVAILABLE;
	    cairo_list_move (&node->node.link, &cache->rtree.available);
	    _cairo_rtree_node_collapse (&cache->rtree, node->node.parent);
	}
    }

    return intel_surface_finish (&surface->intel);
}
695 
696 static cairo_status_t
i915_surface_batch_flush(i915_surface_t * surface)697 i915_surface_batch_flush (i915_surface_t *surface)
698 {
699     cairo_status_t status;
700     intel_bo_t *bo;
701 
702     assert (surface->intel.drm.fallback == NULL);
703 
704     bo = to_intel_bo (surface->intel.drm.bo);
705     if (bo == NULL || bo->batch_write_domain == 0)
706 	return CAIRO_STATUS_SUCCESS;
707 
708     status = cairo_device_acquire (surface->intel.drm.base.device);
709     if (unlikely (status))
710 	return status;
711 
712     status = i915_batch_flush (i915_device (surface));
713     cairo_device_release (surface->intel.drm.base.device);
714 
715     return status;
716 }
717 
718 static cairo_status_t
i915_surface_flush(void * abstract_surface,unsigned flags)719 i915_surface_flush (void *abstract_surface,
720 		    unsigned flags)
721 {
722     i915_surface_t *surface = abstract_surface;
723     cairo_status_t status;
724 
725     if (flags)
726 	return CAIRO_STATUS_SUCCESS;
727 
728     if (surface->intel.drm.fallback == NULL) {
729 	if (surface->intel.drm.base.finished) {
730 	    /* Forgo flushing on finish as the user cannot access the surface directly. */
731 	    return CAIRO_STATUS_SUCCESS;
732 	}
733 
734 	if (surface->deferred_clear) {
735 	    status = i915_surface_clear (surface);
736 	    if (unlikely (status))
737 		return status;
738 	}
739 
740 	return i915_surface_batch_flush (surface);
741     }
742 
743     return intel_surface_flush (abstract_surface, flags);
744 }
745 
746 /* rasterisation */
747 
748 static cairo_status_t
_composite_boxes_spans(void * closure,cairo_span_renderer_t * renderer,const cairo_rectangle_int_t * extents)749 _composite_boxes_spans (void			*closure,
750 			cairo_span_renderer_t	*renderer,
751 			const cairo_rectangle_int_t	*extents)
752 {
753     cairo_boxes_t *boxes = closure;
754     cairo_rectangular_scan_converter_t converter;
755     struct _cairo_boxes_chunk *chunk;
756     cairo_status_t status;
757     int i;
758 
759     _cairo_rectangular_scan_converter_init (&converter, extents);
760     for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
761 	cairo_box_t *box = chunk->base;
762 	for (i = 0; i < chunk->count; i++) {
763 	    status = _cairo_rectangular_scan_converter_add_box (&converter, &box[i], 1);
764 	    if (unlikely (status))
765 		goto CLEANUP;
766 	}
767     }
768 
769     status = converter.base.generate (&converter.base, renderer);
770 
771 CLEANUP:
772     converter.base.destroy (&converter.base);
773     return status;
774 }
775 
/* Restore the semantics of an unbounded operator by painting the area of
 * extents->unbounded that lies outside extents->bounded:
 *  - without an applicable clip, the border area is simply CLEARed;
 *  - with a non-region clip, DEST_OVER with white is used so the clip
 *    mask itself limits the affected pixels.
 * At most four rectangles (top/left/right/bottom borders) are emitted. */
cairo_status_t
i915_fixup_unbounded (i915_surface_t *dst,
		      const cairo_composite_rectangles_t *extents,
		      cairo_clip_t *clip)
{
    i915_shader_t shader;
    i915_device_t *device;
    cairo_status_t status;

    if (clip != NULL) {
	cairo_region_t *clip_region = NULL;

	/* If the clip reduces to a region it imposes no extra mask here
	 * (the region is asserted to be NULL), so drop the clip. */
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	assert (clip_region == NULL);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    clip = NULL;
    } else {
	/* No clip and fully bounded: nothing outside to fix up. */
	if (extents->bounded.width == extents->unbounded.width &&
	    extents->bounded.height == extents->unbounded.height)
	{
	    return CAIRO_STATUS_SUCCESS;
	}
    }

    if (clip != NULL) {
	/* Paint white through the clip mask underneath the existing
	 * content, leaving unclipped areas untouched. */
	i915_shader_init (&shader, dst, CAIRO_OPERATOR_DEST_OVER, 1.);
	i915_shader_set_clip (&shader, clip);
	status = i915_shader_acquire_pattern (&shader,
					      &shader.source,
					      &_cairo_pattern_white.base,
					      &extents->unbounded);
	assert (status == CAIRO_STATUS_SUCCESS);
    } else {
	/* No clip: just clear the border area. */
	i915_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR, 1.);
	status = i915_shader_acquire_pattern (&shader,
					      &shader.source,
					      &_cairo_pattern_clear.base,
					      &extents->unbounded);
	assert (status == CAIRO_STATUS_SUCCESS);
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
	goto BAIL;	/* NOTE(review): shader is not fini'd on this path —
			 * confirm whether commit failure releases it. */

    if (extents->bounded.width == 0 || extents->bounded.height == 0) {
	/* Degenerate bounded area: the whole unbounded rect needs fixing. */
	shader.add_rectangle (&shader,
			      extents->unbounded.x,
			      extents->unbounded.y,
			      extents->unbounded.width,
			      extents->unbounded.height);
    } else {
	/* top */
	if (extents->bounded.y != extents->unbounded.y) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->unbounded.y,
				  extents->unbounded.width,
				  extents->bounded.y - extents->unbounded.y);
	}

	/* left */
	if (extents->bounded.x != extents->unbounded.x) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->bounded.y,
				  extents->bounded.x - extents->unbounded.x,
				  extents->bounded.height);
	}

	/* right */
	if (extents->bounded.x + extents->bounded.width != extents->unbounded.x + extents->unbounded.width) {
	    shader.add_rectangle (&shader,
				  extents->bounded.x + extents->bounded.width,
				  extents->bounded.y,
				  extents->unbounded.x + extents->unbounded.width - (extents->bounded.x + extents->bounded.width),
				  extents->bounded.height);
	}

	/* bottom */
	if (extents->bounded.y + extents->bounded.height != extents->unbounded.y + extents->unbounded.height) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->bounded.y + extents->bounded.height,
				  extents->unbounded.width,
				  extents->unbounded.y + extents->unbounded.height - (extents->bounded.y + extents->bounded.height));
	}
    }

    i915_shader_fini (&shader);
  BAIL:
    cairo_device_release (&device->intel.base.base);
    return status;
}
877 
/* Clear (or, when clipped, DEST_OVER-paint) the part of the unbounded
 * extents that the composited boxes did not cover.
 *
 * The uncovered remainder is computed by tessellating the unbounded
 * rectangle together with the drawn boxes: the rectangle is added with
 * reversed winding (note p1.x > p2.x below), so under
 * CAIRO_FILL_RULE_WINDING the drawn boxes cancel it and only the
 * uncovered area survives into `clear`.
 */
static cairo_status_t
i915_fixup_unbounded_boxes (i915_surface_t *dst,
			    const cairo_composite_rectangles_t *extents,
			    cairo_clip_t *clip,
			    cairo_boxes_t *boxes)
{
    cairo_boxes_t clear;
    cairo_box_t box;
    cairo_region_t *clip_region = NULL;
    cairo_status_t status;
    struct _cairo_boxes_chunk *chunk;
    int i;

    /* A single box is cheap enough for the rectangle-based fixup. */
    if (boxes->num_boxes <= 1)
	return i915_fixup_unbounded (dst, extents, clip);

    _cairo_boxes_init (&clear);

    /* Reversed x coordinates: encodes the unbounded rectangle with the
     * opposite winding to the drawn boxes (see function comment). */
    box.p1.x = _cairo_fixed_from_int (extents->unbounded.x + extents->unbounded.width);
    box.p1.y = _cairo_fixed_from_int (extents->unbounded.y);
    box.p2.x = _cairo_fixed_from_int (extents->unbounded.x);
    box.p2.y = _cairo_fixed_from_int (extents->unbounded.y + extents->unbounded.height);

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	/* If the clip reduces to a region it is applied below via
	 * _cairo_boxes_limit(), so drop the clip-surface handling. */
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    clip = NULL;
    }

    if (clip_region == NULL) {
	cairo_boxes_t tmp;

	_cairo_boxes_init (&tmp);

	status = _cairo_boxes_add (&tmp, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	/* Temporarily splice the caller's chunk list onto tmp so that
	 * all boxes are tessellated in one pass without copying them. */
	tmp.chunks.next = &boxes->chunks;
	tmp.num_boxes += boxes->num_boxes;

	status = _cairo_bentley_ottmann_tessellate_boxes (&tmp,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);

	/* Undo the splice; the caller's boxes remain untouched. */
	tmp.chunks.next = NULL;
    } else {
	pixman_box32_t *pbox;

	/* Constrain the tessellation output to the clip region. */
	pbox = pixman_region32_rectangles (&clip_region->rgn, &i);
	_cairo_boxes_limit (&clear, (cairo_box_t *) pbox, i);

	status = _cairo_boxes_add (&clear, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		status = _cairo_boxes_add (&clear, &chunk->base[i]);
		if (unlikely (status)) {
		    _cairo_boxes_fini (&clear);
		    return status;
		}
	    }
	}

	/* In-place tessellation: input and output alias deliberately. */
	status = _cairo_bentley_ottmann_tessellate_boxes (&clear,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);
    }

    if (likely (status == CAIRO_STATUS_SUCCESS && clear.num_boxes)) {
	i915_shader_t shader;
	i915_device_t *device;

	/* With an unrepresentable clip we must paint white underneath
	 * (DEST_OVER) through the clip surface; otherwise a plain CLEAR
	 * suffices. */
	if (clip != NULL) {
	    i915_shader_init (&shader, dst, CAIRO_OPERATOR_DEST_OVER, 1.);
	    i915_shader_set_clip (&shader, clip);
	    status = i915_shader_acquire_pattern (&shader,
						  &shader.source,
						  &_cairo_pattern_white.base,
						  &extents->unbounded);
	    assert (status == CAIRO_STATUS_SUCCESS);
	} else {
	    i915_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR, 1.);
	    status = i915_shader_acquire_pattern (&shader,
						  &shader.source,
						  &_cairo_pattern_clear.base,
						  &extents->unbounded);
	    assert (status == CAIRO_STATUS_SUCCESS);
	}

	device = i915_device (dst);
	status = cairo_device_acquire (&device->intel.base.base);
	if (unlikely (status))
	    goto err_shader;

	status = i915_shader_commit (&shader, device);
	if (unlikely (status))
	    goto err_device;

	/* Emit one rectangle per box of the uncovered area. */
	for (chunk = &clear.chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_part (chunk->base[i].p1.x);
		int y1 = _cairo_fixed_integer_part (chunk->base[i].p1.y);
		int x2 = _cairo_fixed_integer_part (chunk->base[i].p2.x);
		int y2 = _cairo_fixed_integer_part (chunk->base[i].p2.y);

		shader.add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	    }
	}
err_device:
	cairo_device_release (&device->intel.base.base);
err_shader:
	i915_shader_fini (&shader);
    }

    _cairo_boxes_fini (&clear);

    return status;
}
998 
999 static cairo_bool_t
i915_can_blt(i915_surface_t * dst,const cairo_pattern_t * pattern)1000 i915_can_blt (i915_surface_t *dst,
1001 	      const cairo_pattern_t *pattern)
1002 {
1003     const cairo_surface_pattern_t *spattern;
1004     i915_surface_t *src;
1005 
1006     spattern = (const cairo_surface_pattern_t *) pattern;
1007     src = (i915_surface_t *) spattern->surface;
1008 
1009     if (src->intel.drm.base.device != dst->intel.drm.base.device)
1010 	return FALSE;
1011 
1012     if (! i915_surface_needs_tiling (dst))
1013 	return FALSE;
1014 
1015     if (! _cairo_matrix_is_translation (&pattern->matrix))
1016 	return FALSE;
1017 
1018     if (! (pattern->filter == CAIRO_FILTER_NEAREST ||
1019 	   pattern->filter == CAIRO_FILTER_FAST))
1020     {
1021 	if (! _cairo_fixed_is_integer (_cairo_fixed_from_double (pattern->matrix.x0)) ||
1022 	    ! _cairo_fixed_is_integer (_cairo_fixed_from_double (pattern->matrix.y0)))
1023 	{
1024 	    return FALSE;
1025 	}
1026     }
1027 
1028     return _cairo_format_bits_per_pixel (src->intel.drm.format) ==
1029 	_cairo_format_bits_per_pixel (dst->intel.drm.format);
1030 }
1031 
1032 static cairo_status_t
i915_blt(i915_surface_t * src,i915_surface_t * dst,int src_x,int src_y,int width,int height,int dst_x,int dst_y,cairo_bool_t flush)1033 i915_blt (i915_surface_t *src,
1034 	  i915_surface_t *dst,
1035 	  int src_x, int src_y,
1036 	  int width, int height,
1037 	  int dst_x, int dst_y,
1038 	  cairo_bool_t flush)
1039 {
1040     i915_device_t *device;
1041     intel_bo_t *bo_array[2];
1042     cairo_status_t status;
1043     int br13, cmd;
1044 
1045     bo_array[0] = to_intel_bo (dst->intel.drm.bo);
1046     bo_array[1] = to_intel_bo (src->intel.drm.bo);
1047 
1048     status = i915_surface_fallback_flush (src);
1049     if (unlikely (status))
1050 	return status;
1051 
1052     device = i915_device (dst);
1053     status = cairo_device_acquire (&device->intel.base.base);
1054     if (unlikely (status))
1055 	return status;
1056 
1057     if (! i915_check_aperture_and_fences (device, bo_array, 2) ||
1058 	i915_batch_space (device) < 9)
1059     {
1060 	status = i915_batch_flush (device);
1061 	if (unlikely (status))
1062 	    goto CLEANUP;
1063     }
1064 
1065     cmd = XY_SRC_COPY_BLT_CMD;
1066     br13 = (0xCC << 16) | dst->intel.drm.stride;
1067     switch (dst->intel.drm.format) {
1068     default:
1069     case CAIRO_FORMAT_INVALID:
1070     case CAIRO_FORMAT_A1:
1071 	ASSERT_NOT_REACHED;
1072     case CAIRO_FORMAT_A8:
1073 	break;
1074     case CAIRO_FORMAT_RGB16_565:
1075 	br13 |= BR13_565;
1076 	break;
1077     case CAIRO_FORMAT_RGB24:
1078     case CAIRO_FORMAT_ARGB32:
1079 	br13 |= BR13_8888;
1080 	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
1081 	break;
1082     }
1083 
1084     OUT_DWORD (cmd);
1085     OUT_DWORD (br13);
1086     OUT_DWORD ((dst_y << 16) | dst_x);
1087     OUT_DWORD (((dst_y + height - 1) << 16) | (dst_x + width - 1));
1088     OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1089     OUT_DWORD ((src_y << 16) | src_x);
1090     OUT_DWORD (src->intel.drm.stride);
1091     OUT_RELOC_FENCED (src, I915_GEM_DOMAIN_RENDER, 0);
1092     /* require explicit RenderCache flush for 2D -> 3D sampler? */
1093     if (flush)
1094 	OUT_DWORD (MI_FLUSH);
1095 
1096 CLEANUP:
1097     cairo_device_release (&device->intel.base.base);
1098     return CAIRO_STATUS_SUCCESS;
1099 }
1100 
1101 cairo_status_t
i915_surface_copy_subimage(i915_device_t * device,i915_surface_t * src,const cairo_rectangle_int_t * extents,cairo_bool_t flush,i915_surface_t ** clone_out)1102 i915_surface_copy_subimage (i915_device_t *device,
1103 			    i915_surface_t *src,
1104 			    const cairo_rectangle_int_t *extents,
1105 			    cairo_bool_t flush,
1106 			    i915_surface_t **clone_out)
1107 {
1108     i915_surface_t *clone;
1109     cairo_status_t status;
1110 
1111     clone = (i915_surface_t *)
1112 	i915_surface_create_internal (&device->intel.base,
1113 				      src->intel.drm.format,
1114 				      extents->width,
1115 				      extents->height,
1116 				      I915_TILING_X, TRUE);
1117     if (unlikely (clone->intel.drm.base.status))
1118 	return clone->intel.drm.base.status;
1119 
1120     status = i915_blt (src, clone,
1121 		       extents->x, extents->y,
1122 		       extents->width, extents->height,
1123 		       0, 0,
1124 		       flush);
1125 
1126     if (unlikely (status)) {
1127 	cairo_surface_destroy (&clone->intel.drm.base);
1128 	return status;
1129     }
1130 
1131     *clone_out = clone;
1132     return CAIRO_STATUS_SUCCESS;
1133 }
1134 
/* Clear each pixel-aligned box of dst using XY_COLOR_BLT on the 2D
 * engine.  RGB24 destinations are cleared to opaque black (alpha forced
 * to 0xff); everything else is cleared to zero.
 */
static cairo_status_t
i915_clear_boxes (i915_surface_t *dst,
		  const cairo_boxes_t *boxes)
{
    i915_device_t *device = i915_device (dst);
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    intel_bo_t *bo_array[1] = { to_intel_bo (dst->intel.drm.bo) };
    int cmd, br13, clear = 0, i;

    cmd = XY_COLOR_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;	/* 0xCC = GXcopy raster op */
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
	/* fall through */
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
	/* RGB24 has no alpha channel: clear to opaque black. */
	clear = 0xff000000;
	/* fall through */
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    /* Ensure batch room: each box consumes 6 dwords. */
    if (! i915_check_aperture_and_fences (device, bo_array, 1) ||
	i915_batch_space (device) < 6 * boxes->num_boxes)
    {
	status = i915_batch_flush (device);
	if (unlikely (status))
	    goto RELEASE;
    }

    /* Flush pending 3D vertices before switching to 2D commands. */
    if (device->vertex_count)
	i915_vbo_flush (device);

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	const cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    /* Skip degenerate boxes. */
	    if (x2 <= x1 || y2 <= y1)
		continue;

	    OUT_DWORD (cmd);
	    OUT_DWORD (br13);
	    OUT_DWORD ((y1 << 16) | x1);
	    OUT_DWORD (((y2 - 1) << 16) | (x2 - 1));	/* inclusive bottom-right */
	    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	    OUT_DWORD (clear);
	}
    }

RELEASE:
    cairo_device_release (&device->intel.base.base);
    return status;
}
1204 
/* Copy a sub-rectangle of a (Y-tiled) source into a freshly created
 * X-tiled surface by rendering it through the 3D pipeline with a
 * SOURCE/NEAREST shader.  Used where the blitter cannot handle Y tiling.
 *
 * On success *clone_out owns the new surface; on failure it is
 * destroyed and the error returned.
 */
static cairo_status_t
i915_surface_extract_X_from_Y (i915_device_t *device,
			       i915_surface_t *src,
			       const cairo_rectangle_int_t *extents,
			       i915_surface_t **clone_out)
{
    i915_surface_t *clone;
    i915_shader_t shader;
    cairo_surface_pattern_t pattern;
    cairo_rectangle_int_t rect;
    cairo_status_t status;

    status = i915_surface_fallback_flush (src);
    if (unlikely (status))
	return status;

    clone = (i915_surface_t *)
	i915_surface_create_internal (&device->intel.base,
				      src->intel.drm.format,
				      extents->width,
				      extents->height,
				      I915_TILING_X, TRUE);
    /* Creation failure yields an error surface; report its status. */
    if (unlikely (clone->intel.drm.base.status))
	return clone->intel.drm.base.status;

    i915_shader_init (&shader, clone, CAIRO_OPERATOR_SOURCE, 1.);

    /* Sample the source 1:1, offset so that (extents->x, extents->y)
     * maps onto the clone's origin. */
    _cairo_pattern_init_for_surface (&pattern, &src->intel.drm.base);
    pattern.base.filter = CAIRO_FILTER_NEAREST;
    cairo_matrix_init_translate (&pattern.base.matrix, extents->x, extents->y);

    rect.x = rect.y = 0;
    rect.width = extents->width;
    rect.height = extents->height;
    status = i915_shader_acquire_pattern (&shader, &shader.source, &pattern.base, &rect);
    _cairo_pattern_fini (&pattern.base);

    if (unlikely (status))
	goto err_shader;

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto err_shader;

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
	goto err_device;

    /* One full-surface rectangle performs the copy. */
    shader.add_rectangle (&shader, 0, 0, extents->width, extents->height);

    cairo_device_release (&device->intel.base.base);
    i915_shader_fini (&shader);

    *clone_out = clone;
    return CAIRO_STATUS_SUCCESS;

err_device:
    cairo_device_release (&device->intel.base.base);
err_shader:
    i915_shader_fini (&shader);
    cairo_surface_destroy (&clone->intel.drm.base);
    return status;
}
1268 
1269 static cairo_status_t
i915_blt_boxes(i915_surface_t * dst,const cairo_pattern_t * pattern,const cairo_rectangle_int_t * extents,const cairo_boxes_t * boxes)1270 i915_blt_boxes (i915_surface_t *dst,
1271 		const cairo_pattern_t *pattern,
1272 		const cairo_rectangle_int_t *extents,
1273 		const cairo_boxes_t *boxes)
1274 {
1275     const cairo_surface_pattern_t *spattern;
1276     i915_device_t *device;
1277     i915_surface_t *src;
1278     cairo_surface_t *free_me = NULL;
1279     const struct _cairo_boxes_chunk *chunk;
1280     cairo_status_t status;
1281     int br13, cmd, tx, ty;
1282     intel_bo_t *bo_array[2];
1283     int i;
1284 
1285     if (! i915_can_blt (dst, pattern))
1286 	return CAIRO_INT_STATUS_UNSUPPORTED;
1287 
1288     spattern = (const cairo_surface_pattern_t *) pattern;
1289     src = (i915_surface_t *) spattern->surface;
1290 
1291     if (src->intel.drm.base.is_clear)
1292 	return i915_clear_boxes (dst, boxes);
1293 
1294     if (pattern->extend != CAIRO_EXTEND_NONE &&
1295 	(extents->x + tx < 0 ||
1296 	 extents->y + ty < 0 ||
1297 	 extents->x + tx + extents->width  > src->intel.drm.width ||
1298 	 extents->y + ty + extents->height > src->intel.drm.height))
1299     {
1300 	return CAIRO_INT_STATUS_UNSUPPORTED;
1301     }
1302 
1303     status = i915_surface_fallback_flush (src);
1304     if (unlikely (status))
1305 	return status;
1306 
1307     tx = _cairo_lround (pattern->matrix.x0);
1308     ty = _cairo_lround (pattern->matrix.y0);
1309 
1310     device = i915_device (dst);
1311     if (to_intel_bo (src->intel.drm.bo)->tiling == I915_TILING_Y) {
1312 	cairo_rectangle_int_t extents;
1313 
1314 	_cairo_boxes_extents (boxes, &extents);
1315 	extents.x += tx;
1316 	extents.y += ty;
1317 
1318 	status = i915_surface_extract_X_from_Y (device, src, &extents, &src);
1319 	if (unlikely (status))
1320 	    return status;
1321 
1322 	free_me = &src->intel.drm.base;
1323 	tx = -extents.x;
1324 	ty = -extents.y;
1325     }
1326 
1327     bo_array[0] = to_intel_bo (dst->intel.drm.bo);
1328     bo_array[1] = to_intel_bo (src->intel.drm.bo);
1329 
1330     status = cairo_device_acquire (&device->intel.base.base);
1331     if (unlikely (status))
1332 	goto CLEANUP_SURFACE;
1333 
1334     if (! i915_check_aperture_and_fences (device, bo_array, 2) ||
1335 	i915_batch_space (device) < 8 * boxes->num_boxes)
1336     {
1337 	status = i915_batch_flush (device);
1338 	if (unlikely (status))
1339 	    goto CLEANUP_DEVICE;
1340     }
1341 
1342     cmd = XY_SRC_COPY_BLT_CMD;
1343     br13 = (0xCC << 16) | dst->intel.drm.stride;
1344     switch (dst->intel.drm.format) {
1345     default:
1346     case CAIRO_FORMAT_INVALID:
1347     case CAIRO_FORMAT_A1:
1348 	ASSERT_NOT_REACHED;
1349     case CAIRO_FORMAT_A8:
1350 	break;
1351     case CAIRO_FORMAT_RGB16_565:
1352 	br13 |= BR13_565;
1353 	break;
1354     case CAIRO_FORMAT_RGB24:
1355     case CAIRO_FORMAT_ARGB32:
1356 	br13 |= BR13_8888;
1357 	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
1358 	break;
1359     }
1360 
1361     for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
1362 	const cairo_box_t *box = chunk->base;
1363 	for (i = 0; i < chunk->count; i++) {
1364 	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
1365 	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
1366 	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
1367 	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);
1368 
1369 	    if (x1 + tx < 0)
1370 		x1 = -tx;
1371 	    if (x2 + tx > src->intel.drm.width)
1372 		x2 = src->intel.drm.width - tx;
1373 
1374 	    if (y1 + ty < 0)
1375 		y1 = -ty;
1376 	    if (y2 + ty > src->intel.drm.height)
1377 		y2 = src->intel.drm.height - ty;
1378 
1379 	    if (x2 <= x1 || y2 <= y1)
1380 		continue;
1381 	    if (x2 < 0 || y2 < 0)
1382 		continue;
1383 	    if (x1 >= dst->intel.drm.width || y2 >= dst->intel.drm.height)
1384 		continue;
1385 
1386 	    OUT_DWORD (cmd);
1387 	    OUT_DWORD (br13);
1388 	    OUT_DWORD ((y1 << 16) | x1);
1389 	    OUT_DWORD (((y2 - 1) << 16) | (x2 - 1));
1390 	    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1391 	    OUT_DWORD (((y1 + ty) << 16) | (x1 + tx));
1392 	    OUT_DWORD (src->intel.drm.stride);
1393 	    OUT_RELOC_FENCED (src, I915_GEM_DOMAIN_RENDER, 0);
1394 	}
1395     }
1396 
1397     /* XXX fixup blank portions */
1398 
1399 CLEANUP_DEVICE:
1400     cairo_device_release (&device->intel.base.base);
1401 CLEANUP_SURFACE:
1402     cairo_surface_destroy (free_me);
1403     return status;
1404 }
1405 
1406 static cairo_status_t
_upload_image_inplace(i915_surface_t * surface,const cairo_pattern_t * source,const cairo_rectangle_int_t * extents,const cairo_boxes_t * boxes)1407 _upload_image_inplace (i915_surface_t *surface,
1408 		       const cairo_pattern_t *source,
1409 		       const cairo_rectangle_int_t *extents,
1410 		       const cairo_boxes_t *boxes)
1411 {
1412     i915_device_t *device;
1413     const cairo_surface_pattern_t *pattern;
1414     cairo_image_surface_t *image;
1415     const struct _cairo_boxes_chunk *chunk;
1416     intel_bo_t *bo;
1417     int tx, ty, i;
1418 
1419     if (source->type != CAIRO_PATTERN_TYPE_SURFACE)
1420 	return CAIRO_INT_STATUS_UNSUPPORTED;
1421 
1422     pattern = (const cairo_surface_pattern_t *) source;
1423     if (pattern->surface->type != CAIRO_SURFACE_TYPE_IMAGE)
1424 	return CAIRO_INT_STATUS_UNSUPPORTED;
1425 
1426     if (! _cairo_matrix_is_integer_translation (&source->matrix, &tx, &ty))
1427 	return CAIRO_INT_STATUS_UNSUPPORTED;
1428 
1429     image = (cairo_image_surface_t *) pattern->surface;
1430     if (source->extend != CAIRO_EXTEND_NONE &&
1431 	(extents->x + tx < 0 ||
1432 	 extents->y + ty < 0 ||
1433 	 extents->x + tx + extents->width  > image->width ||
1434 	 extents->y + ty + extents->height > image->height))
1435     {
1436 	return CAIRO_INT_STATUS_UNSUPPORTED;
1437     }
1438 
1439     device = i915_device (surface);
1440     bo = to_intel_bo (surface->intel.drm.bo);
1441     if (bo->exec != NULL || ! intel_bo_is_inactive (&device->intel, bo)) {
1442 	intel_bo_t *new_bo;
1443 	cairo_bool_t need_clear = FALSE;
1444 
1445 	if (boxes->num_boxes != 1 ||
1446 	    extents->width < surface->intel.drm.width ||
1447 	    extents->height < surface->intel.drm.height)
1448 	{
1449 	    if (! surface->intel.drm.base.is_clear)
1450 		return CAIRO_INT_STATUS_UNSUPPORTED;
1451 
1452 	    need_clear = TRUE;
1453 	}
1454 
1455 	new_bo = intel_bo_create (&device->intel,
1456 				  bo->full_size, bo->base.size,
1457 				  FALSE, bo->tiling, bo->stride);
1458 	if (unlikely (new_bo == NULL))
1459 	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1460 
1461 	intel_bo_in_flight_add (&device->intel, bo);
1462 	intel_bo_destroy (&device->intel, bo);
1463 
1464 	bo = new_bo;
1465 	surface->intel.drm.bo = &bo->base;
1466 
1467 	if (need_clear) {
1468 	    memset (intel_bo_map (&device->intel, bo), 0,
1469 		    bo->stride * surface->intel.drm.height);
1470 	}
1471     }
1472 
1473     if (image->format == surface->intel.drm.format) {
1474 	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
1475 	    cairo_box_t *box = chunk->base;
1476 	    for (i = 0; i < chunk->count; i++) {
1477 		int x1 = _cairo_fixed_integer_round (box[i].p1.x);
1478 		int x2 = _cairo_fixed_integer_round (box[i].p2.x);
1479 		int y1 = _cairo_fixed_integer_round (box[i].p1.y);
1480 		int y2 = _cairo_fixed_integer_round (box[i].p2.y);
1481 		cairo_status_t status;
1482 
1483 		if (x1 + tx < 0)
1484 		    x1 = -tx;
1485 		if (x2 + tx > image->width)
1486 		    x2 = image->width - tx;
1487 
1488 		if (y1 + ty < 0)
1489 		    y1 = -ty;
1490 		if (y2 + ty > image->height)
1491 		    y2 = image->height - ty;
1492 
1493 		if (x2 <= x1 || y2 <= y1)
1494 		    continue;
1495 		if (x2 < 0 || y2 < 0)
1496 		    continue;
1497 		if (x1 >= surface->intel.drm.width || y2 >= surface->intel.drm.height)
1498 		    continue;
1499 
1500 		status = intel_bo_put_image (&device->intel,
1501 					     bo,
1502 					     image,
1503 					     x1 + tx, y1 + ty,
1504 					     x2 - x1, y2 - y1,
1505 					     x1, y1);
1506 		if (unlikely (status))
1507 		    return status;
1508 	    }
1509 	}
1510     } else {
1511 	pixman_image_t *dst;
1512 	void *ptr;
1513 
1514 	ptr = intel_bo_map (&device->intel, bo);
1515 	if (unlikely (ptr == NULL))
1516 	    return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
1517 
1518 	dst = pixman_image_create_bits (_cairo_format_to_pixman_format_code (surface->intel.drm.format),
1519 					surface->intel.drm.width,
1520 					surface->intel.drm.height,
1521 					ptr,
1522 					surface->intel.drm.stride);
1523 	if (unlikely (dst == NULL))
1524 	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1525 
1526 	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
1527 	    cairo_box_t *box = chunk->base;
1528 	    for (i = 0; i < chunk->count; i++) {
1529 		int x1 = _cairo_fixed_integer_round (box[i].p1.x);
1530 		int x2 = _cairo_fixed_integer_round (box[i].p2.x);
1531 		int y1 = _cairo_fixed_integer_round (box[i].p1.y);
1532 		int y2 = _cairo_fixed_integer_round (box[i].p2.y);
1533 
1534 		if (x1 + tx < 0)
1535 		    x1 = -tx;
1536 		if (x2 + tx > image->width)
1537 		    x2 = image->width - tx;
1538 
1539 		if (y1 + ty < 0)
1540 		    y1 = -ty;
1541 		if (y2 + ty > image->height)
1542 		    y2 = image->height - ty;
1543 
1544 		if (x2 <= x1 || y2 <= y1)
1545 		    continue;
1546 		if (x2 < 0 || y2 < 0)
1547 		    continue;
1548 		if (x1 >= surface->intel.drm.width || y2 >= surface->intel.drm.height)
1549 		    continue;
1550 
1551 		pixman_image_composite32 (PIXMAN_OP_SRC,
1552 					  image->pixman_image, NULL, dst,
1553 					  x1 + tx, y1 + ty,
1554 					  0, 0,
1555 					  x1, y1,
1556 					  x2 - x1, y2 - y1);
1557 	    }
1558 	}
1559 
1560 	pixman_image_unref (dst);
1561     }
1562 
1563     return CAIRO_STATUS_SUCCESS;
1564 }
1565 
1566 static cairo_status_t
_composite_boxes(i915_surface_t * dst,cairo_operator_t op,const cairo_pattern_t * pattern,cairo_boxes_t * boxes,cairo_antialias_t antialias,cairo_clip_t * clip,double opacity,const cairo_composite_rectangles_t * extents)1567 _composite_boxes (i915_surface_t *dst,
1568 		  cairo_operator_t op,
1569 		  const cairo_pattern_t *pattern,
1570 		  cairo_boxes_t *boxes,
1571 		  cairo_antialias_t antialias,
1572 		  cairo_clip_t *clip,
1573 		  double opacity,
1574 		  const cairo_composite_rectangles_t *extents)
1575 {
1576     cairo_bool_t need_clip_surface = FALSE;
1577     cairo_region_t *clip_region = NULL;
1578     const struct _cairo_boxes_chunk *chunk;
1579     cairo_status_t status;
1580     i915_shader_t shader;
1581     i915_device_t *device;
1582     int i;
1583 
1584     /* If the boxes are not pixel-aligned, we will need to compute a real mask */
1585     if (antialias != CAIRO_ANTIALIAS_NONE) {
1586 	if (! boxes->is_pixel_aligned)
1587 	    return CAIRO_INT_STATUS_UNSUPPORTED;
1588     }
1589 
1590     if (clip == NULL && op == CAIRO_OPERATOR_SOURCE && opacity == 1.) {
1591 	if (pattern->type == CAIRO_PATTERN_TYPE_SURFACE) {
1592 	    status = i915_blt_boxes (dst, pattern, &extents->bounded, boxes);
1593 	    if (status == CAIRO_INT_STATUS_UNSUPPORTED) {
1594 		status = _upload_image_inplace (dst, pattern,
1595 						&extents->bounded, boxes);
1596 	    }
1597 	    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1598 		return status;
1599 	}
1600     }
1601 
1602     if (i915_surface_needs_tiling (dst)) {
1603 	ASSERT_NOT_REACHED;
1604 	return CAIRO_INT_STATUS_UNSUPPORTED;
1605     }
1606 
1607     i915_shader_init (&shader, dst, op, opacity);
1608 
1609     status = i915_shader_acquire_pattern (&shader,
1610 					  &shader.source,
1611 					  pattern,
1612 					  &extents->bounded);
1613     if (unlikely (status))
1614 	return status;
1615 
1616     if (clip != NULL) {
1617 	status = _cairo_clip_get_region (clip, &clip_region);
1618 	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
1619 	need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
1620 	if (need_clip_surface)
1621 	    i915_shader_set_clip (&shader, clip);
1622     }
1623 
1624     device = i915_device (dst);
1625     status = cairo_device_acquire (&device->intel.base.base);
1626     if (unlikely (status))
1627 	goto err_shader;
1628 
1629     status = i915_shader_commit (&shader, device);
1630     if (unlikely (status))
1631 	goto err_device;
1632 
1633     for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
1634 	cairo_box_t *box = chunk->base;
1635 	for (i = 0; i < chunk->count; i++) {
1636 	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
1637 	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
1638 	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
1639 	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);
1640 
1641 	    if (x2 > x1 && y2 > y1)
1642 		shader.add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
1643 	}
1644     }
1645 
1646     if (! extents->is_bounded)
1647 	status = i915_fixup_unbounded_boxes (dst, extents, clip, boxes);
1648 
1649   err_device:
1650     cairo_device_release (&device->intel.base.base);
1651   err_shader:
1652     i915_shader_fini (&shader);
1653 
1654     return status;
1655 }
1656 
/* Clear the whole destination surface.
 *
 * A tiled destination is cleared with a single XY_COLOR_BLT on the 2D
 * engine; otherwise a PRIM3D_CLEAR_RECT covering the surface is emitted
 * through the 3D pipeline.  RGB24 (2D path) and alpha-less content
 * (3D path) clear to opaque black, everything else to transparent.
 * Resets the surface's deferred_clear flag on completion.
 */
cairo_status_t
i915_surface_clear (i915_surface_t *dst)
{
    i915_device_t *device;
    cairo_status_t status;
    intel_bo_t *bo_array[1] = { to_intel_bo (dst->intel.drm.bo) };

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (i915_surface_needs_tiling (dst)) {
	int cmd, br13, clear = 0;

	/* Ensure batch room: the blit takes 6 dwords. */
	if (! i915_check_aperture_and_fences (device, bo_array, 1) ||
	    i915_batch_space (device) < 6)
	{
	    status = i915_batch_flush (device);
	    if (unlikely (status)) {
		cairo_device_release (&device->intel.base.base);
		return status;
	    }
	}

	/* Flush pending 3D vertices before switching to 2D commands. */
	if (device->vertex_count)
	    i915_vbo_flush (device);

	cmd = XY_COLOR_BLT_CMD;
	br13 = (0xCC << 16) | dst->intel.drm.stride;	/* 0xCC = GXcopy raster op */
	switch (dst->intel.drm.format) {
	default:
	case CAIRO_FORMAT_INVALID:
	case CAIRO_FORMAT_A1:
	    ASSERT_NOT_REACHED;
	    /* fall through */
	case CAIRO_FORMAT_A8:
	    break;
	case CAIRO_FORMAT_RGB16_565:
	    br13 |= BR13_565;
	    break;
	case CAIRO_FORMAT_RGB24:
	    /* RGB24 has no alpha channel: clear to opaque black. */
	    clear = 0xff000000;
	    /* fall through */
	case CAIRO_FORMAT_ARGB32:
	    br13 |= BR13_8888;
	    cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	    break;
	}

	OUT_DWORD (cmd);
	OUT_DWORD (br13);
	OUT_DWORD (0);
	OUT_DWORD (((dst->intel.drm.height - 1) << 16) |
		   (dst->intel.drm.width - 1));	/* inclusive bottom-right */
	OUT_RELOC_FENCED (dst,
			  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	OUT_DWORD (clear);
    } else {
	/* Ensure batch room for state setup plus the clear primitive. */
	if (! i915_check_aperture (device, bo_array, 1) ||
	    i915_batch_space (device) < 24)
	{
	    status = i915_batch_flush (device);
	    if (unlikely (status)) {
		cairo_device_release (&device->intel.base.base);
		return status;
	    }
	}

	if (device->vertex_count)
	    i915_vbo_flush (device);

	i915_set_dst (device, dst);

	/* set clear parameters */
	if (device->clear_alpha != (dst->intel.drm.base.content & CAIRO_CONTENT_ALPHA)) {
	    device->clear_alpha = dst->intel.drm.base.content & CAIRO_CONTENT_ALPHA;
	    OUT_DWORD (_3DSTATE_CLEAR_PARAMETERS);
	    OUT_DWORD (CLEARPARAM_CLEAR_RECT | CLEARPARAM_WRITE_COLOR);
	    /* ZONE_INIT color */
	    if (device->clear_alpha) /* XXX depends on pixel format, 16bit needs replication, 8bit? */
		OUT_DWORD (0x00000000);
	    else
		OUT_DWORD (0xff000000);
	    OUT_DWORD (0); /* ZONE_INIT depth */
	    /* CLEAR_RECT color */
	    if (device->clear_alpha)
		OUT_DWORD (0x00000000);
	    else
		OUT_DWORD (0xff000000);
	    OUT_DWORD (0); /* CLEAR_RECT depth */
	    OUT_DWORD (0); /* CLEAR_RECT stencil */
	}

	/* Clear rectangle spanning the surface: (w,h), (0,h), (0,0). */
	OUT_DWORD (PRIM3D_CLEAR_RECT | 5);
	OUT_DWORD (pack_float (dst->intel.drm.width));
	OUT_DWORD (pack_float (dst->intel.drm.height));
	OUT_DWORD (0);
	OUT_DWORD (pack_float (dst->intel.drm.height));
	OUT_DWORD (0);
	OUT_DWORD (0);
    }

    cairo_device_release (&device->intel.base.base);

    dst->deferred_clear = FALSE;
    return status;
}
1763 
1764 static cairo_status_t
_clip_and_composite_boxes(i915_surface_t * dst,cairo_operator_t op,const cairo_pattern_t * src,cairo_boxes_t * boxes,cairo_antialias_t antialias,const cairo_composite_rectangles_t * extents,cairo_clip_t * clip,double opacity)1765 _clip_and_composite_boxes (i915_surface_t *dst,
1766 			   cairo_operator_t op,
1767 			   const cairo_pattern_t *src,
1768 			   cairo_boxes_t *boxes,
1769 			   cairo_antialias_t antialias,
1770 			   const cairo_composite_rectangles_t *extents,
1771 			   cairo_clip_t *clip,
1772 			   double opacity)
1773 {
1774     cairo_status_t status;
1775 
1776     if (boxes->num_boxes == 0) {
1777 	if (extents->is_bounded)
1778 	    return CAIRO_STATUS_SUCCESS;
1779 
1780 	return i915_fixup_unbounded (dst, extents, clip);
1781     }
1782 
1783     if (clip == NULL &&
1784 	(op == CAIRO_OPERATOR_SOURCE || (op == CAIRO_OPERATOR_OVER && dst->intel.drm.base.is_clear)) &&
1785 	opacity == 1. &&
1786 	boxes->num_boxes == 1 &&
1787 	extents->bounded.width  == dst->intel.drm.width &&
1788 	extents->bounded.height == dst->intel.drm.height)
1789     {
1790 	op = CAIRO_OPERATOR_SOURCE;
1791 	dst->deferred_clear = FALSE;
1792 
1793 	status = _upload_image_inplace (dst, src,
1794 					&extents->bounded, boxes);
1795 	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1796 	    return status;
1797     }
1798 
1799     if (dst->deferred_clear) {
1800 	status = i915_surface_clear (dst);
1801 	if (unlikely (status))
1802 	    return status;
1803     }
1804 
1805     /* Use a fast path if the boxes are pixel aligned */
1806     status = _composite_boxes (dst, op, src, boxes, antialias, clip, opacity, extents);
1807     if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1808 	return status;
1809 
1810     /* Otherwise render the boxes via an implicit mask and composite in the usual
1811      * fashion.
1812      */
1813     return i915_clip_and_composite_spans (dst, op, src, antialias,
1814 					  _composite_boxes_spans, boxes,
1815 					  extents, clip, opacity);
1816 }
1817 
1818 static cairo_clip_path_t *
_clip_get_solitary_path(cairo_clip_t * clip)1819 _clip_get_solitary_path (cairo_clip_t *clip)
1820 {
1821     cairo_clip_path_t *iter = clip->path;
1822     cairo_clip_path_t *path = NULL;
1823 
1824     do {
1825 	if ((iter->flags & CAIRO_CLIP_PATH_IS_BOX) == 0) {
1826 	    if (path != NULL)
1827 		return FALSE;
1828 
1829 	    path = iter;
1830 	}
1831 	iter = iter->prev;
1832     } while (iter != NULL);
1833 
1834     return path;
1835 }
1836 
/* Closure passed to _composite_polygon_spans: the tessellated polygon plus
 * the parameters the scan converter needs to rasterize it. */
typedef struct {
    cairo_polygon_t		polygon;	/* edges to rasterize */
    cairo_fill_rule_t		 fill_rule;	/* winding vs even-odd */
    cairo_antialias_t		 antialias;	/* requested AA mode */
} composite_polygon_info_t;
1842 
1843 static cairo_status_t
_composite_polygon_spans(void * closure,cairo_span_renderer_t * renderer,const cairo_rectangle_int_t * extents)1844 _composite_polygon_spans (void                          *closure,
1845 			  cairo_span_renderer_t		*renderer,
1846 			  const cairo_rectangle_int_t   *extents)
1847 {
1848     composite_polygon_info_t *info = closure;
1849     cairo_botor_scan_converter_t converter;
1850     cairo_status_t status;
1851     cairo_box_t box;
1852 
1853     box.p1.x = _cairo_fixed_from_int (extents->x);
1854     box.p1.y = _cairo_fixed_from_int (extents->y);
1855     box.p2.x = _cairo_fixed_from_int (extents->x + extents->width);
1856     box.p2.y = _cairo_fixed_from_int (extents->y + extents->height);
1857 
1858     _cairo_botor_scan_converter_init (&converter, &box, info->fill_rule);
1859 
1860     status = converter.base.add_polygon (&converter.base, &info->polygon);
1861     if (likely (status == CAIRO_STATUS_SUCCESS))
1862 	status = converter.base.generate (&converter.base, renderer);
1863 
1864     converter.base.destroy (&converter.base);
1865 
1866     return status;
1867 }
1868 
/* Fill a path into the surface, modulated by a global alpha (opacity).
 *
 * Strategy: compute composite extents, try to discard or simplify the clip
 * (drop it when it contains the extents, or when it is exactly the path
 * being filled), reduce the clip to boxes, then either
 *   - rectilinear path  -> convert to boxes and use _clip_and_composite_boxes,
 *   - general path      -> tessellate to a polygon and render via spans.
 *
 * Returns a cairo status; CAIRO_INT_STATUS_UNSUPPORTED may propagate from
 * the box fast path, in which case the polygon fallback is attempted.
 */
static cairo_int_status_t
i915_surface_fill_with_alpha (void			*abstract_dst,
			      cairo_operator_t		 op,
			      const cairo_pattern_t	*source,
			      cairo_path_fixed_t	*path,
			      cairo_fill_rule_t		 fill_rule,
			      double			 tolerance,
			      cairo_antialias_t		 antialias,
			      cairo_clip_t		*clip,
			      double			 opacity)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;	/* stack fast-path */
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_fill (&extents,
							dst->intel.drm.width,
							dst->intel.drm.height,
							op, source, path,
							clip);
    if (unlikely (status))
	return status;

    /* The clip is redundant if everything we draw already lies inside it. */
    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    /* Clipping to the very path being filled is a no-op for bounded ops. */
    if (extents.is_bounded && clip != NULL) {
	cairo_clip_path_t *clip_path;

	if (((clip_path = _clip_get_solitary_path (clip)) != NULL) &&
	    _cairo_path_fixed_equal (&clip_path->path, path))
	{
	    clip = NULL;
	}
    }

    /* Work on a private copy so _cairo_clip_to_boxes may mutate it. */
    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    assert (! _cairo_path_fixed_fill_is_empty (path));

    /* Fast path: an axis-aligned path becomes a set of boxes. */
    if (_cairo_path_fixed_fill_is_rectilinear (path)) {
	cairo_boxes_t boxes;

	_cairo_boxes_init (&boxes);
	_cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
	status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
							      fill_rule,
							      &boxes);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = _clip_and_composite_boxes (dst, op, source,
						&boxes, antialias,
						&extents, clip,
						opacity);
	}

	_cairo_boxes_fini (&boxes);

	/* Only fall through to the polygon path on UNSUPPORTED. */
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    goto CLEANUP_BOXES;
    }

    /* General path: tessellate into a polygon limited to the clip boxes. */
    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_fill_to_polygon (path, tolerance, &info.polygon);
    if (unlikely (status))
	goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
	cairo_rectangle_int_t rect;

	/* Shrink the operation extents to the polygon's bounding box;
	 * an empty intersection means nothing to draw. */
	_cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
	if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
	    goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
	/* Degenerate polygon: only unbounded operators still have work
	 * (clearing outside the drawn area). */
	if (! extents.is_bounded)
	    status = i915_fixup_unbounded (dst, &extents, clip);

	goto CLEANUP_POLYGON;
    }

    info.fill_rule = fill_rule;
    info.antialias = antialias;
    status = i915_clip_and_composite_spans (dst, op, source, antialias,
					    _composite_polygon_spans, &info,
					    &extents, clip, opacity);

CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

CLEANUP_BOXES:
    /* _cairo_clip_to_boxes may have heap-allocated a larger boxes array. */
    if (clip_boxes != boxes_stack)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
1985 
/* Paint the source over the whole clip region, modulated by opacity.
 *
 * The clip is reduced to boxes where possible; when it instead consists of
 * a single general path, the paint is re-expressed as a fill of that path,
 * which avoids needing a separate clip mask.
 */
static cairo_int_status_t
i915_surface_paint_with_alpha (void			*abstract_dst,
			       cairo_operator_t		 op,
			       const cairo_pattern_t	*source,
			       cairo_clip_t		*clip,
			       double			 opacity)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_clip_path_t *clip_path;
    cairo_boxes_t boxes;
    /* Use the embedded box storage of cairo_boxes_t as the initial array. */
    int num_boxes = ARRAY_LENGTH (boxes.boxes_embedded);
    cairo_box_t *clip_boxes = boxes.boxes_embedded;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_paint (&extents,
							 dst->intel.drm.width,
							 dst->intel.drm.height,
							 op, source,
							 clip);
    if (unlikely (status))
	return status;

    /* The clip is redundant if everything we draw already lies inside it. */
    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    /* Work on a private copy so _cairo_clip_to_boxes may mutate it. */
    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    /* If the clip cannot be reduced to a set of boxes, we will need to
     * use a clipmask. Paint is special as it is the only operation that
     * does not implicitly use a mask, so we may be able to reduce this
     * operation to a fill...
     */
    if (clip != NULL &&
	extents.is_bounded &&
	(clip_path = _clip_get_solitary_path (clip)) != NULL)
    {
	status = i915_surface_fill_with_alpha (dst, op, source,
					       &clip_path->path,
					       clip_path->fill_rule,
					       clip_path->tolerance,
					       clip_path->antialias,
					       NULL, opacity);
    }
    else
    {
	_cairo_boxes_init_for_array (&boxes, clip_boxes, num_boxes);
	status = _clip_and_composite_boxes (dst, op, source,
					    &boxes, CAIRO_ANTIALIAS_DEFAULT,
					    &extents, clip, opacity);
    }
    /* _cairo_clip_to_boxes may have heap-allocated a larger boxes array. */
    if (clip_boxes != boxes.boxes_embedded)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
2058 
2059 static cairo_int_status_t
i915_surface_paint(void * abstract_dst,cairo_operator_t op,const cairo_pattern_t * source,cairo_clip_t * clip)2060 i915_surface_paint (void			*abstract_dst,
2061 		    cairo_operator_t		 op,
2062 		    const cairo_pattern_t	*source,
2063 		    cairo_clip_t		*clip)
2064 {
2065     i915_surface_t *dst = abstract_dst;
2066 
2067     /* XXX unsupported operators? use pixel shader blending, eventually */
2068 
2069     if (op == CAIRO_OPERATOR_CLEAR && clip == NULL) {
2070 	dst->deferred_clear = TRUE;
2071 	return CAIRO_STATUS_SUCCESS;
2072     }
2073 
2074     return i915_surface_paint_with_alpha (dst, op, source, clip, 1.);
2075 }
2076 
2077 static cairo_int_status_t
i915_surface_mask(void * abstract_dst,cairo_operator_t op,const cairo_pattern_t * source,const cairo_pattern_t * mask,cairo_clip_t * clip)2078 i915_surface_mask (void				*abstract_dst,
2079 		   cairo_operator_t		 op,
2080 		   const cairo_pattern_t	*source,
2081 		   const cairo_pattern_t	*mask,
2082 		   cairo_clip_t			*clip)
2083 {
2084     i915_surface_t *dst = abstract_dst;
2085     i915_device_t *device;
2086     cairo_composite_rectangles_t extents;
2087     i915_shader_t shader;
2088     cairo_clip_t local_clip;
2089     cairo_region_t *clip_region = NULL;
2090     cairo_bool_t need_clip_surface = FALSE;
2091     cairo_bool_t have_clip = FALSE;
2092     cairo_status_t status;
2093 
2094     if (mask->type == CAIRO_PATTERN_TYPE_SOLID) {
2095 	const cairo_solid_pattern_t *solid = (cairo_solid_pattern_t *) mask;
2096 	return i915_surface_paint_with_alpha (dst, op, source, clip, solid->color.alpha);
2097     }
2098 
2099     status = _cairo_composite_rectangles_init_for_mask (&extents,
2100 							dst->intel.drm.width,
2101 							dst->intel.drm.height,
2102 							op, source, mask, clip);
2103     if (unlikely (status))
2104 	return status;
2105 
2106     if (_cairo_clip_contains_extents (clip, &extents))
2107 	clip = NULL;
2108 
2109     if (clip != NULL && extents.is_bounded) {
2110 	clip = _cairo_clip_init_copy (&local_clip, clip);
2111 	status = _cairo_clip_rectangle (clip, &extents.bounded);
2112 	if (unlikely (status)) {
2113 	    _cairo_clip_fini (&local_clip);
2114 	    return status;
2115 	}
2116 
2117 	have_clip = TRUE;
2118     }
2119 
2120     i915_shader_init (&shader, dst, op, 1.);
2121 
2122     status = i915_shader_acquire_pattern (&shader,
2123 					  &shader.source,
2124 					  source,
2125 					  &extents.bounded);
2126     if (unlikely (status))
2127 	goto err_shader;
2128 
2129     status = i915_shader_acquire_pattern (&shader,
2130 					  &shader.mask,
2131 					  mask,
2132 					  &extents.bounded);
2133     if (unlikely (status))
2134 	goto err_shader;
2135 
2136     if (clip != NULL) {
2137 	status = _cairo_clip_get_region (clip, &clip_region);
2138 	if (unlikely (_cairo_status_is_error (status) ||
2139 		      status == CAIRO_INT_STATUS_NOTHING_TO_DO))
2140 	{
2141 	    goto err_shader;
2142 	}
2143 
2144 	need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
2145 	if (need_clip_surface)
2146 	    i915_shader_set_clip (&shader, clip);
2147 
2148 	if (clip_region != NULL) {
2149 	    cairo_rectangle_int_t rect;
2150 	    cairo_bool_t is_empty;
2151 
2152 	    status = CAIRO_STATUS_SUCCESS;
2153 	    cairo_region_get_extents (clip_region, &rect);
2154 	    is_empty = ! _cairo_rectangle_intersect (&extents.unbounded, &rect);
2155 	    if (unlikely (is_empty))
2156 		goto err_shader;
2157 
2158 	    is_empty = ! _cairo_rectangle_intersect (&extents.bounded, &rect);
2159 	    if (unlikely (is_empty && extents.is_bounded))
2160 		goto err_shader;
2161 
2162 	    if (cairo_region_num_rectangles (clip_region) == 1)
2163 		clip_region = NULL;
2164 	}
2165     }
2166 
2167     if (i915_surface_needs_tiling (dst)) {
2168 	ASSERT_NOT_REACHED;
2169 	return CAIRO_INT_STATUS_UNSUPPORTED;
2170     }
2171 
2172     device = i915_device (dst);
2173     status = cairo_device_acquire (&device->intel.base.base);
2174     if (unlikely (status))
2175 	goto err_shader;
2176 
2177     if (dst->deferred_clear) {
2178 	status = i915_surface_clear (dst);
2179 	if (unlikely (status))
2180 	    goto err_shader;
2181     }
2182 
2183     status = i915_shader_commit (&shader, device);
2184     if (unlikely (status))
2185 	goto err_device;
2186 
2187     if (clip_region != NULL) {
2188 	unsigned int n, num_rectangles;
2189 
2190 	num_rectangles = cairo_region_num_rectangles (clip_region);
2191 	for (n = 0; n < num_rectangles; n++) {
2192 	    cairo_rectangle_int_t rect;
2193 
2194 	    cairo_region_get_rectangle (clip_region, n, &rect);
2195 
2196 	    shader.add_rectangle (&shader,
2197 				  rect.x, rect.y,
2198 				  rect.x + rect.width, rect.y + rect.height);
2199 	}
2200     } else {
2201 	shader.add_rectangle (&shader,
2202 			      extents.bounded.x, extents.bounded.y,
2203 			      extents.bounded.x + extents.bounded.width,
2204 			      extents.bounded.y + extents.bounded.height);
2205     }
2206 
2207     if (! extents.is_bounded)
2208 	status = i915_fixup_unbounded (dst, &extents, clip);
2209 
2210   err_device:
2211     cairo_device_release (&device->intel.base.base);
2212   err_shader:
2213     i915_shader_fini (&shader);
2214     if (have_clip)
2215 	_cairo_clip_fini (&local_clip);
2216 
2217     return status;
2218 }
2219 
/* Backend stroke entry point.  Mirrors i915_surface_fill_with_alpha:
 * reduce the clip to boxes, try the rectilinear-stroke-to-boxes fast path,
 * and otherwise tessellate the stroke outline into a polygon rendered via
 * spans (always with the WINDING fill rule, as stroke outlines require). */
static cairo_int_status_t
i915_surface_stroke (void			*abstract_dst,
		     cairo_operator_t		 op,
		     const cairo_pattern_t	*source,
		     cairo_path_fixed_t		*path,
		     const cairo_stroke_style_t	*stroke_style,
		     const cairo_matrix_t	*ctm,
		     const cairo_matrix_t	*ctm_inverse,
		     double			 tolerance,
		     cairo_antialias_t		 antialias,
		     cairo_clip_t		*clip)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;	/* stack fast-path */
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_stroke (&extents,
							  dst->intel.drm.width,
							  dst->intel.drm.height,
							  op, source,
							  path, stroke_style, ctm,
							  clip);
    if (unlikely (status))
	return status;

    /* The clip is redundant if everything we draw already lies inside it. */
    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    /* Work on a private copy so _cairo_clip_to_boxes may mutate it. */
    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    /* Fast path: axis-aligned strokes reduce to a set of boxes. */
    if (_cairo_path_fixed_stroke_is_rectilinear (path)) {
	cairo_boxes_t boxes;

	_cairo_boxes_init (&boxes);
	_cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
	status = _cairo_path_fixed_stroke_rectilinear_to_boxes (path,
								stroke_style,
								ctm,
								&boxes);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = _clip_and_composite_boxes (dst, op, source,
						&boxes, antialias,
						&extents, clip, 1.);
	}

	_cairo_boxes_fini (&boxes);

	/* Only fall through to the polygon path on UNSUPPORTED. */
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    goto CLEANUP_BOXES;
    }

    /* General stroke: tessellate the outline limited to the clip boxes. */
    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_stroke_to_polygon (path,
						  stroke_style,
						  ctm, ctm_inverse,
						  tolerance,
						  &info.polygon);
    if (unlikely (status))
	goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
	cairo_rectangle_int_t rect;

	/* Shrink to the polygon's bounds; empty intersection = nothing. */
	_cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
	if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
	    goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
	/* Degenerate stroke: unbounded operators still clear outside. */
	if (! extents.is_bounded)
	    status = i915_fixup_unbounded (dst, &extents, clip);

	goto CLEANUP_POLYGON;
    }

    /* Stroke outlines are generated non-overlapping for WINDING. */
    info.fill_rule = CAIRO_FILL_RULE_WINDING;
    info.antialias = antialias;
    status = i915_clip_and_composite_spans (dst, op, source, antialias,
					    _composite_polygon_spans, &info,
					    &extents, clip, 1.);

CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

CLEANUP_BOXES:
    /* _cairo_clip_to_boxes may have heap-allocated a larger boxes array. */
    if (clip_boxes != boxes_stack)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
2330 
2331 static cairo_int_status_t
i915_surface_fill(void * abstract_dst,cairo_operator_t op,const cairo_pattern_t * source,cairo_path_fixed_t * path,cairo_fill_rule_t fill_rule,double tolerance,cairo_antialias_t antialias,cairo_clip_t * clip)2332 i915_surface_fill (void			*abstract_dst,
2333 		   cairo_operator_t	 op,
2334 		   const cairo_pattern_t*source,
2335 		   cairo_path_fixed_t	*path,
2336 		   cairo_fill_rule_t	 fill_rule,
2337 		   double		 tolerance,
2338 		   cairo_antialias_t	 antialias,
2339 		   cairo_clip_t		*clip)
2340 {
2341     return i915_surface_fill_with_alpha (abstract_dst, op, source, path, fill_rule, tolerance, antialias, clip, 1.);
2342 }
2343 
/* Surface backend vtable: dispatch table wiring the generic cairo surface
 * machinery to the i915 implementations above (NULL slots fall back to the
 * generic/unsupported path). */
static const cairo_surface_backend_t i915_surface_backend = {
    CAIRO_SURFACE_TYPE_DRM,
    _cairo_default_context_create,

    i915_surface_create_similar,
    i915_surface_finish,

    NULL, /* acquire_dest_image */
    intel_surface_acquire_source_image,
    intel_surface_release_source_image,

    NULL, NULL, NULL,
    NULL, /* composite */
    NULL, /* fill */
    NULL, /* trapezoids */
    NULL, /* span */
    NULL, /* check-span */

    NULL, /* copy_page */
    NULL, /* show_page */
    _cairo_drm_surface_get_extents,
    NULL, /* old-glyphs */
    _cairo_drm_surface_get_font_options,

    i915_surface_flush,
    NULL, /* mark_dirty */
    intel_scaled_font_fini,
    intel_scaled_glyph_fini,

    i915_surface_paint,
    i915_surface_mask,
    i915_surface_stroke,
    i915_surface_fill,
    i915_surface_glyphs,
};
2379 
/* Initialize an i915 surface: generic intel/drm init, then precompute the
 * hardware state words for the given pixel format —
 *   map0/map1:  texture map state (MS3/MS4) when the surface is sampled,
 *   colorbuf:   render-target format when the surface is drawn to.
 * The stride-dependent bits of map1 are filled in once a bo is attached. */
static void
i915_surface_init (i915_surface_t *surface,
		   cairo_drm_device_t *device,
		   cairo_format_t format,
		   int width, int height)
{
    intel_surface_init (&surface->intel, &i915_surface_backend, device,
			format, width, height);

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	/* Unsupported formats abort in debug; in release they deliberately
	 * fall through and are treated as ARGB32. */
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_ARGB32:
	surface->map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
	surface->colorbuf = COLR_BUF_ARGB8888 | DEPTH_FRMT_24_FIXED_8_OTHER;
	break;
    case CAIRO_FORMAT_RGB24:
	surface->map0 = MAPSURF_32BIT | MT_32BIT_XRGB8888;
	surface->colorbuf = COLR_BUF_ARGB8888 | DEPTH_FRMT_24_FIXED_8_OTHER;
	break;
    case CAIRO_FORMAT_RGB16_565:
	surface->map0 = MAPSURF_16BIT | MT_16BIT_RGB565;
	surface->colorbuf = COLR_BUF_RGB565;
	break;
    case CAIRO_FORMAT_A8:
	surface->map0 = MAPSURF_8BIT | MT_8BIT_A8;
	surface->colorbuf = COLR_BUF_8BIT | DEPTH_FRMT_24_FIXED_8_OTHER;
	break;
    }
    /* 0x8 = half-pixel bias, centering samples on pixel centers. */
    surface->colorbuf |= DSTORG_HORT_BIAS (0x8) | DSTORG_VERT_BIAS (0x8);
    /* MS3 encodes dimensions minus one. */
    surface->map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
		     ((width  - 1) << MS3_WIDTH_SHIFT);
    surface->map1 = 0;	/* pitch unknown until a bo is attached */

    surface->is_current_texture = 0;
    surface->deferred_clear = FALSE;

    surface->offset = 0;	/* nonzero only for sub-allocated cache images */

    surface->stencil  = NULL;
    surface->cache = NULL;
}
2424 
/* Create an i915 surface and, for non-empty dimensions, allocate its
 * backing buffer object.
 *
 * tiling is a request: it is downgraded to TILING_NONE/TILING_X according
 * to hardware constraints (minimum useful size, maximum tiled pitch) and
 * forced to TILING_X for surfaces too tall for the 3D pipeline (blt-only).
 * gpu_target indicates the bo will be used as a render target.
 *
 * Returns the new surface, or an error surface on failure.
 */
cairo_surface_t *
i915_surface_create_internal (cairo_drm_device_t *base_dev,
		              cairo_format_t format,
			      int width, int height,
			      uint32_t tiling,
			      cairo_bool_t gpu_target)
{
    i915_surface_t *surface;
    cairo_status_t status_ignored;

    surface = _cairo_malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL))
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i915_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
	uint32_t size, stride;
	intel_bo_t *bo;

	/* Round the width up to a multiple of 4 pixels for the stride. */
	width = (width + 3) & -4;
	stride = cairo_format_stride_for_width (surface->intel.drm.format, width);
	/* check for tiny surfaces for which tiling is irrelevant */
	if (height * stride <= 4096)
	    tiling = I915_TILING_NONE;
	if (tiling != I915_TILING_NONE && stride <= 512)
	    tiling = I915_TILING_NONE;
	if (tiling != I915_TILING_NONE) {
	    if (height <= 8)
		tiling = I915_TILING_NONE;
	    else if (height <= 16)
		tiling = I915_TILING_X;
	}
	/* large surfaces we need to blt, so force TILING_X */
	if (height > 2048)
	    tiling = I915_TILING_X;
	/* but there is a maximum limit to the tiling pitch */
	if (tiling != I915_TILING_NONE && stride > 8192)
	    tiling = I915_TILING_NONE;

	stride = i915_tiling_stride (tiling, stride);
	assert (stride >= (uint32_t) cairo_format_stride_for_width (surface->intel.drm.format, width));
	assert (tiling == I915_TILING_NONE || stride <= 8192);
	height = i915_tiling_height (tiling, height);
	if (height > 64*1024) {
	    /* NOTE(review): this error path destroys the device reference
	     * while the bo-failure path below calls _cairo_drm_surface_finish
	     * instead — confirm which cleanup is the correct one here. */
	    free (surface);
	    cairo_device_destroy (&base_dev->base);
	    return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_SIZE));
	}

	size = stride * height;
	bo = intel_bo_create (to_intel_device (&base_dev->base),
			      i915_tiling_size (tiling, size), size,
			      gpu_target, tiling, stride);
	if (bo == NULL) {
	    status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
	    free (surface);
	    return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
	}
	assert (bo->base.size >= size);

	surface->intel.drm.bo = &bo->base;
	surface->intel.drm.stride = stride;

	/* Now that tiling and stride are final, fill in the map state. */
	surface->map0 |= MS3_tiling (tiling);
	surface->map1 = (stride/4 - 1) << MS4_PITCH_SHIFT;
    }

    return &surface->intel.drm.base;
}
2495 
2496 static cairo_surface_t *
i915_surface_create(cairo_drm_device_t * base_dev,cairo_format_t format,int width,int height)2497 i915_surface_create (cairo_drm_device_t *base_dev,
2498 		     cairo_format_t format,
2499 		     int width, int height)
2500 {
2501     switch (format) {
2502     case CAIRO_FORMAT_ARGB32:
2503     case CAIRO_FORMAT_RGB16_565:
2504     case CAIRO_FORMAT_RGB24:
2505     case CAIRO_FORMAT_A8:
2506 	break;
2507     case CAIRO_FORMAT_INVALID:
2508     default:
2509     case CAIRO_FORMAT_A1:
2510 	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
2511     }
2512 
2513     return i915_surface_create_internal (base_dev, format, width, height,
2514 	                                 I915_TILING_DEFAULT, TRUE);
2515 }
2516 
/* Wrap an existing, globally-named (flink) buffer object in a surface.
 * The caller supplies the geometry; the stride must satisfy the hardware
 * destination-surface constraints documented below. */
static cairo_surface_t *
i915_surface_create_for_name (cairo_drm_device_t *base_dev,
			      unsigned int name,
			      cairo_format_t format,
			      int width, int height, int stride)
{
    i915_surface_t *surface;

    /* Vol I, p134: size restrictions for textures */
    /* Vol I, p129: destination surface stride must be a multiple of 32 bytes */
    if (stride < cairo_format_stride_for_width (format, (width + 3) & -4) ||
	stride & 31)
    {
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
    }

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
	break;
    }

    surface = _cairo_malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL))
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i915_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
	surface->intel.drm.stride = stride;
	/* MS4 pitch is encoded in dwords, minus one. */
	surface->map1 = (surface->intel.drm.stride/4 - 1) << MS4_PITCH_SHIFT;

	surface->intel.drm.bo =
	    &intel_bo_create_for_name (to_intel_device (&base_dev->base),
				       name)->base;
	if (unlikely (surface->intel.drm.bo == NULL)) {
	    free (surface);
	    return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
	}
	to_intel_bo (surface->intel.drm.bo)->stride = stride;

	/* Inherit whatever tiling the named bo already has. */
	surface->map0 |= MS3_tiling (to_intel_bo (surface->intel.drm.bo)->tiling);
    }

    return &surface->intel.drm.base;
}
2569 
/* Initialize one shared image-cache buffer: allocate a tiled bo of the
 * given dimensions and precompute its texture map state.  Only ARGB32 and
 * A8 caches exist; width/height must satisfy the alignment asserts so that
 * the default tiling leaves stride and height unchanged. */
static cairo_status_t
i915_buffer_cache_init (intel_buffer_cache_t *cache,
		        i915_device_t *device,
			cairo_format_t format,
			int width, int height)
{
    const uint32_t tiling = I915_TILING_DEFAULT;
    uint32_t stride, size;

    assert ((width & 3) == 0);
    assert ((height & 1) == 0);
    cache->buffer.width = width;
    cache->buffer.height = height;

    switch (format) {
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB16_565:
	/* No caches for these formats; aborts in debug builds and
	 * deliberately falls through to ARGB32 in release builds. */
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_ARGB32:
	cache->buffer.map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
	stride = width * 4;
	break;
    case CAIRO_FORMAT_A8:
	cache->buffer.map0 = MAPSURF_8BIT | MT_8BIT_I8;
	stride = width;
	break;
    }
    assert ((stride & 7) == 0);
    assert (i915_tiling_stride (tiling, stride) == stride);
    assert (i915_tiling_height (tiling, height) == height);

    size = height * stride;
    assert (i915_tiling_size (tiling, size) == size);
    cache->buffer.bo = intel_bo_create (&device->intel, size, size, FALSE, tiling, stride);
    if (unlikely (cache->buffer.bo == NULL))
	return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    cache->buffer.stride = cache->buffer.bo->stride;

    /* MS3 encodes dimensions minus one; MS4 pitch in dwords minus one. */
    cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
			  ((width  - 1) << MS3_WIDTH_SHIFT);
    cache->buffer.map0 |= MS3_tiling (tiling);
    cache->buffer.map1 = ((stride / 4) - 1) << MS4_PITCH_SHIFT;

    cache->ref_count = 0;
    cairo_list_init (&cache->link);

    return CAIRO_STATUS_SUCCESS;
}
2621 
/* Upload an image into GPU memory, sub-allocating small images from a
 * shared cache buffer.
 *
 * Images larger than half the cache dimensions get their own untiled bo.
 * Smaller ones are placed via an rtree into an existing inactive cache
 * buffer (or a newly created one), sharing its bo; the returned surface
 * records its sub-rectangle via surface->offset and surface->cache.
 *
 * Returns the new surface or an error surface (cast) on failure.
 */
i915_surface_t *
i915_surface_create_from_cacheable_image_internal (i915_device_t *device,
						   cairo_image_surface_t *image)
{
    i915_surface_t *surface;
    cairo_status_t status;
    cairo_list_t *caches;
    intel_buffer_cache_t *cache;
    cairo_rtree_node_t *node;
    cairo_format_t format;
    int width, height, bpp;

    format = image->format;
    if (format == CAIRO_FORMAT_A1)
	format = CAIRO_FORMAT_A8;	/* A1 is uploaded expanded to A8 */

    width = image->width;
    height = image->height;
    /* Too big to share a cache buffer: give it a dedicated bo. */
    if (width > IMAGE_CACHE_WIDTH/2 || height > IMAGE_CACHE_HEIGHT/2) {
	surface = (i915_surface_t *)
	    i915_surface_create_internal (&device->intel.base,
					  format,
					  width, height,
					  I915_TILING_NONE, FALSE);
	if (unlikely (surface->intel.drm.base.status))
	    return surface;

	status = intel_bo_put_image (&device->intel,
				     to_intel_bo (surface->intel.drm.bo),
				     image,
				     0, 0,
				     width, height,
				     0, 0);

	if (unlikely (status)) {
	    cairo_surface_destroy (&surface->intel.drm.base);
	    return (i915_surface_t *) _cairo_surface_create_in_error (status);
	}

	return surface;
    }

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return (i915_surface_t *) _cairo_surface_create_in_error (status);

    /* Pick the cache bucket: color formats share the ARGB32 cache,
     * alpha formats share the A8 cache. */
    switch (image->format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB16_565:
	caches = &device->image_caches[0];
	format = CAIRO_FORMAT_ARGB32;
	bpp = 4;
	break;
    case CAIRO_FORMAT_A8:
    case CAIRO_FORMAT_A1:
	caches = &device->image_caches[1];
	format = CAIRO_FORMAT_A8;
	bpp = 1;
	break;
    case CAIRO_FORMAT_INVALID:
    default:
	ASSERT_NOT_REACHED;
	status = _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
	goto CLEANUP_DEVICE;
    }

    /* Find room in an existing cache buffer that the GPU is not using. */
    node = NULL;
    cairo_list_foreach_entry (cache, intel_buffer_cache_t, caches, link) {
	if (! intel_bo_is_inactive (&device->intel, cache->buffer.bo))
	    continue;

	status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
	if (unlikely (_cairo_status_is_error (status)))
	    goto CLEANUP_DEVICE;
	if (status == CAIRO_STATUS_SUCCESS)
	    break;
    }
    if (node == NULL) {
	/* No space anywhere: create a fresh cache buffer. */
	cache = _cairo_malloc (sizeof (intel_buffer_cache_t));
	if (unlikely (cache == NULL)) {
	    status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	    goto CLEANUP_DEVICE;
	}

	status = i915_buffer_cache_init (cache, device, format,
					 IMAGE_CACHE_WIDTH,
					 IMAGE_CACHE_HEIGHT);
	if (unlikely (status)) {
	    free (cache);
	    goto CLEANUP_DEVICE;
	}

	_cairo_rtree_init (&cache->rtree,
			   IMAGE_CACHE_WIDTH,
			   IMAGE_CACHE_HEIGHT,
			   4,
			   sizeof (i915_image_private_t));

	/* The image fits by construction (it is at most half the cache
	 * dimensions), so insertion into an empty rtree cannot fail. */
	status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
	assert (status == CAIRO_STATUS_SUCCESS);

	cairo_list_init (&cache->link);
    }
    /* Most-recently-used cache moves to the front of the bucket. */
    cairo_list_move (&cache->link, caches);
    ((i915_image_private_t *) node)->container = cache;

    status = intel_bo_put_image (&device->intel,
				 cache->buffer.bo,
				 image,
				 0, 0,
				 width, height,
				 node->x, node->y);
    if (unlikely (status))
	goto CLEANUP_CACHE;

    surface = _cairo_malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_CACHE;
    }

    i915_surface_init (surface, &device->intel.base,
		       format, width, height);

    surface->intel.drm.stride = cache->buffer.stride;

    surface->map0 |= MS3_tiling (cache->buffer.bo->tiling);
    surface->map1 = (surface->intel.drm.stride/4 - 1) << MS4_PITCH_SHIFT;

    /* Share the cache bo; offset addresses this image's sub-rectangle. */
    surface->intel.drm.bo = &intel_bo_reference (cache->buffer.bo)->base;
    surface->offset = node->y * cache->buffer.stride + bpp * node->x;

    surface->cache = (i915_image_private_t *) node;
    cache->ref_count++;

    cairo_device_release (&device->intel.base.base);

    return surface;

CLEANUP_CACHE:
    _cairo_rtree_node_destroy (&cache->rtree, node);
    /* A cache with no remaining users is torn down entirely. */
    if (cache->ref_count == 0) {
	intel_bo_destroy (&device->intel, cache->buffer.bo);
	_cairo_rtree_fini (&cache->rtree);
	cairo_list_del (&cache->link);
	free (cache);
    }
CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
    return (i915_surface_t *) _cairo_surface_create_in_error (status);
}
2774 
2775 static cairo_surface_t *
i915_surface_create_from_cacheable_image(cairo_drm_device_t * device,cairo_surface_t * source)2776 i915_surface_create_from_cacheable_image (cairo_drm_device_t *device,
2777 					  cairo_surface_t *source)
2778 {
2779     i915_surface_t *surface;
2780     cairo_image_surface_t *image;
2781     void *image_extra;
2782     cairo_status_t status;
2783 
2784     status = _cairo_surface_acquire_source_image (source, &image, &image_extra);
2785     if (unlikely (status))
2786 	return _cairo_surface_create_in_error (status);
2787 
2788     surface = i915_surface_create_from_cacheable_image_internal ((i915_device_t *) device, image);
2789 
2790     _cairo_surface_release_source_image (source, image, image_extra);
2791 
2792     return &surface->intel.drm.base;
2793 }
2794 
2795 static cairo_status_t
i915_surface_enable_scan_out(void * abstract_surface)2796 i915_surface_enable_scan_out (void *abstract_surface)
2797 {
2798     i915_surface_t *surface = abstract_surface;
2799     intel_bo_t *bo;
2800     cairo_status_t status;
2801 
2802     if (unlikely (surface->intel.drm.bo == NULL))
2803 	return _cairo_error (CAIRO_STATUS_INVALID_SIZE);
2804 
2805     bo = to_intel_bo (surface->intel.drm.bo);
2806     if (bo->tiling == I915_TILING_Y) {
2807 	status = i915_surface_batch_flush (surface);
2808 	if (unlikely (status))
2809 	    return status;
2810 
2811 	bo->tiling = I915_TILING_X;
2812 	surface->map0 &= ~MS3_tiling (I915_TILING_Y);
2813 	surface->map0 |= MS3_tiling (I915_TILING_X);
2814     }
2815 
2816 
2817     return CAIRO_STATUS_SUCCESS;
2818 }
2819 
2820 static cairo_int_status_t
i915_device_flush(cairo_drm_device_t * device)2821 i915_device_flush (cairo_drm_device_t *device)
2822 {
2823     cairo_status_t status;
2824 
2825     if (unlikely (device->base.finished))
2826 	return CAIRO_STATUS_SUCCESS;
2827 
2828     status = cairo_device_acquire (&device->base);
2829     if (likely (status == CAIRO_STATUS_SUCCESS)) {
2830 	status = i915_batch_flush ((i915_device_t *) device);
2831 	cairo_device_release (&device->base);
2832     }
2833 
2834     return status;
2835 }
2836 
2837 static cairo_int_status_t
i915_device_throttle(cairo_drm_device_t * device)2838 i915_device_throttle (cairo_drm_device_t *device)
2839 {
2840     cairo_status_t status;
2841 
2842     status = cairo_device_acquire (&device->base);
2843     if (unlikely (status))
2844 	return status;
2845 
2846     status = i915_batch_flush ((i915_device_t *) device);
2847     intel_throttle ((intel_device_t *) device);
2848 
2849     cairo_device_release (&device->base);
2850 
2851     return status;
2852 }
2853 
2854 static void
i915_device_destroy(void * data)2855 i915_device_destroy (void *data)
2856 {
2857     i915_device_t *device = data;
2858 
2859     if (device->last_vbo)
2860 	intel_bo_destroy (&device->intel, device->last_vbo);
2861 
2862     i915_batch_cleanup (device);
2863 
2864     intel_device_fini (&device->intel);
2865     free (device);
2866 }
2867 
/* The canned setup commands must exactly fill batch_header, and
 * batch_base must follow it immediately in memory, so that the header
 * and batch form one contiguous buffer for submission. */
COMPILE_TIME_ASSERT (sizeof (i915_batch_setup) == sizeof (((i915_device_t *)0)->batch_header));
COMPILE_TIME_ASSERT (offsetof (i915_device_t, batch_base) == offsetof (i915_device_t, batch_header) + sizeof (i915_batch_setup));
2870 
2871 cairo_drm_device_t *
_cairo_drm_i915_device_create(int fd,dev_t dev_id,int vendor_id,int chip_id)2872 _cairo_drm_i915_device_create (int fd, dev_t dev_id, int vendor_id, int chip_id)
2873 {
2874     i915_device_t *device;
2875     cairo_status_t status;
2876     uint64_t gtt_size;
2877     int n;
2878 
2879     if (! intel_info (fd, &gtt_size))
2880 	return NULL;
2881 
2882     device = _cairo_malloc (sizeof (i915_device_t));
2883     if (device == NULL)
2884 	return (cairo_drm_device_t *) _cairo_device_create_in_error (CAIRO_STATUS_NO_MEMORY);
2885 
2886     status = intel_device_init (&device->intel, fd);
2887     if (unlikely (status)) {
2888 	free (device);
2889 	return (cairo_drm_device_t *) _cairo_device_create_in_error (status);
2890     }
2891 
2892     device->debug = 0;
2893     if (getenv ("CAIRO_DEBUG_DRM") != NULL)
2894 	device->debug = I915_DEBUG_SYNC;
2895 
2896     n = intel_get (fd, I915_PARAM_NUM_FENCES_AVAIL);
2897     if (n == 0)
2898 	n = 8;
2899     device->batch.fences_avail = n - 2; /* conservative */
2900 
2901     device->batch.gtt_avail_size = device->intel.gtt_avail_size / 4;
2902     device->batch.est_gtt_size = I915_BATCH_SIZE;
2903     device->batch.total_gtt_size = I915_BATCH_SIZE;
2904     device->batch.exec_count = 0;
2905     device->batch.reloc_count = 0;
2906     device->batch.used = 0;
2907     device->batch.fences = 0;
2908 
2909     memcpy (device->batch_header, i915_batch_setup, sizeof (i915_batch_setup));
2910     device->vbo = 0;
2911     device->vbo_offset = 0;
2912     device->vbo_used = 0;
2913     device->vertex_index = 0;
2914     device->vertex_count = 0;
2915     device->last_vbo = NULL;
2916 
2917     for (n = 0; n < ARRAY_LENGTH (device->image_caches); n++)
2918 	cairo_list_init (&device->image_caches[n]);
2919 
2920     device->intel.base.surface.create = i915_surface_create;
2921     device->intel.base.surface.create_for_name = i915_surface_create_for_name;
2922     device->intel.base.surface.create_from_cacheable_image = i915_surface_create_from_cacheable_image;
2923 
2924     device->intel.base.surface.flink = _cairo_drm_surface_flink;
2925     device->intel.base.surface.enable_scan_out = i915_surface_enable_scan_out;
2926     device->intel.base.surface.map_to_image = intel_surface_map_to_image;
2927 
2928     device->intel.base.device.flush = i915_device_flush;
2929     device->intel.base.device.throttle = i915_device_throttle;
2930     device->intel.base.device.destroy = i915_device_destroy;
2931 
2932     device->floats_per_vertex = 0;
2933     device->current_source = NULL;
2934     device->current_mask = NULL;
2935     device->current_clip = NULL;
2936 
2937     i915_device_reset (device);
2938 
2939     return _cairo_drm_device_init (&device->intel.base,
2940 				   fd, dev_id, vendor_id, chip_id,
2941 				   16*1024);
2942 }
2943