/* Cairo - a vector graphics library with display and print output
 *
 * Copyright © 2009 Kristian Høgsberg
 * Copyright © 2009 Chris Wilson
 * Copyright © 2009 Intel Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it either under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation
 * (the "LGPL") or, at your option, under the terms of the Mozilla
 * Public License Version 1.1 (the "MPL"). If you do not alter this
 * notice, a recipient may use your version of this file under either
 * the MPL or the LGPL.
 *
 * You should have received a copy of the LGPL along with this library
 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
 * You should have received a copy of the MPL along with this library
 * in the file COPYING-MPL-1.1
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
 * the specific language governing rights and limitations.
 *
 * The Original Code is the cairo graphics library.
 *
 * The Initial Developer of the Original Code is Kristian Høgsberg.
 *
 * Based on the xf86-intel-driver i965 render acceleration code,
 * authored by:
 *    Wang Zhenyu <zhenyu.z.wang@intel.com>
 *    Eric Anholt <eric@anholt.net>
 *    Carl Worth <cworth@redhat.com>
 *    Keith Packard <keithp@keithp.com>
 */

/* XXX
 *
 * FIXME: Use brw_PLN for [DevCTG-B+]
 *
 */

#include "cairoint.h"

#include "cairo-drm-private.h"
#include "cairo-drm-intel-private.h"
#include "cairo-drm-intel-command-private.h"
#include "cairo-drm-intel-ioctl-private.h"
#include "cairo-drm-i965-private.h"

#include "cairo-boxes-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-default-context-private.h"
#include "cairo-error-private.h"
#include "cairo-region-private.h"
#include "cairo-surface-offset-private.h"

#include <sys/ioctl.h>
#include <errno.h>

#define I965_MAX_SIZE 8192

static const cairo_surface_backend_t i965_surface_backend;

static void
i965_stream_init (i965_stream_t *stream,
		  uint8_t *data, uint32_t size,
		  struct i965_pending_relocation *pending, int max_pending,
		  struct drm_i915_gem_relocation_entry *relocations, int max_relocations)
{
    stream->used = stream->committed = 0;
    stream->data = data;
    stream->size = size;
    stream->serial = 1;

    stream->num_pending_relocations = 0;
    stream->max_pending_relocations = max_pending;
    stream->pending_relocations = pending;

    stream->num_relocations = 0;
    stream->max_relocations = max_relocations;
    stream->relocations = relocations;
}

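/* Ensure the bo has an entry in the execbuffer for this batch, charge its
 * size against our GTT budget, and accumulate the read/write domains it
 * will be accessed through. The bo is also queued for flushing.
 */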
static void
i965_add_relocation (i965_device_t *device,
		     intel_bo_t *bo,
		     uint32_t read_domains,
		     uint32_t write_domain)
{
    if (bo->exec == NULL) {
	int i;

	device->exec.gtt_size += bo->base.size;

	i = device->exec.count++;
	assert (i < ARRAY_LENGTH (device->exec.exec));

	device->exec.exec[i].handle = bo->base.handle;
	device->exec.exec[i].relocation_count = 0;
	device->exec.exec[i].relocs_ptr = 0;
	device->exec.exec[i].alignment  = 0;
	device->exec.exec[i].offset = 0;
	device->exec.exec[i].flags  = 0;
	device->exec.exec[i].rsvd1  = 0;
	device->exec.exec[i].rsvd2  = 0;

	device->exec.bo[i] = intel_bo_reference (bo);
	bo->exec = &device->exec.exec[i];
    }

    if (cairo_list_is_empty (&bo->link))
	cairo_list_add_tail (&bo->link, &device->flush);

    assert (write_domain == 0 || bo->batch_write_domain == 0 || bo->batch_write_domain == write_domain);
    bo->batch_read_domains |= read_domains;
    bo->batch_write_domain |= write_domain;
}

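/* Record a relocation: the dword at 'offset' within 'stream' must be fixed
 * up to point 'target_offset' bytes into 'target' once the kernel assigns
 * the target bo its final GTT address. 'presumed_offset' lets the kernel
 * skip the fixup if the bo has not moved.
 */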
void
i965_emit_relocation (i965_device_t *device,
		      i965_stream_t *stream,
		      intel_bo_t *target,
		      uint32_t target_offset,
		      uint32_t read_domains,
		      uint32_t write_domain,
		      uint32_t offset)
{
    int n;

    assert (target_offset < target->base.size);

    i965_add_relocation (device, target, read_domains, write_domain);

    n = stream->num_relocations++;
    assert (n < stream->max_relocations);

    stream->relocations[n].offset = offset;
    stream->relocations[n].delta  = target_offset;
    stream->relocations[n].target_handle   = target->base.handle;
    stream->relocations[n].read_domains    = read_domains;
    stream->relocations[n].write_domain    = write_domain;
    stream->relocations[n].presumed_offset = target->offset;
}

static void
i965_stream_reset (i965_stream_t *stream)
{
    stream->used = stream->committed = 0;
    stream->num_relocations = 0;
    stream->num_pending_relocations = 0;
    if (++stream->serial == 0)
	stream->serial = 1;
}

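/* Flush a stream into a buffer object of its own: now that the stream has
 * a bo, the batch's pending references to it are resolved into proper
 * relocations and the stream's own relocation list is handed to the exec
 * entry. Our local reference is dropped; the execbuffer keeps its own.
 */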
void
i965_stream_commit (i965_device_t *device,
		    i965_stream_t *stream)
{
    intel_bo_t *bo;
    int n;

    assert (stream->used);

    bo = intel_bo_create (&device->intel,
			  stream->used, stream->used,
			  FALSE, I915_TILING_NONE, 0);

    /* apply pending relocations */
    for (n = 0; n < stream->num_pending_relocations; n++) {
	struct i965_pending_relocation *p = &stream->pending_relocations[n];

	i965_emit_relocation (device, &device->batch, bo,
			      p->delta,
			      p->read_domains,
			      p->write_domain,
			      p->offset);
	if (bo->offset)
	    *(uint32_t *) (device->batch.data + p->offset) = bo->offset + p->delta;
    }

    intel_bo_write (&device->intel, bo, 0, stream->used, stream->data);

    if (stream->num_relocations) {
	assert (bo->exec != NULL);
	bo->exec->relocs_ptr = (uintptr_t) stream->relocations;
	bo->exec->relocation_count = stream->num_relocations;
    }

    intel_bo_destroy (&device->intel, bo);

    i965_stream_reset (stream);
}

static void
sf_states_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->sf_states, entry);
    _cairo_freelist_free (&device->sf_freelist, entry);
}

static void
cc_offsets_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->cc_states, entry);
    _cairo_freelist_free (&device->cc_freelist, entry);
}

static void
wm_kernels_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->wm_kernels, entry);
    _cairo_freelist_free (&device->wm_kernel_freelist, entry);
}

static void
wm_states_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->wm_states, entry);
    _cairo_freelist_free (&device->wm_state_freelist, entry);
}

static void
wm_bindings_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->wm_bindings, entry);
    _cairo_freelist_free (&device->wm_binding_freelist, entry);
}

static void
samplers_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->samplers, entry);
    _cairo_freelist_free (&device->sampler_freelist, entry);
}

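/* Drop every piece of cached pipeline state (SF, CC, WM kernels, states
 * and bindings, samplers) along with the bo that held it; all of it will
 * be re-emitted on demand into a fresh general-state buffer.
 */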
void
i965_general_state_reset (i965_device_t *device)
{
    _cairo_hash_table_foreach (device->sf_states,
			       sf_states_pluck,
			       device);

    _cairo_hash_table_foreach (device->cc_states,
			       cc_offsets_pluck,
			       device);

    _cairo_hash_table_foreach (device->wm_kernels,
			       wm_kernels_pluck,
			       device);

    _cairo_hash_table_foreach (device->wm_states,
			       wm_states_pluck,
			       device);

    _cairo_hash_table_foreach (device->wm_bindings,
			       wm_bindings_pluck,
			       device);

    _cairo_hash_table_foreach (device->samplers,
			       samplers_pluck,
			       device);

    device->vs_offset = (uint32_t) -1;
    device->border_color_offset = (uint32_t) -1;

    if (device->general_state != NULL) {
	intel_bo_destroy (&device->intel, device->general_state);
	device->general_state = NULL;
    }
}

static void
i965_device_reset (i965_device_t *device)
{
    device->exec.count = 0;
    device->exec.gtt_size = I965_VERTEX_SIZE +
	                    I965_SURFACE_SIZE +
			    I965_GENERAL_SIZE +
			    I965_BATCH_SIZE;

    device->sf_state.entry.hash = (uint32_t) -1;
    device->wm_state.entry.hash = (uint32_t) -1;
    device->wm_binding.entry.hash = (uint32_t) -1;
    device->cc_state.entry.hash = (uint32_t) -1;

    device->target = NULL;
    device->source = NULL;
    device->mask = NULL;
    device->clip = NULL;

    device->draw_rectangle = (uint32_t) -1;

    device->vertex_type = (uint32_t) -1;
    device->vertex_size = 0;
    device->rectangle_size   = 0;
    device->last_vertex_size = 0;

    device->constants = NULL;
    device->constants_size = 0;

    device->have_urb_fences = FALSE;
}

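/* Submit the accumulated execbuffer to the kernel, retrying the ioctl on
 * EINTR and dumping the batch on failure. Afterwards each bo's per-batch
 * state is cleared and our references are released.
 */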
static cairo_status_t
i965_exec (i965_device_t *device, uint32_t offset)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    cairo_status_t status = CAIRO_STATUS_SUCCESS;
    int ret, i;

    execbuf.buffers_ptr = (uintptr_t) device->exec.exec;
    execbuf.buffer_count = device->exec.count;
    execbuf.batch_start_offset = offset;
    execbuf.batch_len = device->batch.used;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.num_cliprects = 0;
    execbuf.cliprects_ptr = 0;
    execbuf.flags = I915_GEM_3D_PIPELINE;
    execbuf.rsvd1 = 0;
    execbuf.rsvd2 = 0;

#if 0
    printf ("exec: offset=%d, length=%d, buffers=%d\n",
	    offset, device->batch.used, device->exec.count);
    intel_dump_batchbuffer ((uint32_t *) device->batch.data,
			    device->batch.used,
			    device->intel.base.chip_id);
#endif

    ret = 0;
    do {
	ret = ioctl (device->intel.base.fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    } while (ret != 0 && errno == EINTR);
    if (unlikely (ret)) {
	if (errno == ENOMEM)
	    status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	else
	    status = _cairo_error (CAIRO_STATUS_DEVICE_ERROR);

	fprintf (stderr, "Batch submission failed: %d\n", errno);
	fprintf (stderr, "   gtt size: %zd/%zd\n",
		 device->exec.gtt_size, device->intel.gtt_avail_size);

	fprintf (stderr, "   %d buffers:\n",
		 device->exec.count);
	for (i = 0; i < device->exec.count; i++) {
	    fprintf (stderr, "     exec[%d] = %d\n",
		     i, device->exec.bo[i]->base.size);
	}

	intel_dump_batchbuffer ((uint32_t *) device->batch.data,
				device->batch.used,
				device->intel.base.chip_id);
    }

    /* XXX any write target within the batch should now be in error */
    for (i = 0; i < device->exec.count; i++) {
	intel_bo_t *bo = device->exec.bo[i];
	cairo_bool_t ret;

	bo->offset = device->exec.exec[i].offset;
	bo->exec = NULL;
	bo->batch_read_domains = 0;
	bo->batch_write_domain = 0;

	if (bo->virtual)
	    intel_bo_unmap (bo);
	bo->cpu = FALSE;

	if (bo->purgeable)
	    ret = intel_bo_madvise (&device->intel, bo, I915_MADV_DONTNEED);
	    /* ignore immediate notification of purging */

	cairo_list_del (&bo->cache_list);
	cairo_list_init (&bo->link);
	intel_bo_destroy (&device->intel, bo);
    }
    cairo_list_init (&device->flush);

    device->exec.count = 0;

    return status;
}

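/* Round up to the next power-of-two multiple of 8 KiB, presumably so that
 * allocations fall into a small set of reusable bucket sizes: the
 * bit-smearing propagates the top set bit of (v - 1) into all lower bits
 * before the final increment. E.g. next_bo_size (10000) == 16384, while
 * next_bo_size (8192) == 8192.
 */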
static inline uint32_t
next_bo_size (uint32_t v)
{
    v = (v + 8191) / 8192;

    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;

    return v * 8192;
}

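/* Like i965_stream_commit (), but write the stream into an existing bo at
 * the given offset so that several streams can share a single allocation;
 * the batch's pending references are rebased by the same offset.
 */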
static void
_copy_to_bo_and_apply_relocations (i965_device_t *device,
				   intel_bo_t *bo,
				   i965_stream_t *stream,
				   uint32_t offset)
{
    int n;

    intel_bo_write (&device->intel, bo,
		    offset, stream->used,
		    stream->data);

    for (n = 0; n < stream->num_pending_relocations; n++) {
	struct i965_pending_relocation *p = &stream->pending_relocations[n];

	i965_emit_relocation (device, &device->batch, bo,
			      p->delta + offset,
			      p->read_domains,
			      p->write_domain,
			      p->offset);

	if (bo->offset) {
	    *(uint32_t *) (device->batch.data + p->offset) =
		bo->offset + p->delta + offset;
	}
    }
}

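/* Flush everything to the GPU. To keep the number of buffer objects (and
 * relocation lists) down, the surface state is packed into the tail of
 * the general-state bo when it fits, and the vertex, surface and batch
 * streams share a single bo whenever their aligned total stays within the
 * allocation bucket of the largest stream.
 */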
cairo_status_t
i965_device_flush (i965_device_t *device)
{
    cairo_status_t status;
    uint32_t aligned, max;
    intel_bo_t *bo;
    int n;

    if (device->batch.used == 0)
	return CAIRO_STATUS_SUCCESS;

    i965_flush_vertices (device);

    OUT_BATCH (MI_BATCH_BUFFER_END);
    /* Emit a padding dword if we aren't going to be quad-word aligned. */
    if (device->batch.used & 4)
	OUT_BATCH (MI_NOOP);

#if 0
    printf ("device flush: vertex=%d, constant=%d, surface=%d, general=%d, batch=%d\n",
	    device->vertex.used,
	    device->constant.used,
	    device->surface.used,
	    device->general.used,
	    device->batch.used);
#endif

    /* can we pack the surface state into the tail of the general state? */
    if (device->general.used == device->general.committed) {
	if (device->general.used) {
	    assert (device->general.num_pending_relocations == 1);
	    assert (device->general_state != NULL);
	    i965_emit_relocation (device, &device->batch,
				  device->general_state,
				  device->general.pending_relocations[0].delta,
				  device->general.pending_relocations[0].read_domains,
				  device->general.pending_relocations[0].write_domain,
				  device->general.pending_relocations[0].offset);

	    if (device->general_state->offset) {
		*(uint32_t *) (device->batch.data +
			       device->general.pending_relocations[0].offset) =
		    device->general_state->offset +
		    device->general.pending_relocations[0].delta;
	    }
	}
    } else {
	assert (device->general.num_pending_relocations == 1);
	if (device->general_state != NULL) {
	    intel_bo_destroy (&device->intel, device->general_state);
	    device->general_state = NULL;
	}

	bo = intel_bo_create (&device->intel,
			      device->general.used,
			      device->general.used,
			      FALSE, I915_TILING_NONE, 0);
	if (unlikely (bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	aligned = (device->general.used + 31) & -32;
	if (device->surface.used &&
	    aligned + device->surface.used <= bo->base.size)
	{
	    _copy_to_bo_and_apply_relocations (device, bo, &device->general, 0);
	    _copy_to_bo_and_apply_relocations (device, bo, &device->surface, aligned);

	    if (device->surface.num_relocations) {
		for (n = 0; n < device->surface.num_relocations; n++)
		    device->surface.relocations[n].offset += aligned;

		assert (bo->exec != NULL);
		bo->exec->relocs_ptr = (uintptr_t) device->surface.relocations;
		bo->exec->relocation_count = device->surface.num_relocations;
	    }

	    i965_stream_reset (&device->surface);
	}
	else
	{
	    _copy_to_bo_and_apply_relocations (device, bo, &device->general, 0);
	}

	/* Note we don't reset the general state, just mark what data we've committed. */
	device->general.committed = device->general.used;
	device->general_state = bo;
    }
    device->general.num_pending_relocations = 0;

    /* Combine vertex+constant+surface+batch streams? */
    max = aligned = device->vertex.used;
    if (device->surface.used) {
	aligned = (aligned + 63) & -64;
	aligned += device->surface.used;
	if (device->surface.used > max)
	    max = device->surface.used;
    }
    aligned = (aligned + 63) & -64;
    aligned += device->batch.used;
    if (device->batch.used > max)
	max = device->batch.used;
    if (aligned <= next_bo_size (max)) {
	int batch_num_relocations;

	if (aligned <= 8192)
	    max = aligned;

	bo = intel_bo_create (&device->intel,
			      max, max,
			      FALSE, I915_TILING_NONE, 0);
	if (unlikely (bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	assert (aligned <= bo->base.size);

	if (device->vertex.used)
	    _copy_to_bo_and_apply_relocations (device, bo, &device->vertex, 0);

	aligned = device->vertex.used;

	batch_num_relocations = device->batch.num_relocations;
	if (device->surface.used) {
	    aligned = (aligned + 63) & -64;
	    _copy_to_bo_and_apply_relocations (device, bo, &device->surface, aligned);

	    batch_num_relocations = device->batch.num_relocations;
	    if (device->surface.num_relocations) {
		assert (device->batch.num_relocations + device->surface.num_relocations < device->batch.max_relocations);

		memcpy (device->batch.relocations + device->batch.num_relocations,
			device->surface.relocations,
			sizeof (device->surface.relocations[0]) * device->surface.num_relocations);

		for (n = 0; n < device->surface.num_relocations; n++)
		    device->batch.relocations[device->batch.num_relocations + n].offset += aligned;

		device->batch.num_relocations += device->surface.num_relocations;
	    }

	    aligned += device->surface.used;
	}

	aligned = (aligned + 63) & -64;
	intel_bo_write (&device->intel, bo,
			aligned, device->batch.used,
			device->batch.data);

	for (n = 0; n < batch_num_relocations; n++)
	    device->batch.relocations[n].offset += aligned;

	if (device->exec.bo[device->exec.count-1] == bo) {
	    assert (bo->exec == &device->exec.exec[device->exec.count-1]);

	    bo->exec->relocation_count = device->batch.num_relocations;
	    bo->exec->relocs_ptr = (uintptr_t) device->batch.relocations;
	    intel_bo_destroy (&device->intel, bo);
	} else {
	    assert (bo->exec == NULL);

	    n = device->exec.count++;
	    device->exec.exec[n].handle = bo->base.handle;
	    device->exec.exec[n].relocation_count = device->batch.num_relocations;
	    device->exec.exec[n].relocs_ptr = (uintptr_t) device->batch.relocations;
	    device->exec.exec[n].alignment = 0;
	    device->exec.exec[n].offset = 0;
	    device->exec.exec[n].flags = 0;
	    device->exec.exec[n].rsvd1 = 0;
	    device->exec.exec[n].rsvd2 = 0;

	    /* transfer ownership to the exec */
	    device->exec.bo[n] = bo;
	}
    } else {
	i965_stream_commit (device, &device->vertex);
	if (device->surface.used)
	    i965_stream_commit (device, &device->surface);

	bo = intel_bo_create (&device->intel,
			      device->batch.used, device->batch.used,
			      FALSE, I915_TILING_NONE, 0);
	if (unlikely (bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	intel_bo_write (&device->intel, bo,
			0, device->batch.used,
			device->batch.data);

	n = device->exec.count++;
	device->exec.exec[n].handle = bo->base.handle;
	device->exec.exec[n].relocation_count = device->batch.num_relocations;
	device->exec.exec[n].relocs_ptr = (uintptr_t) device->batch.relocations;
	device->exec.exec[n].alignment = 0;
	device->exec.exec[n].offset = 0;
	device->exec.exec[n].flags = 0;
	device->exec.exec[n].rsvd1 = 0;
	device->exec.exec[n].rsvd2 = 0;

	/* transfer ownership to the exec */
	device->exec.bo[n] = bo;
	aligned = 0;
    }

    status = i965_exec (device, aligned);

    i965_stream_reset (&device->vertex);
    i965_stream_reset (&device->surface);
    i965_stream_reset (&device->batch);

    intel_glyph_cache_unpin (&device->intel);
    intel_snapshot_cache_thaw (&device->intel);

    i965_device_reset (device);

    return status;
}

static cairo_surface_t *
i965_surface_create_similar (void *abstract_other,
			     cairo_content_t content,
			     int width, int height)
{
    i965_surface_t *other;
    cairo_format_t format;

    if (width > 8192 || height > 8192)
	return NULL;

    other = abstract_other;
    if (content == other->intel.drm.base.content)
	format = other->intel.drm.format;
    else
	format = _cairo_format_from_content (content);

    return i965_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
					 format,
					 width, height,
					 I965_TILING_DEFAULT, TRUE);
}

static cairo_status_t
i965_surface_finish (void *abstract_surface)
{
    i965_surface_t *surface = abstract_surface;

    return intel_surface_finish (&surface->intel);
}

static cairo_status_t
i965_surface_flush (void *abstract_surface, unsigned flags)
{
    i965_surface_t *surface = abstract_surface;
    cairo_status_t status = CAIRO_STATUS_SUCCESS;

    if (flags)
	return CAIRO_STATUS_SUCCESS;

    if (surface->intel.drm.fallback != NULL)
	return intel_surface_flush (abstract_surface);

    /* Forgo flushing on finish as the user cannot access the surface directly. */
    if (! surface->intel.drm.base.finished &&
	to_intel_bo (surface->intel.drm.bo)->exec != NULL)
    {
	status = cairo_device_acquire (surface->intel.drm.base.device);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    i965_device_t *device;

	    device = i965_device (surface);
	    status = i965_device_flush (device);
	    cairo_device_release (&device->intel.base.base);
	}
    }

    return status;
}

/* rasterisation */

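/* Feed a set of boxes through the rectangular scan converter and onto the
 * span renderer; this is the fallback used when the boxes are not
 * pixel-aligned and so require a real mask.
 */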
static cairo_status_t
_composite_boxes_spans (void				*closure,
			cairo_span_renderer_t		*renderer,
			const cairo_rectangle_int_t	*extents)
{
    cairo_boxes_t *boxes = closure;
    cairo_rectangular_scan_converter_t converter;
    struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;

    _cairo_rectangular_scan_converter_init (&converter, extents);
    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	cairo_box_t *box = chunk->base;
	int i;

	for (i = 0; i < chunk->count; i++) {
	    status = _cairo_rectangular_scan_converter_add_box (&converter, &box[i], 1);
	    if (unlikely (status))
		goto CLEANUP;
	}
    }

    status = converter.base.generate (&converter.base, renderer);

  CLEANUP:
    converter.base.destroy (&converter.base);
    return status;
}

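/* For operators that affect the destination outside the drawn shape, clear
 * the difference between the unbounded and bounded extents: the whole
 * unbounded area if nothing was drawn, otherwise the four strips (top,
 * left, right, bottom) that surround the bounded rectangle.
 */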
cairo_status_t
i965_fixup_unbounded (i965_surface_t *dst,
		      const cairo_composite_rectangles_t *extents,
		      cairo_clip_t *clip)
{
    i965_shader_t shader;
    i965_device_t *device;
    cairo_status_t status;

    i965_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR);

    if (clip != NULL) {
	cairo_region_t *clip_region = NULL;

	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	assert (clip_region == NULL);

	if (status == CAIRO_INT_STATUS_UNSUPPORTED)
	    i965_shader_set_clip (&shader, clip);
    } else {
	if (extents->bounded.width  == extents->unbounded.width &&
	    extents->bounded.height == extents->unbounded.height)
	{
	    return CAIRO_STATUS_SUCCESS;
	}
    }

    status = i965_shader_acquire_pattern (&shader,
					  &shader.source,
					  &_cairo_pattern_clear.base,
					  &extents->unbounded);
    if (unlikely (status)) {
	i965_shader_fini (&shader);
	return status;
    }

    device = i965_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    status = i965_shader_commit (&shader, device);
    if (unlikely (status)) {
	goto BAIL;
    }

    if (extents->bounded.width == 0 || extents->bounded.height == 0) {
	i965_shader_add_rectangle (&shader,
				   extents->unbounded.x,
				   extents->unbounded.y,
				   extents->unbounded.width,
				   extents->unbounded.height);
    } else { /* top */
	if (extents->bounded.y != extents->unbounded.y) {
	    cairo_rectangle_int_t rect;

	    rect.x = extents->unbounded.x;
	    rect.y = extents->unbounded.y;
	    rect.width  = extents->unbounded.width;
	    rect.height = extents->bounded.y - rect.y;

	    i965_shader_add_rectangle (&shader,
				       rect.x, rect.y,
				       rect.width, rect.height);
	}

	/* left */
	if (extents->bounded.x != extents->unbounded.x) {
	    cairo_rectangle_int_t rect;

	    rect.x = extents->unbounded.x;
	    rect.y = extents->bounded.y;
	    rect.width  = extents->bounded.x - extents->unbounded.x;
	    rect.height = extents->bounded.height;

	    i965_shader_add_rectangle (&shader,
				       rect.x, rect.y,
				       rect.width, rect.height);
	}

	/* right */
	if (extents->bounded.x + extents->bounded.width != extents->unbounded.x + extents->unbounded.width) {
	    cairo_rectangle_int_t rect;

	    rect.x = extents->bounded.x + extents->bounded.width;
	    rect.y = extents->bounded.y;
	    rect.width  = extents->unbounded.x + extents->unbounded.width - rect.x;
	    rect.height = extents->bounded.height;

	    i965_shader_add_rectangle (&shader,
				       rect.x, rect.y,
				       rect.width, rect.height);
	}

	/* bottom */
	if (extents->bounded.y + extents->bounded.height != extents->unbounded.y + extents->unbounded.height) {
	    cairo_rectangle_int_t rect;

	    rect.x = extents->unbounded.x;
	    rect.y = extents->bounded.y + extents->bounded.height;
	    rect.width  = extents->unbounded.width;
	    rect.height = extents->unbounded.y + extents->unbounded.height - rect.y;

	    i965_shader_add_rectangle (&shader,
				       rect.x, rect.y,
				       rect.width, rect.height);
	}
    }

    i965_shader_fini (&shader);
  BAIL:
    cairo_device_release (&device->intel.base.base);
    return status;
}

static cairo_status_t
i965_fixup_unbounded_boxes (i965_surface_t *dst,
			    const cairo_composite_rectangles_t *extents,
			    cairo_clip_t *clip,
			    cairo_boxes_t *boxes)
{
    cairo_boxes_t clear;
    cairo_box_t box;
    cairo_region_t *clip_region = NULL;
    cairo_status_t status;
    struct _cairo_boxes_chunk *chunk;
    i965_shader_t shader;
    int i;

    if (boxes->num_boxes <= 1)
	return i965_fixup_unbounded (dst, extents, clip);

    i965_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR);
    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	if (status == CAIRO_INT_STATUS_UNSUPPORTED)
	    i965_shader_set_clip (&shader, clip);
    }

    status = i965_shader_acquire_pattern (&shader,
					  &shader.source,
					  &_cairo_pattern_clear.base,
					  &extents->unbounded);
    if (unlikely (status)) {
	i965_shader_fini (&shader);
	return status;
    }

    _cairo_boxes_init (&clear);

    box.p1.x = _cairo_fixed_from_int (extents->unbounded.x + extents->unbounded.width);
    box.p1.y = _cairo_fixed_from_int (extents->unbounded.y);
    box.p2.x = _cairo_fixed_from_int (extents->unbounded.x);
    box.p2.y = _cairo_fixed_from_int (extents->unbounded.y + extents->unbounded.height);

    if (clip_region == NULL) {
	cairo_boxes_t tmp;

	_cairo_boxes_init (&tmp);

	status = _cairo_boxes_add (&tmp, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	tmp.chunks.next = &boxes->chunks;
	tmp.num_boxes += boxes->num_boxes;

	status = _cairo_bentley_ottmann_tessellate_boxes (&tmp,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);

	tmp.chunks.next = NULL;
    } else {
	pixman_box32_t *pbox;

	pbox = pixman_region32_rectangles (&clip_region->rgn, &i);
	_cairo_boxes_limit (&clear, (cairo_box_t *) pbox, i);

	status = _cairo_boxes_add (&clear, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		status = _cairo_boxes_add (&clear, &chunk->base[i]);
		if (unlikely (status)) {
		    _cairo_boxes_fini (&clear);
		    return status;
		}
	    }
	}

	status = _cairo_bentley_ottmann_tessellate_boxes (&clear,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);
    }

    if (likely (status == CAIRO_STATUS_SUCCESS && clear.num_boxes)) {
	i965_device_t *device;

	device = i965_device (dst);
	status = cairo_device_acquire (&device->intel.base.base);
	if (unlikely (status))
	    goto err_shader;

	status = i965_shader_commit (&shader, device);
	if (unlikely (status))
	    goto err_device;

	for (chunk = &clear.chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_part (chunk->base[i].p1.x);
		int y1 = _cairo_fixed_integer_part (chunk->base[i].p1.y);
		int x2 = _cairo_fixed_integer_part (chunk->base[i].p2.x);
		int y2 = _cairo_fixed_integer_part (chunk->base[i].p2.y);

		i965_shader_add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	    }
	}

err_device:
	cairo_device_release (&device->intel.base.base);
err_shader:
	i965_shader_fini (&shader);
    }

    _cairo_boxes_fini (&clear);

    return status;
}

static cairo_status_t
_composite_boxes (i965_surface_t *dst,
		  cairo_operator_t op,
		  const cairo_pattern_t *pattern,
		  cairo_boxes_t *boxes,
		  cairo_antialias_t antialias,
		  cairo_clip_t *clip,
		  const cairo_composite_rectangles_t *extents)
{
    cairo_bool_t need_clip_surface = FALSE;
    cairo_region_t *clip_region = NULL;
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    i965_shader_t shader;
    i965_device_t *device;
    int i;

    /* If the boxes are not pixel-aligned, we will need to compute a real mask */
    if (antialias != CAIRO_ANTIALIAS_NONE) {
	if (! boxes->is_pixel_aligned)
	    return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    i965_shader_init (&shader, dst, op);

    status = i965_shader_acquire_pattern (&shader,
					  &shader.source,
					  pattern,
					  &extents->bounded);
    if (unlikely (status))
	return status;

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
	if (need_clip_surface)
	    i965_shader_set_clip (&shader, clip);
    }

    device = i965_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto err_shader;

    status = i965_shader_commit (&shader, i965_device (dst));
    if (unlikely (status))
	goto err_device;

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x2 > x1 && y2 > y1)
		i965_shader_add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	}
    }

    if (! extents->is_bounded)
	status = i965_fixup_unbounded_boxes (dst, extents, clip, boxes);

  err_device:
    cairo_device_release (&device->intel.base.base);
  err_shader:
    i965_shader_fini (&shader);

    return status;
}

static cairo_status_t
_clip_and_composite_boxes (i965_surface_t *dst,
			   cairo_operator_t op,
			   const cairo_pattern_t *src,
			   cairo_boxes_t *boxes,
			   cairo_antialias_t antialias,
			   const cairo_composite_rectangles_t *extents,
			   cairo_clip_t *clip)
{
    cairo_status_t status;

    if (boxes->num_boxes == 0) {
	if (extents->is_bounded)
	    return CAIRO_STATUS_SUCCESS;

	return i965_fixup_unbounded (dst, extents, clip);
    }

    /* Use a fast path if the boxes are pixel aligned */
    status = _composite_boxes (dst, op, src, boxes, antialias, clip, extents);
    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	return status;

    /* Otherwise render the boxes via an implicit mask and composite in the usual
     * fashion.
     */
    return i965_clip_and_composite_spans (dst, op, src, antialias,
					  _composite_boxes_spans, boxes,
					  extents, clip);
}

static cairo_int_status_t
i965_surface_paint (void			*abstract_dst,
		    cairo_operator_t		 op,
		    const cairo_pattern_t	*source,
		    cairo_clip_t		*clip)
{
    i965_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    cairo_boxes_t boxes;
    cairo_box_t *clip_boxes = boxes.boxes_embedded;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    int num_boxes = ARRAY_LENGTH (boxes.boxes_embedded);
    cairo_status_t status;

    /* XXX unsupported operators? use pixel shader blending, eventually */

    status = _cairo_composite_rectangles_init_for_paint (&extents,
							 dst->intel.drm.width,
							 dst->intel.drm.height,
							 op, source,
							 clip);
    if (unlikely (status))
	return status;

    if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    _cairo_boxes_init_for_array (&boxes, clip_boxes, num_boxes);
    status = _clip_and_composite_boxes (dst, op, source,
					&boxes, CAIRO_ANTIALIAS_DEFAULT,
					&extents, clip);
    if (clip_boxes != boxes.boxes_embedded)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}

static cairo_int_status_t
i965_surface_mask (void				*abstract_dst,
		   cairo_operator_t		 op,
		   const cairo_pattern_t	*source,
		   const cairo_pattern_t	*mask,
		   cairo_clip_t			*clip)
{
    i965_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    i965_shader_t shader;
    i965_device_t *device;
    cairo_clip_t local_clip;
    cairo_region_t *clip_region = NULL;
    cairo_bool_t need_clip_surface = FALSE;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_mask (&extents,
							dst->intel.drm.width,
							dst->intel.drm.height,
							op, source, mask, clip);
    if (unlikely (status))
	return status;

    if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (clip != NULL && extents.is_bounded) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	status = _cairo_clip_rectangle (clip, &extents.bounded);
	if (unlikely (status)) {
	    _cairo_clip_fini (&local_clip);
	    return status;
	}

	have_clip = TRUE;
    }

    i965_shader_init (&shader, dst, op);

    status = i965_shader_acquire_pattern (&shader,
					  &shader.source,
					  source,
					  &extents.bounded);
    if (unlikely (status))
	goto err_shader;

    status = i965_shader_acquire_pattern (&shader,
					  &shader.mask,
					  mask,
					  &extents.bounded);
    if (unlikely (status))
	goto err_shader;

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
	if (need_clip_surface)
	    i965_shader_set_clip (&shader, clip);
    }

    device = i965_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto err_shader;

    status = i965_shader_commit (&shader, device);
    if (unlikely (status))
	goto err_device;

    if (clip_region != NULL) {
	unsigned int n, num_rectangles;

	num_rectangles = cairo_region_num_rectangles (clip_region);
	for (n = 0; n < num_rectangles; n++) {
	    cairo_rectangle_int_t rect;

	    cairo_region_get_rectangle (clip_region, n, &rect);

	    i965_shader_add_rectangle (&shader,
				       rect.x, rect.y,
				       rect.width, rect.height);
	}
    } else {
	i965_shader_add_rectangle (&shader,
				   extents.bounded.x,
				   extents.bounded.y,
				   extents.bounded.width,
				   extents.bounded.height);
    }

    if (! extents.is_bounded)
	status = i965_fixup_unbounded (dst, &extents, clip);

  err_device:
    cairo_device_release (&device->intel.base.base);
  err_shader:
    i965_shader_fini (&shader);
    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}

typedef struct {
    cairo_polygon_t		polygon;
    cairo_fill_rule_t		fill_rule;
    cairo_antialias_t		antialias;
} composite_polygon_info_t;

static cairo_status_t
_composite_polygon_spans (void                          *closure,
			  cairo_span_renderer_t		*renderer,
			  const cairo_rectangle_int_t   *extents)
{
    composite_polygon_info_t *info = closure;
    cairo_botor_scan_converter_t converter;
    cairo_status_t status;
    cairo_box_t box;

    box.p1.x = _cairo_fixed_from_int (extents->x);
    box.p1.y = _cairo_fixed_from_int (extents->y);
    box.p2.x = _cairo_fixed_from_int (extents->x + extents->width);
    box.p2.y = _cairo_fixed_from_int (extents->y + extents->height);

    _cairo_botor_scan_converter_init (&converter, &box, info->fill_rule);

    status = converter.base.add_polygon (&converter.base, &info->polygon);
    if (likely (status == CAIRO_STATUS_SUCCESS))
	status = converter.base.generate (&converter.base, renderer);

    converter.base.destroy (&converter.base);

    return status;
}

static cairo_int_status_t
i965_surface_stroke (void			*abstract_dst,
		     cairo_operator_t		 op,
		     const cairo_pattern_t	*source,
		     cairo_path_fixed_t		*path,
		     const cairo_stroke_style_t	*stroke_style,
		     const cairo_matrix_t	*ctm,
		     const cairo_matrix_t	*ctm_inverse,
		     double			 tolerance,
		     cairo_antialias_t		 antialias,
		     cairo_clip_t		*clip)
{
    i965_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_stroke (&extents,
							  dst->intel.drm.width,
							  dst->intel.drm.height,
							  op, source,
							  path, stroke_style, ctm,
							  clip);
    if (unlikely (status))
	return status;

    if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    if (_cairo_path_fixed_stroke_is_rectilinear (path)) {
	cairo_boxes_t boxes;

	_cairo_boxes_init (&boxes);
	_cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
	status = _cairo_path_fixed_stroke_rectilinear_to_boxes (path,
								stroke_style,
								ctm,
								&boxes);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = _clip_and_composite_boxes (dst, op, source,
						&boxes, antialias,
						&extents, clip);
	}

	_cairo_boxes_fini (&boxes);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    goto CLEANUP_BOXES;
    }

    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_stroke_to_polygon (path,
						  stroke_style,
						  ctm, ctm_inverse,
						  tolerance,
						  &info.polygon);
    if (unlikely (status))
	goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
	cairo_rectangle_int_t rect;

	_cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
	if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
	    goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
	if (! extents.is_bounded)
	    status = i965_fixup_unbounded (dst, &extents, clip);
    } else {
	info.fill_rule = CAIRO_FILL_RULE_WINDING;
	info.antialias = antialias;
	status = i965_clip_and_composite_spans (dst, op, source, antialias,
						_composite_polygon_spans, &info,
						&extents, clip);
    }

CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

CLEANUP_BOXES:
    if (clip_boxes != boxes_stack)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}

static cairo_int_status_t
i965_surface_fill (void			*abstract_dst,
		   cairo_operator_t	 op,
		   const cairo_pattern_t *source,
		   cairo_path_fixed_t	*path,
		   cairo_fill_rule_t	 fill_rule,
		   double		 tolerance,
		   cairo_antialias_t	 antialias,
		   cairo_clip_t		*clip)
{
    i965_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_fill (&extents,
							dst->intel.drm.width,
							dst->intel.drm.height,
							op, source, path,
							clip);
    if (unlikely (status))
	return status;

    if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    assert (! _cairo_path_fixed_fill_is_empty (path));

    if (_cairo_path_fixed_fill_is_rectilinear (path)) {
	cairo_boxes_t boxes;

	_cairo_boxes_init (&boxes);
	_cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
	status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
							      fill_rule,
							      &boxes);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = _clip_and_composite_boxes (dst, op, source,
						&boxes, antialias,
						&extents, clip);
	}

	_cairo_boxes_fini (&boxes);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    goto CLEANUP_BOXES;
    }

    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_fill_to_polygon (path, tolerance, &info.polygon);
    if (unlikely (status))
	goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
	cairo_rectangle_int_t rect;

	_cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
	if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
	    goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
	if (! extents.is_bounded)
	    status = i965_fixup_unbounded (dst, &extents, clip);
    } else {
	info.fill_rule = fill_rule;
	info.antialias = antialias;
	status = i965_clip_and_composite_spans (dst, op, source, antialias,
						_composite_polygon_spans, &info,
						&extents, clip);
    }

CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

CLEANUP_BOXES:
    if (clip_boxes != boxes_stack)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}

static const cairo_surface_backend_t i965_surface_backend = {
    CAIRO_SURFACE_TYPE_DRM,
    _cairo_default_context_create,

    i965_surface_create_similar,
    i965_surface_finish,

    NULL,
    intel_surface_acquire_source_image,
    intel_surface_release_source_image,

    NULL, NULL, NULL,
    NULL, /* composite */
    NULL, /* fill */
    NULL, /* trapezoids */
    NULL, /* span */
    NULL, /* check-span */

    NULL, /* copy_page */
    NULL, /* show_page */
    _cairo_drm_surface_get_extents,
    NULL, /* old-glyphs */
    _cairo_drm_surface_get_font_options,

    i965_surface_flush,
    NULL, /* mark_dirty */
    intel_scaled_font_fini,
    intel_scaled_glyph_fini,

    i965_surface_paint,
    i965_surface_mask,
    i965_surface_stroke,
    i965_surface_fill,
    i965_surface_glyphs,
};

static void
i965_surface_init (i965_surface_t *surface,
		   cairo_drm_device_t *device,
		   cairo_format_t format,
		   int width, int height)
{
    intel_surface_init (&surface->intel, &i965_surface_backend, device,
			format, width, height);
    surface->stream = 0;
}

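/* Tiled surfaces need their pitch aligned; round the stride up to a
 * multiple of 128 bytes (untiled strides are used as-is).
 */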
static inline int cairo_const
i965_tiling_stride (uint32_t tiling, int stride)
{
    if (tiling == I915_TILING_NONE)
	return stride;

    return (stride + 127) & -128;
}

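/* Pad the height to a whole number of tile rows: X tiles are 8 rows tall
 * and Y tiles 32 rows tall on this hardware; untiled buffers are merely
 * padded to an even number of rows.
 */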
static inline int cairo_const
i965_tiling_height (uint32_t tiling, int height)
{
    switch (tiling) {
    default:
    case I915_TILING_NONE: return (height + 1) & -2;
    case I915_TILING_X: return (height + 7) & -8;
    case I915_TILING_Y: return (height + 31) & -32;
    }
}

cairo_surface_t *
i965_surface_create_internal (cairo_drm_device_t *base_dev,
			      cairo_format_t format,
			      int width, int height,
			      uint32_t tiling,
			      cairo_bool_t gpu_target)
{
    i965_surface_t *surface;
    cairo_status_t status_ignored;

    surface = _cairo_malloc (sizeof (i965_surface_t));
    if (unlikely (surface == NULL))
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i965_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
	uint32_t size, stride;
	intel_bo_t *bo;

	width = (width + 3) & -4;
	stride = cairo_format_stride_for_width (surface->intel.drm.format, width);
	stride = (stride + 63) & ~63;
	stride = i965_tiling_stride (tiling, stride);
	surface->intel.drm.stride = stride;

	height = i965_tiling_height (tiling, height);
	assert (height <= I965_MAX_SIZE);

	size = stride * height;
	bo = intel_bo_create (to_intel_device (&base_dev->base),
			      size, size,
			      gpu_target, tiling, stride);
	if (bo == NULL) {
	    status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
	    free (surface);
	    return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
	}

	bo->tiling = tiling;
	bo->stride = stride;
	surface->intel.drm.bo = &bo->base;

	assert (bo->base.size >= (size_t) stride*height);
    }

    return &surface->intel.drm.base;
}

static cairo_surface_t *
i965_surface_create (cairo_drm_device_t *device,
		     cairo_format_t format, int width, int height)
{
    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    return i965_surface_create_internal (device, format, width, height,
	                                 I965_TILING_DEFAULT, TRUE);
}

1630 static cairo_surface_t *
i965_surface_create_for_name(cairo_drm_device_t * base_dev,unsigned int name,cairo_format_t format,int width,int height,int stride)1631 i965_surface_create_for_name (cairo_drm_device_t *base_dev,
1632 			      unsigned int name,
1633 			      cairo_format_t format,
1634 			      int width, int height, int stride)
1635 {
1636     i965_device_t *device;
1637     i965_surface_t *surface;
1638     cairo_status_t status_ignored;
1639     int min_stride;
1640 
1641     min_stride = cairo_format_stride_for_width (format, (width + 3) & -4);
1642     if (stride < min_stride || stride & 63)
1643 	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
1644 
1645     if (format == CAIRO_FORMAT_A1)
1646 	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
1647 
    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    surface = _cairo_malloc (sizeof (i965_surface_t));
    if (unlikely (surface == NULL))
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i965_surface_init (surface, base_dev, format, width, height);

    device = (i965_device_t *) base_dev;
    surface->intel.drm.bo = &intel_bo_create_for_name (&device->intel, name)->base;
    if (unlikely (surface->intel.drm.bo == NULL)) {
	status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
	free (surface);
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
    }

    surface->intel.drm.stride = stride;

    return &surface->intel.drm.base;
}

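/* Prepare the surface for display.  The scan-out engine cannot read
 * Y-tiled buffers, so if the current bo is not X-tiled we paint the
 * surface into an X-tiled clone and then steal the clone's bo. */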
static cairo_status_t
i965_surface_enable_scan_out (void *abstract_surface)
{
    i965_surface_t *surface = abstract_surface;
    intel_bo_t *bo;

    if (unlikely (surface->intel.drm.bo == NULL))
	return _cairo_error (CAIRO_STATUS_INVALID_SIZE);

    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo->tiling != I915_TILING_X) {
	i965_device_t *device = i965_device (surface);
	cairo_surface_pattern_t pattern;
	cairo_surface_t *clone;
	cairo_status_t status;

	clone = i965_surface_create_internal (&device->intel.base,
					      surface->intel.drm.format,
					      surface->intel.drm.width,
					      surface->intel.drm.height,
					      I915_TILING_X,
					      TRUE);
	if (unlikely (clone->status))
	    return clone->status;

	/* 2D blit? */
	_cairo_pattern_init_for_surface (&pattern, &surface->intel.drm.base);
	pattern.base.filter = CAIRO_FILTER_NEAREST;

	status = _cairo_surface_paint (clone,
				       CAIRO_OPERATOR_SOURCE,
				       &pattern.base,
				       NULL);

	_cairo_pattern_fini (&pattern.base);

	if (unlikely (status)) {
	    cairo_surface_destroy (clone);
	    return status;
	}

	/* swap buffer objects */
	surface->intel.drm.bo = ((cairo_drm_surface_t *) clone)->bo;
	((cairo_drm_surface_t *) clone)->bo = &bo->base;
	bo = to_intel_bo (surface->intel.drm.bo);

	cairo_surface_destroy (clone);
    }

    if (unlikely (bo->tiling == I915_TILING_Y))
	return _cairo_error (CAIRO_STATUS_INVALID_FORMAT); /* XXX */

    return CAIRO_STATUS_SUCCESS;
}

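/* Submit any batch queued on this device, serializing against other
 * users of the cairo device. */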
static cairo_int_status_t
_i965_device_flush (cairo_drm_device_t *device)
{
    cairo_status_t status;

    if (unlikely (device->base.finished))
	return CAIRO_STATUS_SUCCESS;

    status = cairo_device_acquire (&device->base);
    if (unlikely (status))
	return status;

    status = i965_device_flush ((i965_device_t *) device);

    cairo_device_release (&device->base);

    return status;
}

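/* Flush and then throttle, so that the CPU does not queue an
 * unbounded amount of rendering ahead of the GPU. */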
static cairo_int_status_t
_i965_device_throttle (cairo_drm_device_t *device)
{
    cairo_status_t status;

    status = cairo_device_acquire (&device->base);
    if (unlikely (status))
	return status;

    status = i965_device_flush ((i965_device_t *) device);
    intel_throttle ((intel_device_t *) device);

    cairo_device_release (&device->base);

    return status;
}

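/* Free the cached fixed-function state (SF, CC, WM kernels/states/
 * bindings, samplers and their freelists) before shutting down the
 * underlying intel device. */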
static void
_i965_device_destroy (void *base)
{
    i965_device_t *device = base;

    i965_device_reset (device);
    i965_general_state_reset (device);

    _cairo_hash_table_destroy (device->sf_states);
    _cairo_hash_table_destroy (device->samplers);
    _cairo_hash_table_destroy (device->cc_states);
    _cairo_hash_table_destroy (device->wm_kernels);
    _cairo_hash_table_destroy (device->wm_states);
    _cairo_hash_table_destroy (device->wm_bindings);

    _cairo_freelist_fini (&device->sf_freelist);
    _cairo_freelist_fini (&device->cc_freelist);
    _cairo_freelist_fini (&device->wm_kernel_freelist);
    _cairo_freelist_fini (&device->wm_state_freelist);
    _cairo_freelist_fini (&device->wm_binding_freelist);
    _cairo_freelist_fini (&device->sampler_freelist);

    intel_device_fini (&device->intel);
    free (device);
}

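/* Keys for the wm_kernels and samplers caches are fully encoded in
 * the hash value itself, so equality of hashes is equality of keys. */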
static cairo_bool_t
hash_equal (const void *A, const void *B)
{
    const cairo_hash_entry_t *a = A, *b = B;
    return a->hash == b->hash;
}

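/* Create the i965 DRM device: fill in the surface and device
 * vtables, build the fixed-function state caches, and initialize
 * the batch, surface, general and vertex streams. */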
cairo_drm_device_t *
_cairo_drm_i965_device_create (int fd, dev_t dev, int vendor_id, int chip_id)
{
    i965_device_t *device;
    uint64_t gtt_size;
    cairo_status_t status;

    if (! intel_info (fd, &gtt_size))
	return NULL;

    device = _cairo_malloc (sizeof (i965_device_t));
    if (unlikely (device == NULL))
	return (cairo_drm_device_t *) _cairo_device_create_in_error (CAIRO_STATUS_NO_MEMORY);

    status = intel_device_init (&device->intel, fd);
    if (unlikely (status))
	goto CLEANUP;

    device->is_g4x = IS_G4X (chip_id);
    /* device->is_g5x = IS_G5X (chip_id); */

    device->intel.base.surface.create = i965_surface_create;
    device->intel.base.surface.create_for_name = i965_surface_create_for_name;
    device->intel.base.surface.create_from_cacheable_image = NULL;
    device->intel.base.surface.enable_scan_out = i965_surface_enable_scan_out;

    device->intel.base.device.flush = _i965_device_flush;
    device->intel.base.device.throttle = _i965_device_throttle;
    device->intel.base.device.destroy = _i965_device_destroy;

    device->sf_states = _cairo_hash_table_create (i965_sf_state_equal);
    if (unlikely (device->sf_states == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_INTEL;
    }

    _cairo_freelist_init (&device->sf_freelist,
			  sizeof (struct i965_sf_state));

    device->cc_states = _cairo_hash_table_create (i965_cc_state_equal);
    if (unlikely (device->cc_states == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_SF;
    }

    _cairo_freelist_init (&device->cc_freelist,
			  sizeof (struct i965_cc_state));

    device->wm_kernels = _cairo_hash_table_create (hash_equal);
    if (unlikely (device->wm_kernels == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_CC;
    }

    _cairo_freelist_init (&device->wm_kernel_freelist,
			  sizeof (struct i965_wm_kernel));

    device->wm_states = _cairo_hash_table_create (i965_wm_state_equal);
    if (unlikely (device->wm_states == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_WM_KERNEL;
    }

    _cairo_freelist_init (&device->wm_state_freelist,
			  sizeof (struct i965_wm_state));

    device->wm_bindings = _cairo_hash_table_create (i965_wm_binding_equal);
    if (unlikely (device->wm_bindings == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_WM_STATE;
    }

    _cairo_freelist_init (&device->wm_binding_freelist,
			  sizeof (struct i965_wm_binding));

    device->samplers = _cairo_hash_table_create (hash_equal);
    if (unlikely (device->samplers == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto CLEANUP_WM_BINDING;
    }

    _cairo_freelist_init (&device->sampler_freelist,
			  sizeof (struct i965_sampler));

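    /* Four streams feed the kernel execbuffer: the batch itself,
     * plus surface state, general (dynamic) state and vertex data
     * buffers that the batch references through relocations. */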
    i965_stream_init (&device->batch,
		      device->batch_base, sizeof (device->batch_base),
		      NULL, 0,
		      device->batch_relocations,
		      ARRAY_LENGTH (device->batch_relocations));

    i965_stream_init (&device->surface,
		      device->surface_base, sizeof (device->surface_base),
		      device->surface_pending_relocations,
		      ARRAY_LENGTH (device->surface_pending_relocations),
		      device->surface_relocations,
		      ARRAY_LENGTH (device->surface_relocations));

    i965_stream_init (&device->general,
		      device->general_base, sizeof (device->general_base),
		      device->general_pending_relocations,
		      ARRAY_LENGTH (device->general_pending_relocations),
		      NULL, 0);

    i965_stream_init (&device->vertex,
		      device->vertex_base, sizeof (device->vertex_base),
		      device->vertex_pending_relocations,
		      ARRAY_LENGTH (device->vertex_pending_relocations),
		      NULL, 0);

    cairo_list_init (&device->flush);
    i965_device_reset (device);
    device->vs_offset = (uint32_t) -1;
    device->border_color_offset = (uint32_t) -1;
    device->general_state = NULL;

    return _cairo_drm_device_init (&device->intel.base,
				   fd, dev, vendor_id, chip_id,
				   I965_MAX_SIZE);

  CLEANUP_WM_BINDING:
    _cairo_hash_table_destroy (device->wm_bindings);
  CLEANUP_WM_STATE:
    _cairo_hash_table_destroy (device->wm_states);
  CLEANUP_WM_KERNEL:
    _cairo_hash_table_destroy (device->wm_kernels);
  CLEANUP_CC:
    _cairo_hash_table_destroy (device->cc_states);
  CLEANUP_SF:
    _cairo_hash_table_destroy (device->sf_states);
  CLEANUP_INTEL:
    intel_device_fini (&device->intel);
  CLEANUP:
    free (device);
    return (cairo_drm_device_t *) _cairo_device_create_in_error (status);
}