/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_gem_execbuffer.c,v 1.3 2012/05/28 13:58:08 kib Exp $
 */

#include <sys/limits.h>
#include <sys/sfbuf.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"

struct change_domains {
	uint32_t invalidate_domains;
	uint32_t flush_domains;
	uint32_t flush_rings;
	uint32_t flips;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
				  struct intel_ring_buffer *ring,
				  struct change_domains *cd)
{
	uint32_t invalidate_domains = 0, flush_domains = 0;

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->base.pending_write_domain == 0)
		obj->base.pending_read_domains |= obj->base.read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->base.write_domain &&
	    (((obj->base.write_domain != obj->base.pending_read_domains ||
	       obj->ring != ring)) ||
	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
		flush_domains |= obj->base.write_domain;
		invalidate_domains |=
			obj->base.pending_read_domains & ~obj->base.write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	if (obj->base.pending_write_domain)
		cd->flips |= atomic_read(&obj->pending_flip);

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
		obj->base.pending_write_domain = obj->base.write_domain;

	cd->invalidate_domains |= invalidate_domains;
	cd->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= intel_ring_flag(obj->ring);
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= intel_ring_flag(ring);
}

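/*
 * Hash table mapping an execbuffer handle to its drm_i915_gem_object,
 * so relocation targets can be looked up without walking the whole
 * object list.  The bucket count is a power of two; "and" holds the
 * corresponding index mask.
 */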
struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

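/*
 * Size the table to the largest power of two not exceeding the object
 * count, capped at half a page worth of bucket heads.
 */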
static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	while (count > size)
		count >>= 1;
#if 0
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
#else
	eb = kmalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     DRM_I915_GEM, M_WAITOK | M_ZERO);
#endif
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

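/* Walk the bucket chain for the object with a matching handle. */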
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	drm_free(eb, DRM_I915_GEM);
}

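/*
 * Apply a single relocation: look up the target object, validate the
 * requested domains and the relocation address, and, if the presumed
 * offset is stale, rewrite the dword at reloc->offset with the target's
 * GTT address.  The write goes through an sf_buf when the object is in
 * the CPU domain, or through a write-combining aperture mapping
 * otherwise.
 */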
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_offset = to_intel_bo(target_obj)->gtt_offset;

#if WATCH_RELOC
	DRM_INFO("%s: obj %p offset %08x target %d "
		 "read %08x write %08x gtt %08x "
		 "presumed %08x delta %08x\n",
		 __func__,
		 obj,
		 (int) reloc->offset,
		 (int) reloc->target_handle,
		 (int) reloc->read_domains,
		 (int) reloc->write_domain,
		 (int) target_offset,
		 (int) reloc->presumed_offset,
		 reloc->delta);
#endif

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_DEBUG("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_DEBUG("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && (curthread->td_flags & TDF_NOFAULT))
		return -EFAULT;

	reloc->delta += target_offset;
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
		uint32_t page_offset = reloc->offset & PAGE_MASK;
		char *vaddr;
		struct sf_buf *sf;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)]);
		if (sf == NULL)
			return (-ENOMEM);
		vaddr = (void *)sf_buf_kva(sf);

		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		sf_buf_free(sf);
	} else {
		uint32_t *reloc_entry;
		char *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			return ret;

		/*
		 * Map the page containing the relocation we're going
		 * to perform.
		 */
		reloc->offset += obj->gtt_offset;
		reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
		    ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
		reloc_entry = (uint32_t *)(reloc_page + (reloc->offset &
		    PAGE_MASK));
		*(volatile uint32_t *)reloc_entry = reloc->delta;
		pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

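/*
 * Fast-path relocations for one object: stream the entries in from
 * user space with nofault copies and write each updated presumed
 * offset straight back.  A fault aborts with -EFAULT so that the
 * caller can fall back to the slow path.
 */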
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
    struct eb_objects *eb)
{
	struct drm_i915_gem_relocation_entry *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	struct drm_i915_gem_relocation_entry reloc;
	int i, ret;

	user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
	for (i = 0; i < entry->relocation_count; i++) {
		ret = -copyin_nofault(user_relocs + i, &reloc, sizeof(reloc));
		if (ret != 0)
			return (ret);

		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
		if (ret != 0)
			return (ret);

		ret = -copyout_nofault(&reloc.presumed_offset,
		    &user_relocs[i].presumed_offset,
		    sizeof(reloc.presumed_offset));
		if (ret != 0)
			return (ret);
	}

	return (0);
}

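/*
 * Slow-path variant: the relocation entries have already been copied
 * into kernel memory, so faults are no longer a concern.
 */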
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
    struct eb_objects *eb, struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	thread_t td = curthread;
	int ret;
	int pflags;

	/* Try to move as many of the relocation targets off the active list
	 * to avoid unnecessary fallbacks to the slow path, as we cannot wait
	 * for the retirement with pagefaults disabled.
	 */
	i915_gem_retire_requests(dev);

	ret = 0;
	pflags = td->td_flags & TDF_NOFAULT;
	atomic_set_int(&td->td_flags, TDF_NOFAULT);

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the device lock lest the user pass in the relocations
	 * contained within a mmapped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the device lock again. Obviously this is bad.
	 */

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret != 0)
			break;
	}

	if ((pflags & TDF_NOFAULT) == 0)
		atomic_clear_int(&td->td_flags, TDF_NOFAULT);

	return (ret);
}

#define __EXEC_OBJECT_HAS_FENCE (1<<31)

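/*
 * Pin an object into the GTT at the alignment its exec entry demands
 * and, on pre-gen4 hardware, acquire (or deliberately clear) a fence
 * register for tiled access.  A held fence is recorded in the entry
 * flags so the reservation pass can release it afterwards.
 */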
static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable =
		entry->relocation_count ? true : need_fence;

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
	if (ret)
		return ret;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			if (obj->tiling_mode) {
				ret = i915_gem_object_get_fence(obj);
				if (ret)
					goto err_unpin;

				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
				i915_gem_object_pin_fence(obj);
			} else {
				ret = i915_gem_object_put_fence(obj);
				if (ret)
					goto err_unpin;
			}
			obj->pending_fenced_gpu_access = true;
		}
	}

	entry->offset = obj->gtt_offset;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
	return ret;
}

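/*
 * Reserve GTT space for every object in the execbuffer, placing
 * objects that must be mappable/fenceable at the head of the list and
 * retrying with an eviction pass when the aperture is too fragmented.
 */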
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	drm_i915_private_t *dev_priv;
	struct drm_i915_gem_object *obj;
	int ret, retry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	struct list_head ordered_objects;

	dev_priv = ring->dev->dev_private;
	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable =
			entry->relocation_count ? true : need_fence;

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects and obtain
	 *     a fence register if required.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable =
				entry->relocation_count ? true : need_fence;

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = pin_and_fence_object(obj, ring);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = pin_and_fence_object(obj, ring);
			if (ret) {
				int ret_ignore;

				/* This can potentially raise a harmless
				 * -EINVAL if we failed to bind in the above
				 * call. It cannot raise -EINTR since we know
				 * that the bo is freshly bound and so will
				 * not need to be flushed or waited upon.
				 */
				ret_ignore = i915_gem_object_unbind(obj);
				(void)ret_ignore;
				if (obj->gtt_space != NULL)
					kprintf("%s: gtt_space\n", __func__);
				break;
			}
		}

		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry;

			if (!obj->gtt_space)
				continue;

			entry = obj->exec_entry;
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				i915_gem_object_unpin_fence(obj);
				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
			}

			i915_gem_object_unpin(obj);

			/* ... and ensure ppgtt mapping exists if needed. */
			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
						       obj, obj->cache_level);

				obj->has_aliasing_ppgtt_mapping = 1;
			}
		}

		if (ret != -ENOSPC || retry > 1)
			return ret;

		/* First attempt, just clear anything that is purgeable.
		 * Second attempt, clear the entire GTT.
		 */
		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;

		retry++;
	} while (1);

err:
	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
		struct drm_i915_gem_exec_object2 *entry;

		if (!obj->gtt_space)
			continue;

		entry = obj->exec_entry;
		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
			i915_gem_object_unpin_fence(obj);
			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
		}

		i915_gem_object_unpin(obj);
	}

	return ret;
}

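/*
 * Slow-path relocation: drop every object reference, release the
 * struct lock and copy all relocation lists into kernel memory where
 * faults are harmless, then retake the lock, re-look up the objects
 * and apply the relocations from the kernel copy.
 */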
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
    struct drm_file *file, struct intel_ring_buffer *ring,
    struct list_head *objects, struct eb_objects *eb,
    struct drm_i915_gem_exec_object2 *exec, int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	DRM_UNLOCK(dev);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = kmalloc(count * sizeof(*reloc_offset), DRM_I915_GEM,
	    M_WAITOK | M_ZERO);
	reloc = kmalloc(total * sizeof(*reloc), DRM_I915_GEM, M_WAITOK | M_ZERO);

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry *user_relocs;

		user_relocs = (void *)(uintptr_t)exec[i].relocs_ptr;
		ret = -copyin(user_relocs, reloc + total,
		    exec[i].relocation_count * sizeof(*reloc));
		if (ret != 0) {
			DRM_LOCK(dev);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		DRM_LOCK(dev);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
		    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free(reloc, DRM_I915_GEM);
	drm_free(reloc_offset, DRM_I915_GEM);
	return ret;
}

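/*
 * Emit the flushes accumulated during the domain transitions: a
 * chipset flush for CPU writes, a store fence for GTT writes, and a
 * ring flush for every ring with GPU domains to flush or invalidate.
 */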
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
			  uint32_t invalidate_domains,
			  uint32_t flush_domains,
			  uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		cpu_sfence();

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		for (i = 0; i < I915_NUM_RINGS; i++)
			if (flush_rings & (1 << i)) {
				ret = i915_gem_flush_ring(&dev_priv->ring[i],
				    invalidate_domains, flush_domains);
				if (ret)
					return ret;
			}
	}

	return 0;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

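/*
 * Move every object into its pending GPU domains: accumulate the
 * required flushes and invalidations, emit them in one go, wait for
 * outstanding page flips and synchronise with the rings the objects
 * were last active on.
 */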
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct change_domains cd;
	int ret;

	memset(&cd, 0, sizeof(cd));
	list_for_each_entry(obj, objects, exec_list)
		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

	if (cd.invalidate_domains | cd.flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 cd.invalidate_domains,
			 cd.flush_domains);
#endif
		ret = i915_gem_execbuffer_flush(ring->dev,
						cd.invalidate_domains,
						cd.flush_domains,
						cd.flush_rings);
		if (ret)
			return ret;
	}

	if (cd.flips) {
		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
		if (ret)
			return ret;
	}

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;
	}

	return 0;
}

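/* The batch must begin on an 8-byte boundary and be a multiple of 8 bytes long. */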
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

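/*
 * Wire down the user pages backing every relocation list so that the
 * fast path may touch them with page faults disabled.  On success,
 * *map holds one array of held pages per exec entry (NULL when the
 * entry has no relocations).
 */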
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
    vm_page_t ***map)
{
	vm_page_t *ma;
	int i, length, page_count;

	/* XXXKIB various limits checking is missing here */
	*map = kmalloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
	for (i = 0; i < count; i++) {
		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
		    sizeof(struct drm_i915_gem_relocation_entry);
		if (length == 0) {
			(*map)[i] = NULL;
			continue;
		}
		/*
		 * Since both start and end of the relocation region
		 * may not be aligned on the page boundary, be
		 * conservative and request a page slot for each
		 * partial page.  Thus +2.
		 */
		page_count = howmany(length, PAGE_SIZE) + 2;
		ma = (*map)[i] = kmalloc(page_count * sizeof(vm_page_t),
		    DRM_I915_GEM, M_WAITOK | M_ZERO);
		if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
		    exec[i].relocs_ptr, length, VM_PROT_READ | VM_PROT_WRITE,
		    ma, page_count) == -1) {
			drm_free(ma, DRM_I915_GEM);
			(*map)[i] = NULL;
			return (-EFAULT);
		}
	}

	return 0;
}

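/*
 * The batch has been submitted: commit the pending domains and put
 * every object on the ring's active list; objects with a write domain
 * are additionally marked dirty and queued for a post-batch flush.
 */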
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	uint32_t old_read, old_write;

	list_for_each_entry(obj, objects, exec_list) {
		old_read = obj->base.read_domains;
		old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			list_move_tail(&obj->gpu_write_list,
				       &ring->gpu_write_list);
			intel_mark_busy(ring->dev);
		}
	}
}

int i915_gem_sync_exec_requests;

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

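/*
 * Debugging aid, gated by the i915_fix_mi_batchbuffer_end tunable below:
 * map the tail of the batch through the aperture and, if the final
 * dword is not MI_BATCH_BUFFER_END, patch one in, after the batch if
 * the object has room and otherwise over the last command.
 */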
static void
i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj,
    uint32_t batch_start_offset, uint32_t batch_len)
{
	char *mkva;
	uint64_t po_r, po_w;
	uint32_t cmd;

	po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
	    batch_start_offset + batch_len;
	if (batch_len > 0)
		po_r -= 4;
	mkva = pmap_mapdev_attr(trunc_page(po_r), 2 * PAGE_SIZE,
	    PAT_WRITE_COMBINING);
	po_r &= PAGE_MASK;
	cmd = *(uint32_t *)(mkva + po_r);

	if (cmd != MI_BATCH_BUFFER_END) {
		/*
		 * batch_len != 0 due to the check at the start of
		 * i915_gem_do_execbuffer
		 */
		if (batch_obj->base.size > batch_start_offset + batch_len) {
			po_w = po_r + 4;
			/* DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END !\n"); */
		} else {
			po_w = po_r;
			DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo cmd !\n");
		}
		*(uint32_t *)(mkva + po_w) = MI_BATCH_BUFFER_END;
	}

	pmap_unmapdev((vm_offset_t)mkva, 2 * PAGE_SIZE);
}

int i915_fix_mi_batchbuffer_end = 0;

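/*
 * On the gen7 render ring, zero the four streamed-output write offset
 * registers with MI_LOAD_REGISTER_IMM; a no-op everywhere else.
 */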
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

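/*
 * Core of the execbuffer ioctls: validate the arguments, look up and
 * reserve every object, apply the relocations, flush the objects into
 * their GPU domains and dispatch the batch on the selected ring.
 */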
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	vm_page_t **relocs_ma;
	u32 exec_start, exec_len;
	u32 mask;
	u32 flags;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_DEBUG("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	if (args->batch_len == 0)
		return (0);

	ret = validate_exec_list(exec, args->buffer_count, &relocs_ma);
	if (ret)
		goto pre_struct_lock_err;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		if (!HAS_BSD(dev)) {
			DRM_DEBUG("execbuf with invalid ring (BSD)\n");
			ret = -EINVAL;
			goto pre_struct_lock_err;
		}
		ring = &dev_priv->ring[VCS];
		break;
	case I915_EXEC_BLT:
		if (!HAS_BLT(dev)) {
			DRM_DEBUG("execbuf with invalid ring (BLT)\n");
			ret = -EINVAL;
			goto pre_struct_lock_err;
		}
		ring = &dev_priv->ring[BCS];
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		ret = -EINVAL;
		goto pre_struct_lock_err;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		ret = -EINVAL;
		goto pre_struct_lock_err;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				ret = -EINVAL;
				goto pre_struct_lock_err;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				ret = -EINVAL;
				goto pre_struct_lock_err;
			}

			/* The HW changed the meaning of this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		ret = -EINVAL;
		goto pre_struct_lock_err;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		ret = -EINVAL;
		goto pre_struct_lock_err;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			ret = -EINVAL;
			goto pre_struct_lock_err;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			ret = -EINVAL;
			goto pre_struct_lock_err;
		}
		cliprects = kmalloc(sizeof(*cliprects) * args->num_cliprects,
		    DRM_I915_GEM, M_WAITOK | M_ZERO);
		ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
		    sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0)
			goto pre_struct_lock_err;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_struct_lock_err;

	if (dev_priv->mm.suspended) {
		ret = -EBUSY;
		goto struct_lock_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		ret = -ENOMEM;
		goto struct_lock_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;
		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
			    &objects, eb, exec, args->buffer_count);
			DRM_LOCK_ASSERT(dev);
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;

	if (i915_fix_mi_batchbuffer_end) {
		i915_gem_fix_mi_batchbuffer_end(batch_obj,
		    args->batch_start_offset, args->batch_len);
	}

	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box_p(dev, &cliprects[i],
			    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	i915_gem_execbuffer_move_to_active(&objects, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects, struct drm_i915_gem_object,
		    exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
struct_lock_err:
	DRM_UNLOCK(dev);

pre_struct_lock_err:
	for (i = 0; i < args->buffer_count; i++) {
		if (relocs_ma[i] != NULL) {
			vm_page_unhold_pages(relocs_ma[i], howmany(
			    exec[i].relocation_count *
			    sizeof(struct drm_i915_gem_relocation_entry),
			    PAGE_SIZE));
			drm_free(relocs_ma[i], DRM_I915_GEM);
		}
	}
	drm_free(relocs_ma, DRM_I915_GEM);
	drm_free(cliprects, DRM_I915_GEM);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	DRM_DEBUG("buffers_ptr %d buffer_count %d len %08x\n",
	    (int) args->buffers_ptr, args->buffer_count, args->batch_len);

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	/* XXXKIB user-controlled malloc size */
	exec_list = kmalloc(sizeof(*exec_list) * args->buffer_count,
	    DRM_I915_GEM, M_WAITOK);
	exec2_list = kmalloc(sizeof(*exec2_list) * args->buffer_count,
	    DRM_I915_GEM, M_WAITOK);
	ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
	    sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free(exec_list, DRM_I915_GEM);
		drm_free(exec2_list, DRM_I915_GEM);
		return (ret);
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = -copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr,
		    sizeof(*exec_list) * args->buffer_count);
		if (ret != 0) {
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free(exec_list, DRM_I915_GEM);
	drm_free(exec2_list, DRM_I915_GEM);
	return ret;
}

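/*
 * Modern execbuffer entry point: the object list is already in exec2
 * format, so it can be handed to i915_gem_do_execbuffer directly.
 */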
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	DRM_DEBUG("buffers_ptr %jx buffer_count %d len %08x\n",
	    (uintmax_t)args->buffers_ptr, args->buffer_count, args->batch_len);

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* XXXKIB user-controllable kmalloc size */
	exec2_list = kmalloc(sizeof(*exec2_list) * args->buffer_count,
	    DRM_I915_GEM, M_WAITOK);
	ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
	    sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free(exec2_list, DRM_I915_GEM);
		return (ret);
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = -copyout(exec2_list, (void *)(uintptr_t)args->buffers_ptr,
		    sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free(exec2_list, DRM_I915_GEM);
	return ret;
}