/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/pagemap.h>
#include <asm/cpufeature.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

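/*
 * Execbuffer object lookup table. Buffers are tracked in two ways at once:
 * a flat array when userspace promises dense, index-based handles
 * (I915_EXEC_HANDLE_LUT), or a hash table keyed on the object handle
 * otherwise. eb->and is negative for the flat-array case (its magnitude is
 * the array size) and holds the bucket mask for the hash-table case.
 */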
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

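/*
 * Allocate the lookup table, preferring the flat array when userspace opts
 * in via I915_EXEC_HANDLE_LUT. If that allocation fails (or the flag is
 * absent), fall back to a hash table sized to roughly half a page, shrunk
 * until it is no more than twice the buffer count.
 */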
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, M_DRM, M_NOWAIT);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

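/*
 * Resolve each userspace handle in the exec list to a GEM object and then
 * to a VMA in the target address space, taking a reference on every object.
 * Done in two passes so that the file's table_lock is dropped before VMA
 * creation, which may sleep.
 */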
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	lockmgr(&file->table_lock, LK_EXCLUSIVE);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			lockmgr(&file->table_lock, LK_RELEASE);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			lockmgr(&file->table_lock, LK_RELEASE);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	lockmgr(&file->table_lock, LK_RELEASE);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

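/*
 * Look up a VMA by execbuffer handle, using whichever of the two lookup
 * structures eb_create() chose. Returns NULL for an unknown handle.
 */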
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

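/*
 * Relocations can be written through a CPU mapping whenever that is
 * coherent with what the GPU will read: on LLC platforms, when the object
 * is already in the CPU write domain, or when it is snooped. Otherwise one
 * of the GTT or clflush paths below must be used.
 */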
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

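/*
 * The three relocate_entry_*() helpers below all patch the same value into
 * the batch, differing only in how the page is reached: a plain kmap when
 * the CPU view is coherent, a WC write through the mappable GTT aperture,
 * or a kmap bracketed by clflushes as a last resort. Each must handle a
 * gen8+ 64-bit relocation straddling a page boundary, which is why the
 * upper dword may need a second mapping.
 */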
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = relocation_target(reloc, target_offset);
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & ~PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && (curthread->td_flags & TDF_NOFAULT))
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, target_offset);
	else if (cpu_has_clflush)
		ret = relocate_entry_clflush(obj, reloc, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

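/*
 * Fast-path relocation processing: copy the user's relocation entries into
 * a small on-stack buffer in chunks and apply them, writing back any
 * presumed_offset that changed. Runs with pagefaults disabled, so both
 * copies use the inatomic variants and bail with -EFAULT to punt to the
 * slow path.
 */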
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

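/*
 * Pin a single VMA for execution, translating the exec-object flags into
 * pin flags (GTT binding, mappable aperture, 4G zone, fixed or biased
 * offsets). If pinning into the mappable aperture fails and mappability
 * was only wanted for relocations, retry without it.
 */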
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if ((ret == -ENOSPC  || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_gem_object_pin(obj, vma->vm,
					  entry->alignment,
					  flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!vma->is_ggtt)
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

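/*
 * Check whether an already-bound VMA still satisfies the constraints of
 * this execbuffer (alignment, fixed offset, bias, mappability, 32-bit
 * zone). A misplaced VMA is unbound and rebound during the reservation
 * loop below.
 */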
static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    struct intel_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

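/*
 * Slow-path relocation: the fast path faulted while copying the user's
 * relocation entries, so drop struct_mutex, copy all relocations into
 * kernel memory with pagefaults enabled, then retake the lock, re-reserve
 * the buffers and apply the relocations from the kernel copy.
 * presumed_offset is poisoned in the user entries first so a later
 * execbuffer cannot trust stale values.
 */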
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct intel_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

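/*
 * Flush and synchronise every object in the execbuffer before the batch
 * runs: sync against writes on other rings, clflush anything still dirty
 * in the CPU domain, and invalidate the GPU caches.
 */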
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned other_rings = ~intel_ring_flag(req->ring);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->ring, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(req->ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(req);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

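/*
 * Sanity-check the user's exec list before taking any locks: reject
 * unknown flags, unaligned or non-canonical pinned offsets and
 * non-power-of-two alignments, guard against relocation-count overflow,
 * and prefault the relocation arrays so the later atomic copies are likely
 * to succeed.
 */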
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
#if 0
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;
#endif

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
		int ret = intel_lr_context_deferred_alloc(ctx, ring);
		if (ret) {
			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
			return ERR_PTR(ret);
		}
	}

	return ctx;
}

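/*
 * Commit the domain transitions computed during reservation and mark every
 * VMA (and its object) as active on this request, updating write and fence
 * tracking plus frontbuffer invalidation for render targets.
 */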
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->dirty = 1; /* be paranoid */
		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req);
		if (obj->base.write_domain) {
			i915_gem_request_assign(&obj->last_write_req, req);

			intel_fb_obj_invalidate(obj, ORIGIN_CS);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			i915_gem_request_assign(&obj->last_fenced_req, req);
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = to_i915(ring->dev);
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
	/* Unconditionally force add_request to emit a full flush. */
	params->ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	__i915_add_request(params->request, params->batch_obj, true);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

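/*
 * Run the command parser over the userspace batch, copying the accepted
 * commands into a shadow batch object drawn from the ring's batch pool.
 * On success the shadow is pinned, given a synthetic exec entry and added
 * to the eb list so it is tracked like any other buffer. A chained batch
 * (-EACCES) falls back to the unparsed original.
 */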
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct eb_vmas *eb,
			  struct drm_i915_gem_object *batch_obj,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return shadow_batch_obj;

	ret = i915_parse_cmds(ring,
			      batch_obj,
			      shadow_batch_obj,
			      batch_start_offset,
			      batch_len,
			      is_master);
	if (ret)
		goto err;

	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
	if (ret)
		goto err;

	i915_gem_object_unpin_pages(shadow_batch_obj);

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	drm_gem_object_reference(&shadow_batch_obj->base);
	list_add_tail(&vma->exec_list, &eb->vmas);

	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;

	return shadow_batch_obj;

err:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	if (ret == -EACCES) /* unhandled chained batch */
		return batch_obj;
	else
		return ERR_PTR(ret);
}

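/*
 * Legacy (non-execlist) submission backend: flush objects to the GPU,
 * switch contexts, program the relative-constants mode and optional SOL
 * reset on the ring, then dispatch the batch and retire it.
 */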
int
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *ring = params->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
	     "%s didn't clear reload\n", ring->name);

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch_obj_vm_offset +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch_obj->base.size;

	ret = ring->dispatch_execbuffer(params->request,
					exec_start, exec_len,
					params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);
	i915_gem_execbuffer_retire_commands(params);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The ring index is returned.
 */
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_ring < 0) {
		/* If not, use the ping-pong mechanism to select one. */
		mutex_lock(&dev_priv->dev->struct_mutex);
		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
		mutex_unlock(&dev_priv->dev->struct_mutex);
	}

	return file_priv->bsd_ring;
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

#define I915_USER_RINGS (4)

static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

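/*
 * Map the ring selector in the execbuffer flags to an initialised engine,
 * resolving the BSD1/BSD2 choice (explicit index or the ping-pong default)
 * on machines with two video engines.
 */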
static int
eb_select_ring(struct drm_i915_private *dev_priv,
	       struct drm_file *file,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct intel_engine_cs **ring)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -EINVAL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return -EINVAL;
		}

		*ring = &dev_priv->ring[_VCS(bsd_idx)];
	} else {
		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
	}

	if (!intel_ring_initialized(*ring)) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	return 0;
}

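/*
 * Main execbuffer pathway shared by the execbuffer and execbuffer2 ioctls:
 * validate the arguments, look up the context and buffers, reserve and
 * relocate them, optionally shadow the batch through the command parser,
 * and hand the prepared request to the submission backend.
 */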
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *req = NULL;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	ret = eb_select_ring(dev_priv, file, args, &ring);
	if (ret)
		return ret;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (ring->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				 ring->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &dev_priv->gtt.base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (i915_needs_cmd_parser(ring) && args->batch_len) {
		struct drm_i915_gem_object *parsed_batch_obj;

		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
						      &shadow_exec_entry,
						      eb,
						      batch_obj,
						      args->batch_start_offset,
						      args->batch_len,
						      file->is_master);
		if (IS_ERR(parsed_batch_obj)) {
			ret = PTR_ERR(parsed_batch_obj);
			goto err;
		}

		/*
		 * parsed_batch_obj == batch_obj means batch not fully parsed:
		 * Accept, but don't promote to secure.
		 */

		if (parsed_batch_obj != batch_obj) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			batch_obj = parsed_batch_obj;
		}
	}

	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
	} else
		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

	/* Allocate a request for this batch buffer nice and early. */
	req = i915_gem_request_alloc(ring, ctx);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto err_batch_unpin;
	}

	ret = i915_gem_request_add_to_client(req, file);
	if (ret)
		goto err_batch_unpin;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev                     = dev;
	params->file                    = file;
	params->ring                    = ring;
	params->dispatch_flags          = dispatch_flags;
	params->batch_obj               = batch_obj;
	params->ctx                     = ctx;
	params->request                 = req;

	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	/*
	 * If the request was created but not successfully submitted then it
	 * must be freed again. If it was submitted then it is being tracked
	 * on the active request list and no clean up is required here.
	 */
	if (ret && !IS_ERR_OR_NULL(req))
		i915_gem_request_cancel(req);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

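/*
 * execbuffer2 ioctl: copy in the exec list, run the common pathway and
 * copy the (canonicalised) final offsets back to userspace.
 */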
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     M_DRM, M_NOWAIT);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}