1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35 #include <linux/uaccess.h>
36 #include <asm/cpufeature.h>
37 
38 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
39 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
40 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
41 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
42 
43 #define BATCH_OFFSET_BIAS (256*1024)
44 
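/* Execbuf handle -> vma lookup state (a descriptive note on the code below).
 * When userspace passes I915_EXEC_HANDLE_LUT, buffers are referenced by their
 * index in the exec list, so a flat lut[] array suffices and "and" stores the
 * negated buffer count as a marker. Otherwise handles are arbitrary and we
 * fall back to a small hash table; "and" is then the bucket mask.
 */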
45 struct eb_vmas {
46 	struct list_head vmas;
47 	int and;
48 	union {
49 		struct i915_vma *lut[0];
50 		struct hlist_head buckets[0];
51 	};
52 };
53 
54 static struct eb_vmas *
55 eb_create(struct drm_i915_gem_execbuffer2 *args)
56 {
57 	struct eb_vmas *eb = NULL;
58 
59 	if (args->flags & I915_EXEC_HANDLE_LUT) {
60 		unsigned size = args->buffer_count;
61 		size *= sizeof(struct i915_vma *);
62 		size += sizeof(struct eb_vmas);
63 		eb = kmalloc(size, M_DRM, M_NOWAIT);
64 	}
65 
66 	if (eb == NULL) {
67 		unsigned size = args->buffer_count;
68 		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
69 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
70 		while (count > 2*size)
71 			count >>= 1;
72 		eb = kzalloc(count*sizeof(struct hlist_head) +
73 			     sizeof(struct eb_vmas),
74 			     GFP_TEMPORARY);
75 		if (eb == NULL)
76 			return eb;
77 
78 		eb->and = count - 1;
79 	} else
80 		eb->and = -args->buffer_count;
81 
82 	INIT_LIST_HEAD(&eb->vmas);
83 	return eb;
84 }
85 
86 static void
87 eb_reset(struct eb_vmas *eb)
88 {
89 	if (eb->and >= 0)
90 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
91 }
92 
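/* Resolve every exec-object handle into a vma for the target address space.
 * This is done in two passes: first grab object references under the file's
 * table_lock, then drop the lock and look up or create the vmas, transferring
 * ownership onto eb->vmas as we go.
 */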
93 static int
94 eb_lookup_vmas(struct eb_vmas *eb,
95 	       struct drm_i915_gem_exec_object2 *exec,
96 	       const struct drm_i915_gem_execbuffer2 *args,
97 	       struct i915_address_space *vm,
98 	       struct drm_file *file)
99 {
100 	struct drm_i915_gem_object *obj;
101 	struct list_head objects;
102 	int i, ret;
103 
104 	INIT_LIST_HEAD(&objects);
105 	lockmgr(&file->table_lock, LK_EXCLUSIVE);
106 	/* Grab a reference to the object and release the lock so we can look up
107 	 * or create the VMA without using GFP_ATOMIC */

108 	for (i = 0; i < args->buffer_count; i++) {
109 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
110 		if (obj == NULL) {
111 			lockmgr(&file->table_lock, LK_RELEASE);
112 			DRM_DEBUG("Invalid object handle %d at index %d\n",
113 				   exec[i].handle, i);
114 			ret = -ENOENT;
115 			goto err;
116 		}
117 
118 		if (!list_empty(&obj->obj_exec_link)) {
119 			lockmgr(&file->table_lock, LK_RELEASE);
120 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
121 				   obj, exec[i].handle, i);
122 			ret = -EINVAL;
123 			goto err;
124 		}
125 
126 		drm_gem_object_reference(&obj->base);
127 		list_add_tail(&obj->obj_exec_link, &objects);
128 	}
129 	lockmgr(&file->table_lock, LK_RELEASE);
130 
131 	i = 0;
132 	while (!list_empty(&objects)) {
133 		struct i915_vma *vma;
134 
135 		obj = list_first_entry(&objects,
136 				       struct drm_i915_gem_object,
137 				       obj_exec_link);
138 
139 		/*
140 		 * NOTE: We can leak any vmas created here when something fails
141 		 * later on. But that's no issue since vma_unbind can deal with
142 		 * vmas that are not actually bound. And since lookup_or_create
143 		 * is the only interface to get at the vma for a given
144 		 * (obj, vm) pair, we don't run the risk of creating
145 		 * duplicated vmas for the same vm.
146 		 */
147 		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
148 		if (IS_ERR(vma)) {
149 			DRM_DEBUG("Failed to lookup VMA\n");
150 			ret = PTR_ERR(vma);
151 			goto err;
152 		}
153 
154 		/* Transfer ownership from the objects list to the vmas list. */
155 		list_add_tail(&vma->exec_list, &eb->vmas);
156 		list_del_init(&obj->obj_exec_link);
157 
158 		vma->exec_entry = &exec[i];
159 		if (eb->and < 0) {
160 			eb->lut[i] = vma;
161 		} else {
162 			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
163 			vma->exec_handle = handle;
164 			hlist_add_head(&vma->exec_node,
165 				       &eb->buckets[handle & eb->and]);
166 		}
167 		++i;
168 	}
169 
170 	return 0;
171 
172 
173 err:
174 	while (!list_empty(&objects)) {
175 		obj = list_first_entry(&objects,
176 				       struct drm_i915_gem_object,
177 				       obj_exec_link);
178 		list_del_init(&obj->obj_exec_link);
179 		drm_gem_object_unreference(&obj->base);
180 	}
181 	/*
182 	 * Objects already transferred to the vmas list will be unreferenced by
183 	 * eb_destroy.
184 	 */
185 
186 	return ret;
187 }
188 
189 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
190 {
191 	if (eb->and < 0) {
192 		if (handle >= -eb->and)
193 			return NULL;
194 		return eb->lut[handle];
195 	} else {
196 		struct hlist_head *head;
197 		struct i915_vma *vma;
198 
199 		head = &eb->buckets[handle & eb->and];
200 		hlist_for_each_entry(vma, head, exec_node) {
201 			if (vma->exec_handle == handle)
202 				return vma;
203 		}
204 		return NULL;
205 	}
206 }
207 
208 static void
209 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
210 {
211 	struct drm_i915_gem_exec_object2 *entry;
212 	struct drm_i915_gem_object *obj = vma->obj;
213 
214 	if (!drm_mm_node_allocated(&vma->node))
215 		return;
216 
217 	entry = vma->exec_entry;
218 
219 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
220 		i915_gem_object_unpin_fence(obj);
221 
222 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
223 		vma->pin_count--;
224 
225 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
226 }
227 
228 static void eb_destroy(struct eb_vmas *eb)
229 {
230 	while (!list_empty(&eb->vmas)) {
231 		struct i915_vma *vma;
232 
233 		vma = list_first_entry(&eb->vmas,
234 				       struct i915_vma,
235 				       exec_list);
236 		list_del_init(&vma->exec_list);
237 		i915_gem_execbuffer_unreserve_vma(vma);
238 		drm_gem_object_unreference(&vma->obj->base);
239 	}
240 	kfree(eb);
241 }
242 
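/* Relocations can be written through the CPU whenever the write is coherent
 * with the GPU's view of the object: on LLC platforms, when the object is
 * already in the CPU write domain, or when it is not uncached.
 */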
243 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
244 {
245 	return (HAS_LLC(obj->base.dev) ||
246 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
247 		obj->cache_level != I915_CACHE_NONE);
248 }
249 
250 /* Used to convert any address to canonical form.
251  * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
252  * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
253  * addresses to be in a canonical form:
254  * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
255  * canonical form [63:48] == [47]."
256  */
257 #define GEN8_HIGH_ADDRESS_BIT 47
258 static inline uint64_t gen8_canonical_addr(uint64_t address)
259 {
260 	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
261 }
262 
263 static inline uint64_t gen8_noncanonical_addr(uint64_t address)
264 {
265 	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
266 }
267 
268 static inline uint64_t
269 relocation_target(struct drm_i915_gem_relocation_entry *reloc,
270 		  uint64_t target_offset)
271 {
272 	return gen8_canonical_addr((int)reloc->delta + target_offset);
273 }
274 
275 static int
276 relocate_entry_cpu(struct drm_i915_gem_object *obj,
277 		   struct drm_i915_gem_relocation_entry *reloc,
278 		   uint64_t target_offset)
279 {
280 	struct drm_device *dev = obj->base.dev;
281 	uint32_t page_offset = offset_in_page(reloc->offset);
282 	uint64_t delta = relocation_target(reloc, target_offset);
283 	char *vaddr;
284 	int ret;
285 
286 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
287 	if (ret)
288 		return ret;
289 
290 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
291 				reloc->offset >> PAGE_SHIFT));
292 	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
293 
294 	if (INTEL_INFO(dev)->gen >= 8) {
295 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
296 
297 		if (page_offset == 0) {
298 			kunmap_atomic(vaddr);
299 			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
300 			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
301 		}
302 
303 		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
304 	}
305 
306 	kunmap_atomic(vaddr);
307 
308 	return 0;
309 }
310 
311 static int
312 relocate_entry_gtt(struct drm_i915_gem_object *obj,
313 		   struct drm_i915_gem_relocation_entry *reloc,
314 		   uint64_t target_offset)
315 {
316 	struct drm_device *dev = obj->base.dev;
317 	struct drm_i915_private *dev_priv = to_i915(dev);
318 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
319 	uint64_t delta = relocation_target(reloc, target_offset);
320 	uint64_t offset;
321 	void __iomem *reloc_page;
322 	int ret;
323 
324 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
325 	if (ret)
326 		return ret;
327 
328 	ret = i915_gem_object_put_fence(obj);
329 	if (ret)
330 		return ret;
331 
332 	/* Map the page containing the relocation we're going to perform.  */
333 	offset = i915_gem_obj_ggtt_offset(obj);
334 	offset += reloc->offset;
335 	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
336 					      offset & LINUX_PAGE_MASK);
337 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
338 
339 	if (INTEL_INFO(dev)->gen >= 8) {
340 		offset += sizeof(uint32_t);
341 
342 		if (offset_in_page(offset) == 0) {
343 			io_mapping_unmap_atomic(reloc_page);
344 			reloc_page =
345 				io_mapping_map_atomic_wc(ggtt->mappable,
346 							 offset);
347 		}
348 
349 		iowrite32(upper_32_bits(delta),
350 			  reloc_page + offset_in_page(offset));
351 	}
352 
353 	io_mapping_unmap_atomic(reloc_page);
354 
355 	return 0;
356 }
357 
358 static void
359 clflush_write32(void *addr, uint32_t value)
360 {
361 	/* This is not a fast path, so KISS. */
362 	drm_clflush_virt_range(addr, sizeof(uint32_t));
363 	*(uint32_t *)addr = value;
364 	drm_clflush_virt_range(addr, sizeof(uint32_t));
365 }
366 
367 static int
368 relocate_entry_clflush(struct drm_i915_gem_object *obj,
369 		       struct drm_i915_gem_relocation_entry *reloc,
370 		       uint64_t target_offset)
371 {
372 	struct drm_device *dev = obj->base.dev;
373 	uint32_t page_offset = offset_in_page(reloc->offset);
374 	uint64_t delta = relocation_target(reloc, target_offset);
375 	char *vaddr;
376 	int ret;
377 
378 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
379 	if (ret)
380 		return ret;
381 
382 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
383 				reloc->offset >> PAGE_SHIFT));
384 	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
385 
386 	if (INTEL_INFO(dev)->gen >= 8) {
387 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
388 
389 		if (page_offset == 0) {
390 			kunmap_atomic(vaddr);
391 			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
392 			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
393 		}
394 
395 		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
396 	}
397 
398 	kunmap_atomic(vaddr);
399 
400 	return 0;
401 }
402 
403 static int
404 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
405 				   struct eb_vmas *eb,
406 				   struct drm_i915_gem_relocation_entry *reloc)
407 {
408 	struct drm_device *dev = obj->base.dev;
409 	struct drm_gem_object *target_obj;
410 	struct drm_i915_gem_object *target_i915_obj;
411 	struct i915_vma *target_vma;
412 	uint64_t target_offset;
413 	int ret;
414 
415 	/* we already hold a reference to all valid objects */
416 	target_vma = eb_get_vma(eb, reloc->target_handle);
417 	if (unlikely(target_vma == NULL))
418 		return -ENOENT;
419 	target_i915_obj = target_vma->obj;
420 	target_obj = &target_vma->obj->base;
421 
422 	target_offset = gen8_canonical_addr(target_vma->node.start);
423 
424 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
425 	 * pipe_control writes because the gpu doesn't properly redirect them
426 	 * through the ppgtt for non-secure batchbuffers. */
427 	if (unlikely(IS_GEN6(dev) &&
428 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
429 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
430 				    PIN_GLOBAL);
431 		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
432 			return ret;
433 	}
434 
435 	/* Validate that the target is in a valid r/w GPU domain */
436 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
437 		DRM_DEBUG("reloc with multiple write domains: "
438 			  "obj %p target %d offset %d "
439 			  "read %08x write %08x",
440 			  obj, reloc->target_handle,
441 			  (int) reloc->offset,
442 			  reloc->read_domains,
443 			  reloc->write_domain);
444 		return -EINVAL;
445 	}
446 	if (unlikely((reloc->write_domain | reloc->read_domains)
447 		     & ~I915_GEM_GPU_DOMAINS)) {
448 		DRM_DEBUG("reloc with read/write non-GPU domains: "
449 			  "obj %p target %d offset %d "
450 			  "read %08x write %08x",
451 			  obj, reloc->target_handle,
452 			  (int) reloc->offset,
453 			  reloc->read_domains,
454 			  reloc->write_domain);
455 		return -EINVAL;
456 	}
457 
458 	target_obj->pending_read_domains |= reloc->read_domains;
459 	target_obj->pending_write_domain |= reloc->write_domain;
460 
461 	/* If the relocation already has the right value in it, no
462 	 * more work needs to be done.
463 	 */
464 	if (target_offset == reloc->presumed_offset)
465 		return 0;
466 
467 	/* Check that the relocation address is valid... */
468 	if (unlikely(reloc->offset >
469 		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
470 		DRM_DEBUG("Relocation beyond object bounds: "
471 			  "obj %p target %d offset %d size %d.\n",
472 			  obj, reloc->target_handle,
473 			  (int) reloc->offset,
474 			  (int) obj->base.size);
475 		return -EINVAL;
476 	}
477 	if (unlikely(reloc->offset & 3)) {
478 		DRM_DEBUG("Relocation not 4-byte aligned: "
479 			  "obj %p target %d offset %d.\n",
480 			  obj, reloc->target_handle,
481 			  (int) reloc->offset);
482 		return -EINVAL;
483 	}
484 
485 	/* We can't wait for rendering with pagefaults disabled */
486 	if (obj->active && (curthread->td_flags & TDF_NOFAULT))
487 		return -EFAULT;
488 
489 	if (use_cpu_reloc(obj))
490 		ret = relocate_entry_cpu(obj, reloc, target_offset);
491 	else if (obj->map_and_fenceable)
492 		ret = relocate_entry_gtt(obj, reloc, target_offset);
493 	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
494 		ret = relocate_entry_clflush(obj, reloc, target_offset);
495 	else {
496 		WARN_ONCE(1, "Impossible case in relocation handling\n");
497 		ret = -ENODEV;
498 	}
499 
500 	if (ret)
501 		return ret;
502 
503 	/* and update the user's relocation entry */
504 	reloc->presumed_offset = target_offset;
505 
506 	return 0;
507 }
508 
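/* Fast-path relocation processing: the caller has disabled pagefaults, so the
 * user relocation array is pulled in with __copy_from_user_inatomic() in
 * small stack-sized chunks and any updated presumed_offset is written back
 * with __put_user(). A fault here returns -EFAULT and sends us down the slow
 * path instead.
 */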
509 static int
510 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
511 				 struct eb_vmas *eb)
512 {
513 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
514 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
515 	struct drm_i915_gem_relocation_entry __user *user_relocs;
516 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
517 	int remain, ret;
518 
519 	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
520 
521 	remain = entry->relocation_count;
522 	while (remain) {
523 		struct drm_i915_gem_relocation_entry *r = stack_reloc;
524 		int count = remain;
525 		if (count > ARRAY_SIZE(stack_reloc))
526 			count = ARRAY_SIZE(stack_reloc);
527 		remain -= count;
528 
529 		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
530 			return -EFAULT;
531 
532 		do {
533 			u64 offset = r->presumed_offset;
534 
535 			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
536 			if (ret)
537 				return ret;
538 
539 			if (r->presumed_offset != offset &&
540 			    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
541 				return -EFAULT;
542 			}
543 
544 			user_relocs++;
545 			r++;
546 		} while (--count);
547 	}
548 
549 	return 0;
550 #undef N_RELOC
551 }
552 
553 static int
554 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
555 				      struct eb_vmas *eb,
556 				      struct drm_i915_gem_relocation_entry *relocs)
557 {
558 	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
559 	int i, ret;
560 
561 	for (i = 0; i < entry->relocation_count; i++) {
562 		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
563 		if (ret)
564 			return ret;
565 	}
566 
567 	return 0;
568 }
569 
570 static int
571 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
572 {
573 	struct i915_vma *vma;
574 	int ret = 0;
575 
576 	/* This is the fast path and we cannot handle a pagefault whilst
577 	 * holding the struct mutex lest the user pass in the relocations
578 	 * contained within a mmaped bo. In such a case the page
579 	 * fault handler would call i915_gem_fault() and we would try to
580 	 * acquire the struct mutex again. Obviously this is bad and so
581 	 * lockdep complains vehemently.
582 	 */
583 	pagefault_disable();
584 	list_for_each_entry(vma, &eb->vmas, exec_list) {
585 		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
586 		if (ret)
587 			break;
588 	}
589 	pagefault_enable();
590 
591 	return ret;
592 }
593 
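/* True when the mappable requirement was added purely so that a GTT
 * relocation can be performed (no fence is needed); such a pin may be retried
 * without PIN_MAPPABLE if mappable space is exhausted.
 */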
594 static bool only_mappable_for_reloc(unsigned int flags)
595 {
596 	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
597 		__EXEC_OBJECT_NEEDS_MAP;
598 }
599 
600 static int
601 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
602 				struct intel_engine_cs *engine,
603 				bool *need_reloc)
604 {
605 	struct drm_i915_gem_object *obj = vma->obj;
606 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
607 	uint64_t flags;
608 	int ret;
609 
610 	flags = PIN_USER;
611 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
612 		flags |= PIN_GLOBAL;
613 
614 	if (!drm_mm_node_allocated(&vma->node)) {
615 		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
616 		 * limit address to the first 4GBs for unflagged objects.
617 		 */
618 		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
619 			flags |= PIN_ZONE_4G;
620 		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
621 			flags |= PIN_GLOBAL | PIN_MAPPABLE;
622 		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
623 			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
624 		if (entry->flags & EXEC_OBJECT_PINNED)
625 			flags |= entry->offset | PIN_OFFSET_FIXED;
626 		if ((flags & PIN_MAPPABLE) == 0)
627 			flags |= PIN_HIGH;
628 	}
629 
630 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
631 	if ((ret == -ENOSPC  || ret == -E2BIG) &&
632 	    only_mappable_for_reloc(entry->flags))
633 		ret = i915_gem_object_pin(obj, vma->vm,
634 					  entry->alignment,
635 					  flags & ~PIN_MAPPABLE);
636 	if (ret)
637 		return ret;
638 
639 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
640 
641 	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
642 		ret = i915_gem_object_get_fence(obj);
643 		if (ret)
644 			return ret;
645 
646 		if (i915_gem_object_pin_fence(obj))
647 			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
648 	}
649 
650 	if (entry->offset != vma->node.start) {
651 		entry->offset = vma->node.start;
652 		*need_reloc = true;
653 	}
654 
655 	if (entry->flags & EXEC_OBJECT_WRITE) {
656 		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
657 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
658 	}
659 
660 	return 0;
661 }
662 
663 static bool
664 need_reloc_mappable(struct i915_vma *vma)
665 {
666 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
667 
668 	if (entry->relocation_count == 0)
669 		return false;
670 
671 	if (!vma->is_ggtt)
672 		return false;
673 
674 	/* See also use_cpu_reloc() */
675 	if (HAS_LLC(vma->obj->base.dev))
676 		return false;
677 
678 	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
679 		return false;
680 
681 	return true;
682 }
683 
684 static bool
685 eb_vma_misplaced(struct i915_vma *vma)
686 {
687 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
688 	struct drm_i915_gem_object *obj = vma->obj;
689 
690 	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
691 
692 	if (entry->alignment &&
693 	    vma->node.start & (entry->alignment - 1))
694 		return true;
695 
696 	if (entry->flags & EXEC_OBJECT_PINNED &&
697 	    vma->node.start != entry->offset)
698 		return true;
699 
700 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
701 	    vma->node.start < BATCH_OFFSET_BIAS)
702 		return true;
703 
704 	/* avoid costly ping-pong once a batch bo has ended up non-mappable */
705 	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
706 		return !only_mappable_for_reloc(entry->flags);
707 
708 	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
709 	    (vma->node.start + vma->node.size - 1) >> 32)
710 		return true;
711 
712 	return false;
713 }
714 
715 static int
716 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
717 			    struct list_head *vmas,
718 			    struct i915_gem_context *ctx,
719 			    bool *need_relocs)
720 {
721 	struct drm_i915_gem_object *obj;
722 	struct i915_vma *vma;
723 	struct i915_address_space *vm;
724 	struct list_head ordered_vmas;
725 	struct list_head pinned_vmas;
726 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
727 	int retry;
728 
729 	i915_gem_retire_requests_ring(engine);
730 
731 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
732 
733 	INIT_LIST_HEAD(&ordered_vmas);
734 	INIT_LIST_HEAD(&pinned_vmas);
735 	while (!list_empty(vmas)) {
736 		struct drm_i915_gem_exec_object2 *entry;
737 		bool need_fence, need_mappable;
738 
739 		vma = list_first_entry(vmas, struct i915_vma, exec_list);
740 		obj = vma->obj;
741 		entry = vma->exec_entry;
742 
743 		if (ctx->flags & CONTEXT_NO_ZEROMAP)
744 			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
745 
746 		if (!has_fenced_gpu_access)
747 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
748 		need_fence =
749 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
750 			obj->tiling_mode != I915_TILING_NONE;
751 		need_mappable = need_fence || need_reloc_mappable(vma);
752 
753 		if (entry->flags & EXEC_OBJECT_PINNED)
754 			list_move_tail(&vma->exec_list, &pinned_vmas);
755 		else if (need_mappable) {
756 			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
757 			list_move(&vma->exec_list, &ordered_vmas);
758 		} else
759 			list_move_tail(&vma->exec_list, &ordered_vmas);
760 
761 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
762 		obj->base.pending_write_domain = 0;
763 	}
764 	list_splice(&ordered_vmas, vmas);
765 	list_splice(&pinned_vmas, vmas);
766 
767 	/* Attempt to pin all of the buffers into the GTT.
768 	 * This is done in 3 phases:
769 	 *
770 	 * 1a. Unbind all objects that do not match the GTT constraints for
771 	 *     the execbuffer (fenceable, mappable, alignment etc).
772 	 * 1b. Increment pin count for already bound objects.
773 	 * 2.  Bind new objects.
774 	 * 3.  Decrement pin count.
775 	 *
776 	 * This avoids unnecessary unbinding of later objects in order to make
777 	 * room for the earlier objects *unless* we need to defragment.
778 	 */
779 	retry = 0;
780 	do {
781 		int ret = 0;
782 
783 		/* Unbind any ill-fitting objects or pin. */
784 		list_for_each_entry(vma, vmas, exec_list) {
785 			if (!drm_mm_node_allocated(&vma->node))
786 				continue;
787 
788 			if (eb_vma_misplaced(vma))
789 				ret = i915_vma_unbind(vma);
790 			else
791 				ret = i915_gem_execbuffer_reserve_vma(vma,
792 								      engine,
793 								      need_relocs);
794 			if (ret)
795 				goto err;
796 		}
797 
798 		/* Bind fresh objects */
799 		list_for_each_entry(vma, vmas, exec_list) {
800 			if (drm_mm_node_allocated(&vma->node))
801 				continue;
802 
803 			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
804 							      need_relocs);
805 			if (ret)
806 				goto err;
807 		}
808 
809 err:
810 		if (ret != -ENOSPC || retry++)
811 			return ret;
812 
813 		/* Decrement pin count for bound objects */
814 		list_for_each_entry(vma, vmas, exec_list)
815 			i915_gem_execbuffer_unreserve_vma(vma);
816 
817 		ret = i915_gem_evict_vm(vm, true);
818 		if (ret)
819 			return ret;
820 	} while (1);
821 }
822 
823 static int
824 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
825 				  struct drm_i915_gem_execbuffer2 *args,
826 				  struct drm_file *file,
827 				  struct intel_engine_cs *engine,
828 				  struct eb_vmas *eb,
829 				  struct drm_i915_gem_exec_object2 *exec,
830 				  struct i915_gem_context *ctx)
831 {
832 	struct drm_i915_gem_relocation_entry *reloc;
833 	struct i915_address_space *vm;
834 	struct i915_vma *vma;
835 	bool need_relocs;
836 	int *reloc_offset;
837 	int i, total, ret;
838 	unsigned count = args->buffer_count;
839 
840 	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
841 
842 	/* We may process another execbuffer during the unlock... */
843 	while (!list_empty(&eb->vmas)) {
844 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
845 		list_del_init(&vma->exec_list);
846 		i915_gem_execbuffer_unreserve_vma(vma);
847 		drm_gem_object_unreference(&vma->obj->base);
848 	}
849 
850 	mutex_unlock(&dev->struct_mutex);
851 
852 	total = 0;
853 	for (i = 0; i < count; i++)
854 		total += exec[i].relocation_count;
855 
856 	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
857 	reloc = drm_malloc_ab(total, sizeof(*reloc));
858 	if (reloc == NULL || reloc_offset == NULL) {
859 		drm_free_large(reloc);
860 		drm_free_large(reloc_offset);
861 		mutex_lock(&dev->struct_mutex);
862 		return -ENOMEM;
863 	}
864 
865 	total = 0;
866 	for (i = 0; i < count; i++) {
867 		struct drm_i915_gem_relocation_entry __user *user_relocs;
868 		u64 invalid_offset = (u64)-1;
869 		int j;
870 
871 		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
872 
873 		if (copy_from_user(reloc+total, user_relocs,
874 				   exec[i].relocation_count * sizeof(*reloc))) {
875 			ret = -EFAULT;
876 			mutex_lock(&dev->struct_mutex);
877 			goto err;
878 		}
879 
880 		/* As we do not update the known relocation offsets after
881 		 * relocating (due to the complexities in lock handling),
882 		 * we need to mark them as invalid now so that we force the
883 		 * relocation processing next time. This guards against the
884 		 * target object being evicted and then rebound into its old
885 		 * presumed_offset before the next execbuffer: if that
886 		 * happened we would wrongly assume that the relocations
887 		 * were still valid.
888 		 */
889 		for (j = 0; j < exec[i].relocation_count; j++) {
890 			if (__copy_to_user(&user_relocs[j].presumed_offset,
891 					   &invalid_offset,
892 					   sizeof(invalid_offset))) {
893 				ret = -EFAULT;
894 				mutex_lock(&dev->struct_mutex);
895 				goto err;
896 			}
897 		}
898 
899 		reloc_offset[i] = total;
900 		total += exec[i].relocation_count;
901 	}
902 
903 	ret = i915_mutex_lock_interruptible(dev);
904 	if (ret) {
905 		mutex_lock(&dev->struct_mutex);
906 		goto err;
907 	}
908 
909 	/* reacquire the objects */
910 	eb_reset(eb);
911 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
912 	if (ret)
913 		goto err;
914 
915 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
916 	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
917 					  &need_relocs);
918 	if (ret)
919 		goto err;
920 
921 	list_for_each_entry(vma, &eb->vmas, exec_list) {
922 		int offset = vma->exec_entry - exec;
923 		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
924 							    reloc + reloc_offset[offset]);
925 		if (ret)
926 			goto err;
927 	}
928 
929 	/* Leave the user relocations as they are; this is the painfully slow path,
930 	 * and we want to avoid the complication of dropping the lock whilst
931 	 * having buffers reserved in the aperture and so causing spurious
932 	 * ENOSPC for random operations.
933 	 */
934 
935 err:
936 	drm_free_large(reloc);
937 	drm_free_large(reloc_offset);
938 	return ret;
939 }
940 
941 static int
942 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
943 				struct list_head *vmas)
944 {
945 	const unsigned other_rings = ~intel_engine_flag(req->engine);
946 	struct i915_vma *vma;
947 	int ret;
948 
949 	list_for_each_entry(vma, vmas, exec_list) {
950 		struct drm_i915_gem_object *obj = vma->obj;
951 
952 		if (obj->active & other_rings) {
953 			ret = i915_gem_object_sync(obj, req->engine, &req);
954 			if (ret)
955 				return ret;
956 		}
957 
958 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
959 			i915_gem_clflush_object(obj, false);
960 	}
961 
962 	/* Unconditionally flush any chipset caches (for streaming writes). */
963 	i915_gem_chipset_flush(req->engine->i915);
964 
965 	/* Unconditionally invalidate gpu caches and ensure that we do flush
966 	 * any residual writes from the previous batch.
967 	 */
968 	return intel_ring_invalidate_all_caches(req);
969 }
970 
971 static bool
972 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
973 {
974 	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
975 		return false;
976 
977 	/* Kernel clipping was a DRI1 misfeature */
978 	if (exec->num_cliprects || exec->cliprects_ptr)
979 		return false;
980 
981 	if (exec->DR4 == 0xffffffff) {
982 		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
983 		exec->DR4 = 0;
984 	}
985 	if (exec->DR1 || exec->DR4)
986 		return false;
987 
988 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
989 		return false;
990 
991 	return true;
992 }
993 
994 static int
995 validate_exec_list(struct drm_device *dev,
996 		   struct drm_i915_gem_exec_object2 *exec,
997 		   int count)
998 {
999 	unsigned relocs_total = 0;
1000 	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1001 	unsigned invalid_flags;
1002 	int i;
1003 
1004 	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1005 	if (USES_FULL_PPGTT(dev))
1006 		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1007 
1008 	for (i = 0; i < count; i++) {
1009 		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1010 		int length; /* limited by fault_in_pages_readable() */
1011 
1012 		if (exec[i].flags & invalid_flags)
1013 			return -EINVAL;
1014 
1015 		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1016 		 * any non-page-aligned or non-canonical addresses.
1017 		 */
1018 		if (exec[i].flags & EXEC_OBJECT_PINNED) {
1019 			if (exec[i].offset !=
1020 			    gen8_canonical_addr(exec[i].offset & I915_GTT_PAGE_MASK))
1021 				return -EINVAL;
1022 
1023 			/* From the drm_mm perspective the address space is
1024 			 * contiguous, so from this point on we always use the
1025 			 * non-canonical form internally.
1026 			 */
1027 			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1028 		}
1029 
1030 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1031 			return -EINVAL;
1032 
1033 		/* First check for malicious input causing overflow in
1034 		 * the worst case where we need to allocate the entire
1035 		 * relocation tree as a single array.
1036 		 */
1037 		if (exec[i].relocation_count > relocs_max - relocs_total)
1038 			return -EINVAL;
1039 		relocs_total += exec[i].relocation_count;
1040 
1041 		length = exec[i].relocation_count *
1042 			sizeof(struct drm_i915_gem_relocation_entry);
1043 		/*
1044 		 * We must check that the entire relocation array is safe
1045 		 * to read, but since we may need to update the presumed
1046 		 * offsets during execution, check for full write access.
1047 		 */
1048 #if 0
1049 		if (!access_ok(VERIFY_WRITE, ptr, length))
1050 			return -EFAULT;
1051 #endif
1052 
1053 		if (likely(!i915.prefault_disable)) {
1054 			if (fault_in_multipages_readable(ptr, length))
1055 				return -EFAULT;
1056 		}
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 static struct i915_gem_context *
1063 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1064 			  struct intel_engine_cs *engine, const u32 ctx_id)
1065 {
1066 	struct i915_gem_context *ctx = NULL;
1067 	struct i915_ctx_hang_stats *hs;
1068 
1069 	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1070 		return ERR_PTR(-EINVAL);
1071 
1072 	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1073 	if (IS_ERR(ctx))
1074 		return ctx;
1075 
1076 	hs = &ctx->hang_stats;
1077 	if (hs->banned) {
1078 		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1079 		return ERR_PTR(-EIO);
1080 	}
1081 
1082 	return ctx;
1083 }
1084 
1085 void
1086 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1087 				   struct drm_i915_gem_request *req)
1088 {
1089 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1090 	struct i915_vma *vma;
1091 
1092 	list_for_each_entry(vma, vmas, exec_list) {
1093 		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1094 		struct drm_i915_gem_object *obj = vma->obj;
1095 		u32 old_read = obj->base.read_domains;
1096 		u32 old_write = obj->base.write_domain;
1097 
1098 		obj->dirty = 1; /* be paranoid  */
1099 		obj->base.write_domain = obj->base.pending_write_domain;
1100 		if (obj->base.write_domain == 0)
1101 			obj->base.pending_read_domains |= obj->base.read_domains;
1102 		obj->base.read_domains = obj->base.pending_read_domains;
1103 
1104 		i915_vma_move_to_active(vma, req);
1105 		if (obj->base.write_domain) {
1106 			i915_gem_request_assign(&obj->last_write_req, req);
1107 
1108 			intel_fb_obj_invalidate(obj, ORIGIN_CS);
1109 
1110 			/* update for the implicit flush after a batch */
1111 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1112 		}
1113 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1114 			i915_gem_request_assign(&obj->last_fenced_req, req);
1115 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1116 				struct drm_i915_private *dev_priv = engine->i915;
1117 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1118 					       &dev_priv->mm.fence_list);
1119 			}
1120 		}
1121 
1122 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
1123 	}
1124 }
1125 
1126 static void
1127 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
1128 {
1129 	/* Unconditionally force add_request to emit a full flush. */
1130 	params->engine->gpu_caches_dirty = true;
1131 
1132 	/* Add a breadcrumb for the completion of the batch buffer */
1133 	__i915_add_request(params->request, params->batch_obj, true);
1134 }
1135 
1136 static int
1137 i915_reset_gen7_sol_offsets(struct drm_device *dev,
1138 			    struct drm_i915_gem_request *req)
1139 {
1140 	struct intel_engine_cs *engine = req->engine;
1141 	struct drm_i915_private *dev_priv = to_i915(dev);
1142 	int ret, i;
1143 
1144 	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
1145 		DRM_DEBUG("sol reset is gen7/rcs only\n");
1146 		return -EINVAL;
1147 	}
1148 
1149 	ret = intel_ring_begin(req, 4 * 3);
1150 	if (ret)
1151 		return ret;
1152 
1153 	for (i = 0; i < 4; i++) {
1154 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
1155 		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
1156 		intel_ring_emit(engine, 0);
1157 	}
1158 
1159 	intel_ring_advance(engine);
1160 
1161 	return 0;
1162 }
1163 
1164 static struct drm_i915_gem_object*
1165 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1166 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1167 			  struct eb_vmas *eb,
1168 			  struct drm_i915_gem_object *batch_obj,
1169 			  u32 batch_start_offset,
1170 			  u32 batch_len,
1171 			  bool is_master)
1172 {
1173 	struct drm_i915_gem_object *shadow_batch_obj;
1174 	struct i915_vma *vma;
1175 	int ret;
1176 
1177 	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
1178 						   PAGE_ALIGN(batch_len));
1179 	if (IS_ERR(shadow_batch_obj))
1180 		return shadow_batch_obj;
1181 
1182 	ret = i915_parse_cmds(engine,
1183 			      batch_obj,
1184 			      shadow_batch_obj,
1185 			      batch_start_offset,
1186 			      batch_len,
1187 			      is_master);
1188 	if (ret)
1189 		goto err;
1190 
1191 	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1192 	if (ret)
1193 		goto err;
1194 
1195 	i915_gem_object_unpin_pages(shadow_batch_obj);
1196 
1197 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1198 
1199 	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1200 	vma->exec_entry = shadow_exec_entry;
1201 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1202 	drm_gem_object_reference(&shadow_batch_obj->base);
1203 	list_add_tail(&vma->exec_list, &eb->vmas);
1204 
1205 	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1206 
1207 	return shadow_batch_obj;
1208 
1209 err:
1210 	i915_gem_object_unpin_pages(shadow_batch_obj);
1211 	if (ret == -EACCES) /* unhandled chained batch */
1212 		return batch_obj;
1213 	else
1214 		return ERR_PTR(ret);
1215 }
1216 
1217 int
1218 i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1219 			       struct drm_i915_gem_execbuffer2 *args,
1220 			       struct list_head *vmas)
1221 {
1222 	struct drm_device *dev = params->dev;
1223 	struct intel_engine_cs *engine = params->engine;
1224 	struct drm_i915_private *dev_priv = to_i915(dev);
1225 	u64 exec_start, exec_len;
1226 	int instp_mode;
1227 	u32 instp_mask;
1228 	int ret;
1229 
1230 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1231 	if (ret)
1232 		return ret;
1233 
1234 	ret = i915_switch_context(params->request);
1235 	if (ret)
1236 		return ret;
1237 
1238 	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
1239 	     "%s didn't clear reload\n", engine->name);
1240 
1241 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1242 	instp_mask = I915_EXEC_CONSTANTS_MASK;
1243 	switch (instp_mode) {
1244 	case I915_EXEC_CONSTANTS_REL_GENERAL:
1245 	case I915_EXEC_CONSTANTS_ABSOLUTE:
1246 	case I915_EXEC_CONSTANTS_REL_SURFACE:
1247 		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
1248 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1249 			return -EINVAL;
1250 		}
1251 
1252 		if (instp_mode != dev_priv->relative_constants_mode) {
1253 			if (INTEL_INFO(dev)->gen < 4) {
1254 				DRM_DEBUG("no rel constants on pre-gen4\n");
1255 				return -EINVAL;
1256 			}
1257 
1258 			if (INTEL_INFO(dev)->gen > 5 &&
1259 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1260 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1261 				return -EINVAL;
1262 			}
1263 
1264 			/* The HW changed the meaning of this bit on gen6 */
1265 			if (INTEL_INFO(dev)->gen >= 6)
1266 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1267 		}
1268 		break;
1269 	default:
1270 		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1271 		return -EINVAL;
1272 	}
1273 
1274 	if (engine == &dev_priv->engine[RCS] &&
1275 	    instp_mode != dev_priv->relative_constants_mode) {
1276 		ret = intel_ring_begin(params->request, 4);
1277 		if (ret)
1278 			return ret;
1279 
1280 		intel_ring_emit(engine, MI_NOOP);
1281 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
1282 		intel_ring_emit_reg(engine, INSTPM);
1283 		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
1284 		intel_ring_advance(engine);
1285 
1286 		dev_priv->relative_constants_mode = instp_mode;
1287 	}
1288 
1289 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1290 		ret = i915_reset_gen7_sol_offsets(dev, params->request);
1291 		if (ret)
1292 			return ret;
1293 	}
1294 
1295 	exec_len   = args->batch_len;
1296 	exec_start = params->batch_obj_vm_offset +
1297 		     params->args_batch_start_offset;
1298 
1299 	if (exec_len == 0)
1300 		exec_len = params->batch_obj->base.size;
1301 
1302 	ret = engine->dispatch_execbuffer(params->request,
1303 					exec_start, exec_len,
1304 					params->dispatch_flags);
1305 	if (ret)
1306 		return ret;
1307 
1308 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1309 
1310 	i915_gem_execbuffer_move_to_active(vmas, params->request);
1311 
1312 	return 0;
1313 }
1314 
1315 /**
1316  * Find one BSD ring to dispatch the corresponding BSD command.
1317  * The ring index is returned.
1318  */
1319 static unsigned int
1320 gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
1321 {
1322 	struct drm_i915_file_private *file_priv = file->driver_priv;
1323 
1324 	/* Check whether the file_priv has already selected one ring. */
1325 	if ((int)file_priv->bsd_ring < 0) {
1326 		/* If not, use the ping-pong mechanism to select one. */
1327 		mutex_lock(&dev_priv->drm.struct_mutex);
1328 		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
1329 		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
1330 		mutex_unlock(&dev_priv->drm.struct_mutex);
1331 	}
1332 
1333 	return file_priv->bsd_ring;
1334 }
1335 
1336 static struct drm_i915_gem_object *
1337 eb_get_batch(struct eb_vmas *eb)
1338 {
1339 	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1340 
1341 	/*
1342 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
1343 	 * to negative relocation deltas. Usually that works out ok since the
1344 	 * relocated address is still positive, except when the batch is placed
1345 	 * very low in the GTT. Ensure this doesn't happen.
1346 	 *
1347 	 * Note that actual hangs have only been observed on gen7, but for
1348 	 * paranoia do it everywhere.
1349 	 */
1350 	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
1351 		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1352 
1353 	return vma->obj;
1354 }
1355 
1356 #define I915_USER_RINGS (4)
1357 
1358 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
1359 	[I915_EXEC_DEFAULT]	= RCS,
1360 	[I915_EXEC_RENDER]	= RCS,
1361 	[I915_EXEC_BLT]		= BCS,
1362 	[I915_EXEC_BSD]		= VCS,
1363 	[I915_EXEC_VEBOX]	= VECS
1364 };
1365 
1366 static int
1367 eb_select_ring(struct drm_i915_private *dev_priv,
1368 	       struct drm_file *file,
1369 	       struct drm_i915_gem_execbuffer2 *args,
1370 	       struct intel_engine_cs **ring)
1371 {
1372 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1373 
1374 	if (user_ring_id > I915_USER_RINGS) {
1375 		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1376 		return -EINVAL;
1377 	}
1378 
1379 	if ((user_ring_id != I915_EXEC_BSD) &&
1380 	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1381 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
1382 			  "bsd dispatch flags: %d\n", (int)(args->flags));
1383 		return -EINVAL;
1384 	}
1385 
1386 	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1387 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1388 
1389 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1390 			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
1391 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1392 			   bsd_idx <= I915_EXEC_BSD_RING2) {
1393 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
1394 			bsd_idx--;
1395 		} else {
1396 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1397 				  bsd_idx);
1398 			return -EINVAL;
1399 		}
1400 
1401 		*ring = &dev_priv->engine[_VCS(bsd_idx)];
1402 	} else {
1403 		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
1404 	}
1405 
1406 	if (!intel_engine_initialized(*ring)) {
1407 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1408 		return -EINVAL;
1409 	}
1410 
1411 	return 0;
1412 }
1413 
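/* Common execbuffer path shared by the legacy and execbuffer2 ioctls:
 * validate the arguments, pick an engine and context, reserve and relocate
 * every buffer, optionally run the batch through the command parser, then
 * allocate a request and hand it to the submission backend.
 */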
1414 static int
1415 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1416 		       struct drm_file *file,
1417 		       struct drm_i915_gem_execbuffer2 *args,
1418 		       struct drm_i915_gem_exec_object2 *exec)
1419 {
1420 	struct drm_i915_private *dev_priv = to_i915(dev);
1421 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1422 	struct drm_i915_gem_request *req = NULL;
1423 	struct eb_vmas *eb;
1424 	struct drm_i915_gem_object *batch_obj;
1425 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
1426 	struct intel_engine_cs *engine;
1427 	struct i915_gem_context *ctx;
1428 	struct i915_address_space *vm;
1429 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1430 	struct i915_execbuffer_params *params = &params_master;
1431 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1432 	u32 dispatch_flags;
1433 	int ret;
1434 	bool need_relocs;
1435 
1436 	if (!i915_gem_check_execbuffer(args))
1437 		return -EINVAL;
1438 
1439 	ret = validate_exec_list(dev, exec, args->buffer_count);
1440 	if (ret)
1441 		return ret;
1442 
1443 	dispatch_flags = 0;
1444 	if (args->flags & I915_EXEC_SECURE) {
1445 #if 0
1446 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1447 		    return -EPERM;
1448 #endif
1449 
1450 		dispatch_flags |= I915_DISPATCH_SECURE;
1451 	}
1452 	if (args->flags & I915_EXEC_IS_PINNED)
1453 		dispatch_flags |= I915_DISPATCH_PINNED;
1454 
1455 	ret = eb_select_ring(dev_priv, file, args, &engine);
1456 	if (ret)
1457 		return ret;
1458 
1459 	if (args->buffer_count < 1) {
1460 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1461 		return -EINVAL;
1462 	}
1463 
1464 	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1465 		if (!HAS_RESOURCE_STREAMER(dev)) {
1466 			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1467 			return -EINVAL;
1468 		}
1469 		if (engine->id != RCS) {
1470 			DRM_DEBUG("RS is not available on %s\n",
1471 				 engine->name);
1472 			return -EINVAL;
1473 		}
1474 
1475 		dispatch_flags |= I915_DISPATCH_RS;
1476 	}
1477 
1478 	/* Take a local wakeref for preparing to dispatch the execbuf as
1479 	 * we expect to access the hardware fairly frequently in the
1480 	 * process. Upon first dispatch, we acquire another prolonged
1481 	 * wakeref that we hold until the GPU has been idle for at least
1482 	 * 100ms.
1483 	 */
1484 	intel_runtime_pm_get(dev_priv);
1485 
1486 	ret = i915_mutex_lock_interruptible(dev);
1487 	if (ret)
1488 		goto pre_mutex_err;
1489 
1490 	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
1491 	if (IS_ERR(ctx)) {
1492 		mutex_unlock(&dev->struct_mutex);
1493 		ret = PTR_ERR(ctx);
1494 		goto pre_mutex_err;
1495 	}
1496 
1497 	i915_gem_context_reference(ctx);
1498 
1499 	if (ctx->ppgtt)
1500 		vm = &ctx->ppgtt->base;
1501 	else
1502 		vm = &ggtt->base;
1503 
1504 	memset(&params_master, 0x00, sizeof(params_master));
1505 
1506 	eb = eb_create(args);
1507 	if (eb == NULL) {
1508 		i915_gem_context_unreference(ctx);
1509 		mutex_unlock(&dev->struct_mutex);
1510 		ret = -ENOMEM;
1511 		goto pre_mutex_err;
1512 	}
1513 
1514 	/* Look up object handles */
1515 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
1516 	if (ret)
1517 		goto err;
1518 
1519 	/* take note of the batch buffer before we might reorder the lists */
1520 	batch_obj = eb_get_batch(eb);
1521 
1522 	/* Move the objects en-masse into the GTT, evicting if necessary. */
1523 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1524 	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1525 					  &need_relocs);
1526 	if (ret)
1527 		goto err;
1528 
1529 	/* The objects are in their final locations, apply the relocations. */
1530 	if (need_relocs)
1531 		ret = i915_gem_execbuffer_relocate(eb);
1532 	if (ret) {
1533 		if (ret == -EFAULT) {
1534 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1535 								engine,
1536 								eb, exec, ctx);
1537 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1538 		}
1539 		if (ret)
1540 			goto err;
1541 	}
1542 
1543 	/* Set the pending read domains for the batch buffer to COMMAND */
1544 	if (batch_obj->base.pending_write_domain) {
1545 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1546 		ret = -EINVAL;
1547 		goto err;
1548 	}
1549 
1550 	params->args_batch_start_offset = args->batch_start_offset;
1551 	if (i915_needs_cmd_parser(engine) && args->batch_len) {
1552 		struct drm_i915_gem_object *parsed_batch_obj;
1553 
1554 		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
1555 							     &shadow_exec_entry,
1556 							     eb,
1557 							     batch_obj,
1558 							     args->batch_start_offset,
1559 							     args->batch_len,
1560 							     drm_is_current_master(file));
1561 		if (IS_ERR(parsed_batch_obj)) {
1562 			ret = PTR_ERR(parsed_batch_obj);
1563 			goto err;
1564 		}
1565 
1566 		/*
1567 		 * parsed_batch_obj == batch_obj means batch not fully parsed:
1568 		 * Accept, but don't promote to secure.
1569 		 */
1570 
1571 		if (parsed_batch_obj != batch_obj) {
1572 			/*
1573 			 * Batch parsed and accepted:
1574 			 *
1575 			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1576 			 * bit from MI_BATCH_BUFFER_START commands issued in
1577 			 * the dispatch_execbuffer implementations. We
1578 			 * specifically don't want that set on batches the
1579 			 * command parser has accepted.
1580 			 */
1581 			dispatch_flags |= I915_DISPATCH_SECURE;
1582 			params->args_batch_start_offset = 0;
1583 			batch_obj = parsed_batch_obj;
1584 		}
1585 	}
1586 
1587 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1588 
1589 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1590 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
1591 	 * hsw should have this fixed, but bdw mucks it up again. */
1592 	if (dispatch_flags & I915_DISPATCH_SECURE) {
1593 		/*
1594 		 * So on first glance it looks freaky that we pin the batch here
1595 		 * outside of the reservation loop. But:
1596 		 * - The batch is already pinned into the relevant ppgtt, so we
1597 		 *   already have the backing storage fully allocated.
1598 		 * - No other BO uses the global gtt (well contexts, but meh),
1599 		 *   so we don't really have issues with multiple objects not
1600 		 *   fitting due to fragmentation.
1601 		 * So this is actually safe.
1602 		 */
1603 		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1604 		if (ret)
1605 			goto err;
1606 
1607 		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
1608 	} else
1609 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1610 
1611 	/* Allocate a request for this batch buffer nice and early. */
1612 	req = i915_gem_request_alloc(engine, ctx);
1613 	if (IS_ERR(req)) {
1614 		ret = PTR_ERR(req);
1615 		goto err_batch_unpin;
1616 	}
1617 
1618 	ret = i915_gem_request_add_to_client(req, file);
1619 	if (ret)
1620 		goto err_request;
1621 
1622 	/*
1623 	 * Save assorted stuff away to pass through to *_submission().
1624 	 * NB: This data should be 'persistent' and not local as it will be
1625 	 * kept around beyond the duration of the IOCTL once the GPU
1626 	 * scheduler arrives.
1627 	 */
1628 	params->dev                     = dev;
1629 	params->file                    = file;
1630 	params->engine                  = engine;
1631 	params->dispatch_flags          = dispatch_flags;
1632 	params->batch_obj               = batch_obj;
1633 	params->ctx                     = ctx;
1634 	params->request                 = req;
1635 
1636 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1637 err_request:
1638 	i915_gem_execbuffer_retire_commands(params);
1639 
1640 err_batch_unpin:
1641 	/*
1642 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1643 	 * batch vma for correctness. For less ugly and less fragility this
1644 	 * needs to be adjusted to also track the ggtt batch vma properly as
1645 	 * active.
1646 	 */
1647 	if (dispatch_flags & I915_DISPATCH_SECURE)
1648 		i915_gem_object_ggtt_unpin(batch_obj);
1649 
1650 err:
1651 	/* the request owns the ref now */
1652 	i915_gem_context_unreference(ctx);
1653 	eb_destroy(eb);
1654 
1655 	mutex_unlock(&dev->struct_mutex);
1656 
1657 pre_mutex_err:
1658 	/* intel_gpu_busy should also get a ref, so it will be released when
1659 	 * the device is really idle. */
1660 	intel_runtime_pm_put(dev_priv);
1661 	return ret;
1662 }
1663 
1664 /*
1665  * Legacy execbuffer just creates an exec2 list from the original exec object
1666  * list array and passes it to the real function.
1667  */
1668 int
1669 i915_gem_execbuffer(struct drm_device *dev, void *data,
1670 		    struct drm_file *file)
1671 {
1672 	struct drm_i915_gem_execbuffer *args = data;
1673 	struct drm_i915_gem_execbuffer2 exec2;
1674 	struct drm_i915_gem_exec_object *exec_list = NULL;
1675 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1676 	int ret, i;
1677 
1678 	if (args->buffer_count < 1) {
1679 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1680 		return -EINVAL;
1681 	}
1682 
1683 	/* Copy in the exec list from userland */
1684 	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1685 	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1686 	if (exec_list == NULL || exec2_list == NULL) {
1687 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1688 			  args->buffer_count);
1689 		drm_free_large(exec_list);
1690 		drm_free_large(exec2_list);
1691 		return -ENOMEM;
1692 	}
1693 	ret = copy_from_user(exec_list,
1694 			     u64_to_user_ptr(args->buffers_ptr),
1695 			     sizeof(*exec_list) * args->buffer_count);
1696 	if (ret != 0) {
1697 		DRM_DEBUG("copy %d exec entries failed %d\n",
1698 			  args->buffer_count, ret);
1699 		drm_free_large(exec_list);
1700 		drm_free_large(exec2_list);
1701 		return -EFAULT;
1702 	}
1703 
1704 	for (i = 0; i < args->buffer_count; i++) {
1705 		exec2_list[i].handle = exec_list[i].handle;
1706 		exec2_list[i].relocation_count = exec_list[i].relocation_count;
1707 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1708 		exec2_list[i].alignment = exec_list[i].alignment;
1709 		exec2_list[i].offset = exec_list[i].offset;
1710 		if (INTEL_INFO(dev)->gen < 4)
1711 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1712 		else
1713 			exec2_list[i].flags = 0;
1714 	}
1715 
1716 	exec2.buffers_ptr = args->buffers_ptr;
1717 	exec2.buffer_count = args->buffer_count;
1718 	exec2.batch_start_offset = args->batch_start_offset;
1719 	exec2.batch_len = args->batch_len;
1720 	exec2.DR1 = args->DR1;
1721 	exec2.DR4 = args->DR4;
1722 	exec2.num_cliprects = args->num_cliprects;
1723 	exec2.cliprects_ptr = args->cliprects_ptr;
1724 	exec2.flags = I915_EXEC_RENDER;
1725 	i915_execbuffer2_set_context_id(exec2, 0);
1726 
1727 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1728 	if (!ret) {
1729 		struct drm_i915_gem_exec_object __user *user_exec_list =
1730 			u64_to_user_ptr(args->buffers_ptr);
1731 
1732 		/* Copy the new buffer offsets back to the user's exec list. */
1733 		for (i = 0; i < args->buffer_count; i++) {
1734 			exec2_list[i].offset =
1735 				gen8_canonical_addr(exec2_list[i].offset);
1736 			ret = __copy_to_user(&user_exec_list[i].offset,
1737 					     &exec2_list[i].offset,
1738 					     sizeof(user_exec_list[i].offset));
1739 			if (ret) {
1740 				ret = -EFAULT;
1741 				DRM_DEBUG("failed to copy %d exec entries "
1742 					  "back to user (%d)\n",
1743 					  args->buffer_count, ret);
1744 				break;
1745 			}
1746 		}
1747 	}
1748 
1749 	drm_free_large(exec_list);
1750 	drm_free_large(exec2_list);
1751 	return ret;
1752 }
1753 
1754 int
1755 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1756 		     struct drm_file *file)
1757 {
1758 	struct drm_i915_gem_execbuffer2 *args = data;
1759 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1760 	int ret;
1761 
1762 	if (args->buffer_count < 1 ||
1763 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1764 		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1765 		return -EINVAL;
1766 	}
1767 
1768 	if (args->rsvd2 != 0) {
1769 		DRM_DEBUG("dirty rsvd2 field\n");
1770 		return -EINVAL;
1771 	}
1772 
1773 	exec2_list = drm_malloc_gfp(args->buffer_count,
1774 				    sizeof(*exec2_list),
1775 				    GFP_TEMPORARY);
1776 	if (exec2_list == NULL) {
1777 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1778 			  args->buffer_count);
1779 		return -ENOMEM;
1780 	}
1781 	ret = copy_from_user(exec2_list,
1782 			     u64_to_user_ptr(args->buffers_ptr),
1783 			     sizeof(*exec2_list) * args->buffer_count);
1784 	if (ret != 0) {
1785 		DRM_DEBUG("copy %d exec entries failed %d\n",
1786 			  args->buffer_count, ret);
1787 		drm_free_large(exec2_list);
1788 		return -EFAULT;
1789 	}
1790 
1791 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1792 	if (!ret) {
1793 		/* Copy the new buffer offsets back to the user's exec list. */
1794 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
1795 				   u64_to_user_ptr(args->buffers_ptr);
1796 		int i;
1797 
1798 		for (i = 0; i < args->buffer_count; i++) {
1799 			exec2_list[i].offset =
1800 				gen8_canonical_addr(exec2_list[i].offset);
1801 			ret = __copy_to_user(&user_exec_list[i].offset,
1802 					     &exec2_list[i].offset,
1803 					     sizeof(user_exec_list[i].offset));
1804 			if (ret) {
1805 				ret = -EFAULT;
1806 				DRM_DEBUG("failed to copy %d exec entries "
1807 					  "back to user\n",
1808 					  args->buffer_count);
1809 				break;
1810 			}
1811 		}
1812 	}
1813 
1814 	drm_free_large(exec2_list);
1815 	return ret;
1816 }
1817