1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/pagemap.h>
35 #include <asm/cpufeature.h>
36 
37 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
38 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
39 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
40 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
41 
42 #define BATCH_OFFSET_BIAS (256*1024)
43 
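/*
 * Handle->vma lookup table used while processing an execbuffer. Depending on
 * how eb_create() allocated it, this is either a flat array indexed directly
 * by the execbuffer index (eb->and is negative, -eb->and being the capacity)
 * or a small hash table of buckets keyed by object handle (eb->and is then
 * the bucket mask).
 */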
44 struct eb_vmas {
45 	struct list_head vmas;
46 	int and;
47 	union {
48 		struct i915_vma *lut[0];
49 		struct hlist_head buckets[0];
50 	};
51 };
52 
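/*
 * Allocate the lookup structure for this execbuffer: a flat LUT when
 * userspace passed I915_EXEC_HANDLE_LUT (handles are then simply indices),
 * otherwise (or if that allocation fails) a hash table whose bucket count is
 * halved until it is no more than twice the number of objects. Returns NULL
 * on allocation failure.
 */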
53 static struct eb_vmas *
54 eb_create(struct drm_i915_gem_execbuffer2 *args)
55 {
56 	struct eb_vmas *eb = NULL;
57 
58 	if (args->flags & I915_EXEC_HANDLE_LUT) {
59 		unsigned size = args->buffer_count;
60 		size *= sizeof(struct i915_vma *);
61 		size += sizeof(struct eb_vmas);
62 		eb = kmalloc(size, M_DRM, M_NOWAIT);
63 	}
64 
65 	if (eb == NULL) {
66 		unsigned size = args->buffer_count;
67 		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
68 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
69 		while (count > 2*size)
70 			count >>= 1;
71 		eb = kzalloc(count*sizeof(struct hlist_head) +
72 			     sizeof(struct eb_vmas),
73 			     GFP_TEMPORARY);
74 		if (eb == NULL)
75 			return eb;
76 
77 		eb->and = count - 1;
78 	} else
79 		eb->and = -args->buffer_count;
80 
81 	INIT_LIST_HEAD(&eb->vmas);
82 	return eb;
83 }
84 
85 static void
86 eb_reset(struct eb_vmas *eb)
87 {
88 	if (eb->and >= 0)
89 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
90 }
91 
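/*
 * Resolve every handle in the exec list to a vma in the target address
 * space, taking a reference on each object. Returns -ENOENT for an unknown
 * handle and -EINVAL if the same handle appears twice; on success the
 * references are owned by the eb->vmas list.
 */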
92 static int
93 eb_lookup_vmas(struct eb_vmas *eb,
94 	       struct drm_i915_gem_exec_object2 *exec,
95 	       const struct drm_i915_gem_execbuffer2 *args,
96 	       struct i915_address_space *vm,
97 	       struct drm_file *file)
98 {
99 	struct drm_i915_gem_object *obj;
100 	struct list_head objects;
101 	int i, ret;
102 
103 	INIT_LIST_HEAD(&objects);
104 	lockmgr(&file->table_lock, LK_EXCLUSIVE);
105 	/* Grab a reference to the object and release the lock so we can look up
106 	 * or create the VMA without using GFP_ATOMIC */
107 	for (i = 0; i < args->buffer_count; i++) {
108 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
109 		if (obj == NULL) {
110 			lockmgr(&file->table_lock, LK_RELEASE);
111 			DRM_DEBUG("Invalid object handle %d at index %d\n",
112 				   exec[i].handle, i);
113 			ret = -ENOENT;
114 			goto err;
115 		}
116 
117 		if (!list_empty(&obj->obj_exec_link)) {
118 			lockmgr(&file->table_lock, LK_RELEASE);
119 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
120 				   obj, exec[i].handle, i);
121 			ret = -EINVAL;
122 			goto err;
123 		}
124 
125 		drm_gem_object_reference(&obj->base);
126 		list_add_tail(&obj->obj_exec_link, &objects);
127 	}
128 	lockmgr(&file->table_lock, LK_RELEASE);
129 
130 	i = 0;
131 	while (!list_empty(&objects)) {
132 		struct i915_vma *vma;
133 
134 		obj = list_first_entry(&objects,
135 				       struct drm_i915_gem_object,
136 				       obj_exec_link);
137 
138 		/*
139 		 * NOTE: We can leak any vmas created here when something fails
140 		 * later on. But that's no issue since vma_unbind can deal with
141 		 * vmas which are not actually bound. And since only
142 		 * lookup_or_create exists as an interface to get at the vma
143 		 * from the (obj, vm) pair, we don't run the risk of creating
144 		 * duplicate vmas for the same vm.
145 		 */
146 		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
147 		if (IS_ERR(vma)) {
148 			DRM_DEBUG("Failed to lookup VMA\n");
149 			ret = PTR_ERR(vma);
150 			goto err;
151 		}
152 
153 		/* Transfer ownership from the objects list to the vmas list. */
154 		list_add_tail(&vma->exec_list, &eb->vmas);
155 		list_del_init(&obj->obj_exec_link);
156 
157 		vma->exec_entry = &exec[i];
158 		if (eb->and < 0) {
159 			eb->lut[i] = vma;
160 		} else {
161 			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
162 			vma->exec_handle = handle;
163 			hlist_add_head(&vma->exec_node,
164 				       &eb->buckets[handle & eb->and]);
165 		}
166 		++i;
167 	}
168 
169 	return 0;
170 
172 err:
173 	while (!list_empty(&objects)) {
174 		obj = list_first_entry(&objects,
175 				       struct drm_i915_gem_object,
176 				       obj_exec_link);
177 		list_del_init(&obj->obj_exec_link);
178 		drm_gem_object_unreference(&obj->base);
179 	}
180 	/*
181 	 * Objects already transferred to the vmas list will be unreferenced by
182 	 * eb_destroy.
183 	 */
184 
185 	return ret;
186 }
187 
188 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
189 {
190 	if (eb->and < 0) {
191 		if (handle >= -eb->and)
192 			return NULL;
193 		return eb->lut[handle];
194 	} else {
195 		struct hlist_head *head;
196 		struct hlist_node *node;
197 
198 		head = &eb->buckets[handle & eb->and];
199 		hlist_for_each(node, head) {
200 			struct i915_vma *vma;
201 
202 			vma = hlist_entry(node, struct i915_vma, exec_node);
203 			if (vma->exec_handle == handle)
204 				return vma;
205 		}
206 		return NULL;
207 	}
208 }
209 
210 static void
211 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
212 {
213 	struct drm_i915_gem_exec_object2 *entry;
214 	struct drm_i915_gem_object *obj = vma->obj;
215 
216 	if (!drm_mm_node_allocated(&vma->node))
217 		return;
218 
219 	entry = vma->exec_entry;
220 
221 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
222 		i915_gem_object_unpin_fence(obj);
223 
224 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
225 		vma->pin_count--;
226 
227 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
228 }
229 
230 static void eb_destroy(struct eb_vmas *eb)
231 {
232 	while (!list_empty(&eb->vmas)) {
233 		struct i915_vma *vma;
234 
235 		vma = list_first_entry(&eb->vmas,
236 				       struct i915_vma,
237 				       exec_list);
238 		list_del_init(&vma->exec_list);
239 		i915_gem_execbuffer_unreserve_vma(vma);
240 		drm_gem_object_unreference(&vma->obj->base);
241 	}
242 	kfree(eb);
243 }
244 
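/*
 * Relocations are written with the CPU when the platform has an LLC, when
 * the object is already in the CPU write domain, or when the object is not
 * uncached; otherwise the GTT or clflush paths below are used.
 */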
245 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
246 {
247 	return (HAS_LLC(obj->base.dev) ||
248 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
249 		obj->cache_level != I915_CACHE_NONE);
250 }
251 
252 /* Used to convert any address to canonical form.
253  * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
254  * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
255  * addresses to be in a canonical form:
256  * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
257  * canonical form [63:48] == [47]."
258  */
259 #define GEN8_HIGH_ADDRESS_BIT 47
260 static inline uint64_t gen8_canonical_addr(uint64_t address)
261 {
262 	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
263 }
264 
265 static inline uint64_t gen8_noncanonical_addr(uint64_t address)
266 {
267 	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
268 }
269 
270 static inline uint64_t
271 relocation_target(struct drm_i915_gem_relocation_entry *reloc,
272 		  uint64_t target_offset)
273 {
274 	return gen8_canonical_addr((int)reloc->delta + target_offset);
275 }
276 
277 static int
278 relocate_entry_cpu(struct drm_i915_gem_object *obj,
279 		   struct drm_i915_gem_relocation_entry *reloc,
280 		   uint64_t target_offset)
281 {
282 	struct drm_device *dev = obj->base.dev;
283 	uint32_t page_offset = offset_in_page(reloc->offset);
284 	uint64_t delta = relocation_target(reloc, target_offset);
285 	char *vaddr;
286 	int ret;
287 
288 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
289 	if (ret)
290 		return ret;
291 
292 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
293 				reloc->offset >> PAGE_SHIFT));
294 	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
295 
296 	if (INTEL_INFO(dev)->gen >= 8) {
297 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
298 
299 		if (page_offset == 0) {
300 			kunmap_atomic(vaddr);
301 			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
302 			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
303 		}
304 
305 		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
306 	}
307 
308 	kunmap_atomic(vaddr);
309 
310 	return 0;
311 }
312 
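/*
 * Write a relocation through the mappable GTT aperture using an atomic WC
 * io-mapping of the page that contains it. On gen8+ the relocation is 64
 * bits wide, so the upper dword is written separately, remapping if it
 * falls on the next page.
 */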
313 static int
314 relocate_entry_gtt(struct drm_i915_gem_object *obj,
315 		   struct drm_i915_gem_relocation_entry *reloc,
316 		   uint64_t target_offset)
317 {
318 	struct drm_device *dev = obj->base.dev;
319 	struct drm_i915_private *dev_priv = dev->dev_private;
320 	uint64_t delta = relocation_target(reloc, target_offset);
321 	uint64_t offset;
322 	void __iomem *reloc_page;
323 	int ret;
324 
325 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
326 	if (ret)
327 		return ret;
328 
329 	ret = i915_gem_object_put_fence(obj);
330 	if (ret)
331 		return ret;
332 
333 	/* Map the page containing the relocation we're going to perform.  */
334 	offset = i915_gem_obj_ggtt_offset(obj);
335 	offset += reloc->offset;
336 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
337 					      offset & ~PAGE_MASK);
338 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
339 
340 	if (INTEL_INFO(dev)->gen >= 8) {
341 		offset += sizeof(uint32_t);
342 
343 		if (offset_in_page(offset) == 0) {
344 			io_mapping_unmap_atomic(reloc_page);
345 			reloc_page =
346 				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
347 							 offset);
348 		}
349 
350 		iowrite32(upper_32_bits(delta),
351 			  reloc_page + offset_in_page(offset));
352 	}
353 
354 	io_mapping_unmap_atomic(reloc_page);
355 
356 	return 0;
357 }
358 
359 static void
360 clflush_write32(void *addr, uint32_t value)
361 {
362 	/* This is not a fast path, so KISS. */
363 	drm_clflush_virt_range(addr, sizeof(uint32_t));
364 	*(uint32_t *)addr = value;
365 	drm_clflush_virt_range(addr, sizeof(uint32_t));
366 }
367 
368 static int
369 relocate_entry_clflush(struct drm_i915_gem_object *obj,
370 		       struct drm_i915_gem_relocation_entry *reloc,
371 		       uint64_t target_offset)
372 {
373 	struct drm_device *dev = obj->base.dev;
374 	uint32_t page_offset = offset_in_page(reloc->offset);
375 	uint64_t delta = relocation_target(reloc, target_offset);
376 	char *vaddr;
377 	int ret;
378 
379 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
380 	if (ret)
381 		return ret;
382 
383 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
384 				reloc->offset >> PAGE_SHIFT));
385 	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
386 
387 	if (INTEL_INFO(dev)->gen >= 8) {
388 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
389 
390 		if (page_offset == 0) {
391 			kunmap_atomic(vaddr);
392 			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
393 			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
394 		}
395 
396 		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
397 	}
398 
399 	kunmap_atomic(vaddr);
400 
401 	return 0;
402 }
403 
404 static int
405 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
406 				   struct eb_vmas *eb,
407 				   struct drm_i915_gem_relocation_entry *reloc)
408 {
409 	struct drm_device *dev = obj->base.dev;
410 	struct drm_gem_object *target_obj;
411 	struct drm_i915_gem_object *target_i915_obj;
412 	struct i915_vma *target_vma;
413 	uint64_t target_offset;
414 	int ret;
415 
416 	/* we already hold a reference to all valid objects */
417 	target_vma = eb_get_vma(eb, reloc->target_handle);
418 	if (unlikely(target_vma == NULL))
419 		return -ENOENT;
420 	target_i915_obj = target_vma->obj;
421 	target_obj = &target_vma->obj->base;
422 
423 	target_offset = gen8_canonical_addr(target_vma->node.start);
424 
425 	/* Sandybridge PPGTT erratum: we need a global gtt mapping for MI and
426 	 * pipe_control writes because the gpu doesn't properly redirect them
427 	 * through the ppgtt for non-secure batchbuffers. */
428 	if (unlikely(IS_GEN6(dev) &&
429 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
430 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
431 				    PIN_GLOBAL);
432 		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
433 			return ret;
434 	}
435 
436 	/* Validate that the target is in a valid r/w GPU domain */
437 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
438 		DRM_DEBUG("reloc with multiple write domains: "
439 			  "obj %p target %d offset %d "
440 			  "read %08x write %08x",
441 			  obj, reloc->target_handle,
442 			  (int) reloc->offset,
443 			  reloc->read_domains,
444 			  reloc->write_domain);
445 		return -EINVAL;
446 	}
447 	if (unlikely((reloc->write_domain | reloc->read_domains)
448 		     & ~I915_GEM_GPU_DOMAINS)) {
449 		DRM_DEBUG("reloc with read/write non-GPU domains: "
450 			  "obj %p target %d offset %d "
451 			  "read %08x write %08x",
452 			  obj, reloc->target_handle,
453 			  (int) reloc->offset,
454 			  reloc->read_domains,
455 			  reloc->write_domain);
456 		return -EINVAL;
457 	}
458 
459 	target_obj->pending_read_domains |= reloc->read_domains;
460 	target_obj->pending_write_domain |= reloc->write_domain;
461 
462 	/* If the relocation already has the right value in it, no
463 	 * more work needs to be done.
464 	 */
465 	if (target_offset == reloc->presumed_offset)
466 		return 0;
467 
468 	/* Check that the relocation address is valid... */
469 	if (unlikely(reloc->offset >
470 		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
471 		DRM_DEBUG("Relocation beyond object bounds: "
472 			  "obj %p target %d offset %d size %d.\n",
473 			  obj, reloc->target_handle,
474 			  (int) reloc->offset,
475 			  (int) obj->base.size);
476 		return -EINVAL;
477 	}
478 	if (unlikely(reloc->offset & 3)) {
479 		DRM_DEBUG("Relocation not 4-byte aligned: "
480 			  "obj %p target %d offset %d.\n",
481 			  obj, reloc->target_handle,
482 			  (int) reloc->offset);
483 		return -EINVAL;
484 	}
485 
486 	/* We can't wait for rendering with pagefaults disabled */
487 	if (obj->active && (curthread->td_flags & TDF_NOFAULT))
488 		return -EFAULT;
489 
490 	if (use_cpu_reloc(obj))
491 		ret = relocate_entry_cpu(obj, reloc, target_offset);
492 	else if (obj->map_and_fenceable)
493 		ret = relocate_entry_gtt(obj, reloc, target_offset);
494 	else if (cpu_has_clflush)
495 		ret = relocate_entry_clflush(obj, reloc, target_offset);
496 	else {
497 		WARN_ONCE(1, "Impossible case in relocation handling\n");
498 		ret = -ENODEV;
499 	}
500 
501 	if (ret)
502 		return ret;
503 
504 	/* and update the user's relocation entry */
505 	reloc->presumed_offset = target_offset;
506 
507 	return 0;
508 }
509 
510 static int
511 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
512 				 struct eb_vmas *eb)
513 {
514 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
515 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
516 	struct drm_i915_gem_relocation_entry __user *user_relocs;
517 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
518 	int remain, ret;
519 
520 	user_relocs = to_user_ptr(entry->relocs_ptr);
521 
522 	remain = entry->relocation_count;
523 	while (remain) {
524 		struct drm_i915_gem_relocation_entry *r = stack_reloc;
525 		int count = remain;
526 		if (count > ARRAY_SIZE(stack_reloc))
527 			count = ARRAY_SIZE(stack_reloc);
528 		remain -= count;
529 
530 		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
531 			return -EFAULT;
532 
533 		do {
534 			u64 offset = r->presumed_offset;
535 
536 			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
537 			if (ret)
538 				return ret;
539 
540 			if (r->presumed_offset != offset &&
541 			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
542 						    &r->presumed_offset,
543 						    sizeof(r->presumed_offset))) {
544 				return -EFAULT;
545 			}
546 
547 			user_relocs++;
548 			r++;
549 		} while (--count);
550 	}
551 
552 	return 0;
553 #undef N_RELOC
554 }
555 
556 static int
557 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
558 				      struct eb_vmas *eb,
559 				      struct drm_i915_gem_relocation_entry *relocs)
560 {
561 	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
562 	int i, ret;
563 
564 	for (i = 0; i < entry->relocation_count; i++) {
565 		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
566 		if (ret)
567 			return ret;
568 	}
569 
570 	return 0;
571 }
572 
573 static int
574 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
575 {
576 	struct i915_vma *vma;
577 	int ret = 0;
578 
579 	/* This is the fast path and we cannot handle a pagefault whilst
580 	 * holding the struct mutex lest the user pass in the relocations
581 	 * contained within a mmapped bo. In such a case the page
582 	 * fault handler would call i915_gem_fault() and we would try to
583 	 * acquire the struct mutex again. Obviously this is bad and so
584 	 * lockdep complains vehemently.
585 	 */
586 	pagefault_disable();
587 	list_for_each_entry(vma, &eb->vmas, exec_list) {
588 		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
589 		if (ret)
590 			break;
591 	}
592 	pagefault_enable();
593 
594 	return ret;
595 }
596 
597 static bool only_mappable_for_reloc(unsigned int flags)
598 {
599 	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
600 		__EXEC_OBJECT_NEEDS_MAP;
601 }
602 
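/*
 * Pin a single vma for execution, translating the exec entry flags into pin
 * flags (global binding, mappable aperture, 4GiB zone, offset bias or fixed
 * offset) and acquiring a fence register if requested. *need_reloc is set
 * when the object did not end up at its presumed offset.
 */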
603 static int
604 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
605 				struct intel_engine_cs *ring,
606 				bool *need_reloc)
607 {
608 	struct drm_i915_gem_object *obj = vma->obj;
609 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
610 	uint64_t flags;
611 	int ret;
612 
613 	flags = PIN_USER;
614 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
615 		flags |= PIN_GLOBAL;
616 
617 	if (!drm_mm_node_allocated(&vma->node)) {
618 		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
619 		 * limit address to the first 4GBs for unflagged objects.
620 		 */
621 		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
622 			flags |= PIN_ZONE_4G;
623 		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
624 			flags |= PIN_GLOBAL | PIN_MAPPABLE;
625 		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
626 			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
627 		if (entry->flags & EXEC_OBJECT_PINNED)
628 			flags |= entry->offset | PIN_OFFSET_FIXED;
629 		if ((flags & PIN_MAPPABLE) == 0)
630 			flags |= PIN_HIGH;
631 	}
632 
633 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
634 	if ((ret == -ENOSPC  || ret == -E2BIG) &&
635 	    only_mappable_for_reloc(entry->flags))
636 		ret = i915_gem_object_pin(obj, vma->vm,
637 					  entry->alignment,
638 					  flags & ~PIN_MAPPABLE);
639 	if (ret)
640 		return ret;
641 
642 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
643 
644 	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
645 		ret = i915_gem_object_get_fence(obj);
646 		if (ret)
647 			return ret;
648 
649 		if (i915_gem_object_pin_fence(obj))
650 			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
651 	}
652 
653 	if (entry->offset != vma->node.start) {
654 		entry->offset = vma->node.start;
655 		*need_reloc = true;
656 	}
657 
658 	if (entry->flags & EXEC_OBJECT_WRITE) {
659 		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
660 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
661 	}
662 
663 	return 0;
664 }
665 
666 static bool
667 need_reloc_mappable(struct i915_vma *vma)
668 {
669 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
670 
671 	if (entry->relocation_count == 0)
672 		return false;
673 
674 	if (!i915_is_ggtt(vma->vm))
675 		return false;
676 
677 	/* See also use_cpu_reloc() */
678 	if (HAS_LLC(vma->obj->base.dev))
679 		return false;
680 
681 	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
682 		return false;
683 
684 	return true;
685 }
686 
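/*
 * Check whether an already bound vma still satisfies the placement
 * constraints requested by its exec entry.
 */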
687 static bool
688 eb_vma_misplaced(struct i915_vma *vma)
689 {
690 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
691 	struct drm_i915_gem_object *obj = vma->obj;
692 
693 	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
694 	       !i915_is_ggtt(vma->vm));
695 
696 	if (entry->alignment &&
697 	    vma->node.start & (entry->alignment - 1))
698 		return true;
699 
700 	if (entry->flags & EXEC_OBJECT_PINNED &&
701 	    vma->node.start != entry->offset)
702 		return true;
703 
704 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
705 	    vma->node.start < BATCH_OFFSET_BIAS)
706 		return true;
707 
708 	/* avoid costly ping-pong once a batch bo has ended up non-mappable */
709 	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
710 		return !only_mappable_for_reloc(entry->flags);
711 
712 	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
713 	    (vma->node.start + vma->node.size - 1) >> 32)
714 		return true;
715 
716 	return false;
717 }
718 
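/*
 * Reserve address space for every vma in the execbuffer. Objects that must
 * be mappable or have a fixed offset are sorted to the front of the list so
 * their stricter constraints are handled first; if the pass runs out of
 * space, everything is unpinned, the whole VM is evicted once and the
 * reservation retried.
 */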
719 static int
720 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
721 			    struct list_head *vmas,
722 			    struct intel_context *ctx,
723 			    bool *need_relocs)
724 {
725 	struct drm_i915_gem_object *obj;
726 	struct i915_vma *vma;
727 	struct i915_address_space *vm;
728 	struct list_head ordered_vmas;
729 	struct list_head pinned_vmas;
730 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
731 	int retry;
732 
733 	i915_gem_retire_requests_ring(ring);
734 
735 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
736 
737 	INIT_LIST_HEAD(&ordered_vmas);
738 	INIT_LIST_HEAD(&pinned_vmas);
739 	while (!list_empty(vmas)) {
740 		struct drm_i915_gem_exec_object2 *entry;
741 		bool need_fence, need_mappable;
742 
743 		vma = list_first_entry(vmas, struct i915_vma, exec_list);
744 		obj = vma->obj;
745 		entry = vma->exec_entry;
746 
747 		if (ctx->flags & CONTEXT_NO_ZEROMAP)
748 			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
749 
750 		if (!has_fenced_gpu_access)
751 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
752 		need_fence =
753 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
754 			obj->tiling_mode != I915_TILING_NONE;
755 		need_mappable = need_fence || need_reloc_mappable(vma);
756 
757 		if (entry->flags & EXEC_OBJECT_PINNED)
758 			list_move_tail(&vma->exec_list, &pinned_vmas);
759 		else if (need_mappable) {
760 			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
761 			list_move(&vma->exec_list, &ordered_vmas);
762 		} else
763 			list_move_tail(&vma->exec_list, &ordered_vmas);
764 
765 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
766 		obj->base.pending_write_domain = 0;
767 	}
768 	list_splice(&ordered_vmas, vmas);
769 	list_splice(&pinned_vmas, vmas);
770 
771 	/* Attempt to pin all of the buffers into the GTT.
772 	 * This is done in 3 phases:
773 	 *
774 	 * 1a. Unbind all objects that do not match the GTT constraints for
775 	 *     the execbuffer (fenceable, mappable, alignment etc).
776 	 * 1b. Increment pin count for already bound objects.
777 	 * 2.  Bind new objects.
778 	 * 3.  Decrement pin count.
779 	 *
780 	 * This avoids unnecessary unbinding of later objects in order to make
781 	 * room for the earlier objects *unless* we need to defragment.
782 	 */
783 	retry = 0;
784 	do {
785 		int ret = 0;
786 
787 		/* Unbind any ill-fitting objects or pin. */
788 		list_for_each_entry(vma, vmas, exec_list) {
789 			if (!drm_mm_node_allocated(&vma->node))
790 				continue;
791 
792 			if (eb_vma_misplaced(vma))
793 				ret = i915_vma_unbind(vma);
794 			else
795 				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
796 			if (ret)
797 				goto err;
798 		}
799 
800 		/* Bind fresh objects */
801 		list_for_each_entry(vma, vmas, exec_list) {
802 			if (drm_mm_node_allocated(&vma->node))
803 				continue;
804 
805 			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
806 			if (ret)
807 				goto err;
808 		}
809 
810 err:
811 		if (ret != -ENOSPC || retry++)
812 			return ret;
813 
814 		/* Decrement pin count for bound objects */
815 		list_for_each_entry(vma, vmas, exec_list)
816 			i915_gem_execbuffer_unreserve_vma(vma);
817 
818 		ret = i915_gem_evict_vm(vm, true);
819 		if (ret)
820 			return ret;
821 	} while (1);
822 }
823 
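/*
 * Slow relocation path: drop every reservation and struct_mutex, copy all
 * relocation entries from userspace with pagefaults enabled, then retake the
 * lock, look the objects up again and apply the relocations from the kernel
 * copy. The user's presumed offsets are invalidated so the relocations are
 * reprocessed on the next execbuffer.
 */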
824 static int
825 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
826 				  struct drm_i915_gem_execbuffer2 *args,
827 				  struct drm_file *file,
828 				  struct intel_engine_cs *ring,
829 				  struct eb_vmas *eb,
830 				  struct drm_i915_gem_exec_object2 *exec,
831 				  struct intel_context *ctx)
832 {
833 	struct drm_i915_gem_relocation_entry *reloc;
834 	struct i915_address_space *vm;
835 	struct i915_vma *vma;
836 	bool need_relocs;
837 	int *reloc_offset;
838 	int i, total, ret;
839 	unsigned count = args->buffer_count;
840 
841 	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
842 
843 	/* We may process another execbuffer during the unlock... */
844 	while (!list_empty(&eb->vmas)) {
845 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
846 		list_del_init(&vma->exec_list);
847 		i915_gem_execbuffer_unreserve_vma(vma);
848 		drm_gem_object_unreference(&vma->obj->base);
849 	}
850 
851 	mutex_unlock(&dev->struct_mutex);
852 
853 	total = 0;
854 	for (i = 0; i < count; i++)
855 		total += exec[i].relocation_count;
856 
857 	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
858 	reloc = drm_malloc_ab(total, sizeof(*reloc));
859 	if (reloc == NULL || reloc_offset == NULL) {
860 		drm_free_large(reloc);
861 		drm_free_large(reloc_offset);
862 		mutex_lock(&dev->struct_mutex);
863 		return -ENOMEM;
864 	}
865 
866 	total = 0;
867 	for (i = 0; i < count; i++) {
868 		struct drm_i915_gem_relocation_entry __user *user_relocs;
869 		u64 invalid_offset = (u64)-1;
870 		int j;
871 
872 		user_relocs = to_user_ptr(exec[i].relocs_ptr);
873 
874 		if (copy_from_user(reloc+total, user_relocs,
875 				   exec[i].relocation_count * sizeof(*reloc))) {
876 			ret = -EFAULT;
877 			mutex_lock(&dev->struct_mutex);
878 			goto err;
879 		}
880 
881 		/* As we do not update the known relocation offsets after
882 		 * relocating (due to the complexities in lock handling),
883 		 * we need to mark them as invalid now so that we force the
884 		 * relocation processing next time, just in case the target
885 		 * object is evicted and then rebound into its old
886 		 * presumed_offset before the next execbuffer - if that
887 		 * happened we would wrongly assume that the
888 		 * relocations were still valid.
889 		 */
890 		for (j = 0; j < exec[i].relocation_count; j++) {
891 			if (__copy_to_user(&user_relocs[j].presumed_offset,
892 					   &invalid_offset,
893 					   sizeof(invalid_offset))) {
894 				ret = -EFAULT;
895 				mutex_lock(&dev->struct_mutex);
896 				goto err;
897 			}
898 		}
899 
900 		reloc_offset[i] = total;
901 		total += exec[i].relocation_count;
902 	}
903 
904 	ret = i915_mutex_lock_interruptible(dev);
905 	if (ret) {
906 		mutex_lock(&dev->struct_mutex);
907 		goto err;
908 	}
909 
910 	/* reacquire the objects */
911 	eb_reset(eb);
912 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
913 	if (ret)
914 		goto err;
915 
916 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
917 	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
918 	if (ret)
919 		goto err;
920 
921 	list_for_each_entry(vma, &eb->vmas, exec_list) {
922 		int offset = vma->exec_entry - exec;
923 		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
924 							    reloc + reloc_offset[offset]);
925 		if (ret)
926 			goto err;
927 	}
928 
929 	/* Leave the user relocations as they are: this is the painfully slow path,
930 	 * and we want to avoid the complication of dropping the lock whilst
931 	 * having buffers reserved in the aperture and so causing spurious
932 	 * ENOSPC for random operations.
933 	 */
934 
935 err:
936 	drm_free_large(reloc);
937 	drm_free_large(reloc_offset);
938 	return ret;
939 }
940 
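/*
 * Flush CPU writes and synchronise with any other rings still using the
 * objects, then unconditionally invalidate the GPU caches so the batch sees
 * coherent data.
 */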
941 static int
942 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
943 				struct list_head *vmas)
944 {
945 	const unsigned other_rings = ~intel_ring_flag(req->ring);
946 	struct i915_vma *vma;
947 	uint32_t flush_domains = 0;
948 	bool flush_chipset = false;
949 	int ret;
950 
951 	list_for_each_entry(vma, vmas, exec_list) {
952 		struct drm_i915_gem_object *obj = vma->obj;
953 
954 		if (obj->active & other_rings) {
955 			ret = i915_gem_object_sync(obj, req->ring, &req);
956 			if (ret)
957 				return ret;
958 		}
959 
960 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
961 			flush_chipset |= i915_gem_clflush_object(obj, false);
962 
963 		flush_domains |= obj->base.write_domain;
964 	}
965 
966 	if (flush_chipset)
967 		i915_gem_chipset_flush(req->ring->dev);
968 
969 	if (flush_domains & I915_GEM_DOMAIN_GTT)
970 		wmb();
971 
972 	/* Unconditionally invalidate gpu caches and ensure that we do flush
973 	 * any residual writes from the previous batch.
974 	 */
975 	return intel_ring_invalidate_all_caches(req);
976 }
977 
978 static bool
979 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
980 {
981 	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
982 		return false;
983 
984 	/* Kernel clipping was a DRI1 misfeature */
985 	if (exec->num_cliprects || exec->cliprects_ptr)
986 		return false;
987 
988 	if (exec->DR4 == 0xffffffff) {
989 		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
990 		exec->DR4 = 0;
991 	}
992 	if (exec->DR1 || exec->DR4)
993 		return false;
994 
995 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
996 		return false;
997 
998 	return true;
999 }
1000 
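/*
 * Sanity-check the user supplied exec list before any locks are taken:
 * reject unknown flags, non-canonical or unaligned pinned offsets, bogus
 * alignments and relocation counts that would overflow a single allocation,
 * and prefault the relocation arrays unless disabled via
 * i915.prefault_disable.
 */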
1001 static int
1002 validate_exec_list(struct drm_device *dev,
1003 		   struct drm_i915_gem_exec_object2 *exec,
1004 		   int count)
1005 {
1006 	unsigned relocs_total = 0;
1007 	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1008 	unsigned invalid_flags;
1009 	int i;
1010 
1011 	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1012 	if (USES_FULL_PPGTT(dev))
1013 		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1014 
1015 	for (i = 0; i < count; i++) {
1016 		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
1017 		int length; /* limited by fault_in_multipages_readable() */
1018 
1019 		if (exec[i].flags & invalid_flags)
1020 			return -EINVAL;
1021 
1022 		/* Offset can be used as input (EXEC_OBJECT_PINNED), so reject
1023 		 * any non-page-aligned or non-canonical addresses.
1024 		 */
1025 		if (exec[i].flags & EXEC_OBJECT_PINNED) {
1026 			if (exec[i].offset !=
1027 			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1028 				return -EINVAL;
1029 
1030 			/* From the drm_mm perspective the address space is continuous,
1031 			 * so from this point on we always use the non-canonical
1032 			 * form internally.
1033 			 */
1034 			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1035 		}
1036 
1037 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1038 			return -EINVAL;
1039 
1040 		/* First check for malicious input causing overflow in
1041 		 * the worst case where we need to allocate the entire
1042 		 * relocation tree as a single array.
1043 		 */
1044 		if (exec[i].relocation_count > relocs_max - relocs_total)
1045 			return -EINVAL;
1046 		relocs_total += exec[i].relocation_count;
1047 
1048 		length = exec[i].relocation_count *
1049 			sizeof(struct drm_i915_gem_relocation_entry);
1050 		/*
1051 		 * We must check that the entire relocation array is safe
1052 		 * to read, but since we may need to update the presumed
1053 		 * offsets during execution, check for full write access.
1054 		 */
1055 #if 0
1056 		if (!access_ok(VERIFY_WRITE, ptr, length))
1057 			return -EFAULT;
1058 #endif
1059 
1060 		if (likely(!i915.prefault_disable)) {
1061 			if (fault_in_multipages_readable(ptr, length))
1062 				return -EFAULT;
1063 		}
1064 	}
1065 
1066 	return 0;
1067 }
1068 
1069 static struct intel_context *
1070 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1071 			  struct intel_engine_cs *ring, const u32 ctx_id)
1072 {
1073 	struct intel_context *ctx = NULL;
1074 	struct i915_ctx_hang_stats *hs;
1075 
1076 	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1077 		return ERR_PTR(-EINVAL);
1078 
1079 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
1080 	if (IS_ERR(ctx))
1081 		return ctx;
1082 
1083 	hs = &ctx->hang_stats;
1084 	if (hs->banned) {
1085 		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1086 		return ERR_PTR(-EIO);
1087 	}
1088 
1089 	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1090 		int ret = intel_lr_context_deferred_alloc(ctx, ring);
1091 		if (ret) {
1092 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1093 			return ERR_PTR(ret);
1094 		}
1095 	}
1096 
1097 	return ctx;
1098 }
1099 
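/*
 * Mark every vma in the execbuffer as active on the given request: commit the
 * read/write domains accumulated during reservation and relocation, record
 * the last write and fence requests, and notify frontbuffer tracking of GPU
 * writes.
 */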
1100 void
1101 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1102 				   struct drm_i915_gem_request *req)
1103 {
1104 	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1105 	struct i915_vma *vma;
1106 
1107 	list_for_each_entry(vma, vmas, exec_list) {
1108 		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1109 		struct drm_i915_gem_object *obj = vma->obj;
1110 		u32 old_read = obj->base.read_domains;
1111 		u32 old_write = obj->base.write_domain;
1112 
1113 		obj->dirty = 1; /* be paranoid  */
1114 		obj->base.write_domain = obj->base.pending_write_domain;
1115 		if (obj->base.write_domain == 0)
1116 			obj->base.pending_read_domains |= obj->base.read_domains;
1117 		obj->base.read_domains = obj->base.pending_read_domains;
1118 
1119 		i915_vma_move_to_active(vma, req);
1120 		if (obj->base.write_domain) {
1121 			i915_gem_request_assign(&obj->last_write_req, req);
1122 
1123 			intel_fb_obj_invalidate(obj, ORIGIN_CS);
1124 
1125 			/* update for the implicit flush after a batch */
1126 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1127 		}
1128 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1129 			i915_gem_request_assign(&obj->last_fenced_req, req);
1130 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1131 				struct drm_i915_private *dev_priv = to_i915(ring->dev);
1132 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1133 					       &dev_priv->mm.fence_list);
1134 			}
1135 		}
1136 
1137 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
1138 	}
1139 }
1140 
1141 void
1142 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
1143 {
1144 	/* Unconditionally force add_request to emit a full flush. */
1145 	params->ring->gpu_caches_dirty = true;
1146 
1147 	/* Add a breadcrumb for the completion of the batch buffer */
1148 	__i915_add_request(params->request, params->batch_obj, true);
1149 }
1150 
1151 static int
1152 i915_reset_gen7_sol_offsets(struct drm_device *dev,
1153 			    struct drm_i915_gem_request *req)
1154 {
1155 	struct intel_engine_cs *ring = req->ring;
1156 	struct drm_i915_private *dev_priv = dev->dev_private;
1157 	int ret, i;
1158 
1159 	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1160 		DRM_DEBUG("sol reset is gen7/rcs only\n");
1161 		return -EINVAL;
1162 	}
1163 
1164 	ret = intel_ring_begin(req, 4 * 3);
1165 	if (ret)
1166 		return ret;
1167 
1168 	for (i = 0; i < 4; i++) {
1169 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1170 		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
1171 		intel_ring_emit(ring, 0);
1172 	}
1173 
1174 	intel_ring_advance(ring);
1175 
1176 	return 0;
1177 }
1178 
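/*
 * Run the command parser over the userspace batch and return a pinned shadow
 * copy that is safe to execute. The shadow is added to the eb list so that it
 * is retired like any other execbuffer object; -EACCES (an unhandled chained
 * batch) falls back to the original, unparsed batch.
 */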
1179 static struct drm_i915_gem_object*
1180 i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1181 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1182 			  struct eb_vmas *eb,
1183 			  struct drm_i915_gem_object *batch_obj,
1184 			  u32 batch_start_offset,
1185 			  u32 batch_len,
1186 			  bool is_master)
1187 {
1188 	struct drm_i915_gem_object *shadow_batch_obj;
1189 	struct i915_vma *vma;
1190 	int ret;
1191 
1192 	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
1193 						   PAGE_ALIGN(batch_len));
1194 	if (IS_ERR(shadow_batch_obj))
1195 		return shadow_batch_obj;
1196 
1197 	ret = i915_parse_cmds(ring,
1198 			      batch_obj,
1199 			      shadow_batch_obj,
1200 			      batch_start_offset,
1201 			      batch_len,
1202 			      is_master);
1203 	if (ret)
1204 		goto err;
1205 
1206 	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1207 	if (ret)
1208 		goto err;
1209 
1210 	i915_gem_object_unpin_pages(shadow_batch_obj);
1211 
1212 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1213 
1214 	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1215 	vma->exec_entry = shadow_exec_entry;
1216 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1217 	drm_gem_object_reference(&shadow_batch_obj->base);
1218 	list_add_tail(&vma->exec_list, &eb->vmas);
1219 
1220 	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1221 
1222 	return shadow_batch_obj;
1223 
1224 err:
1225 	i915_gem_object_unpin_pages(shadow_batch_obj);
1226 	if (ret == -EACCES) /* unhandled chained batch */
1227 		return batch_obj;
1228 	else
1229 		return ERR_PTR(ret);
1230 }
1231 
1232 int
1233 i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1234 			       struct drm_i915_gem_execbuffer2 *args,
1235 			       struct list_head *vmas)
1236 {
1237 	struct drm_device *dev = params->dev;
1238 	struct intel_engine_cs *ring = params->ring;
1239 	struct drm_i915_private *dev_priv = dev->dev_private;
1240 	u64 exec_start, exec_len;
1241 	int instp_mode;
1242 	u32 instp_mask;
1243 	int ret;
1244 
1245 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1246 	if (ret)
1247 		return ret;
1248 
1249 	ret = i915_switch_context(params->request);
1250 	if (ret)
1251 		return ret;
1252 
1253 	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1254 	     "%s didn't clear reload\n", ring->name);
1255 
1256 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1257 	instp_mask = I915_EXEC_CONSTANTS_MASK;
1258 	switch (instp_mode) {
1259 	case I915_EXEC_CONSTANTS_REL_GENERAL:
1260 	case I915_EXEC_CONSTANTS_ABSOLUTE:
1261 	case I915_EXEC_CONSTANTS_REL_SURFACE:
1262 		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1263 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1264 			return -EINVAL;
1265 		}
1266 
1267 		if (instp_mode != dev_priv->relative_constants_mode) {
1268 			if (INTEL_INFO(dev)->gen < 4) {
1269 				DRM_DEBUG("no rel constants on pre-gen4\n");
1270 				return -EINVAL;
1271 			}
1272 
1273 			if (INTEL_INFO(dev)->gen > 5 &&
1274 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1275 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1276 				return -EINVAL;
1277 			}
1278 
1279 			/* The HW changed the meaning of this bit on gen6 */
1280 			if (INTEL_INFO(dev)->gen >= 6)
1281 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1282 		}
1283 		break;
1284 	default:
1285 		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1286 		return -EINVAL;
1287 	}
1288 
1289 	if (ring == &dev_priv->ring[RCS] &&
1290 	    instp_mode != dev_priv->relative_constants_mode) {
1291 		ret = intel_ring_begin(params->request, 4);
1292 		if (ret)
1293 			return ret;
1294 
1295 		intel_ring_emit(ring, MI_NOOP);
1296 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1297 		intel_ring_emit_reg(ring, INSTPM);
1298 		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1299 		intel_ring_advance(ring);
1300 
1301 		dev_priv->relative_constants_mode = instp_mode;
1302 	}
1303 
1304 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1305 		ret = i915_reset_gen7_sol_offsets(dev, params->request);
1306 		if (ret)
1307 			return ret;
1308 	}
1309 
1310 	exec_len   = args->batch_len;
1311 	exec_start = params->batch_obj_vm_offset +
1312 		     params->args_batch_start_offset;
1313 
1314 	ret = ring->dispatch_execbuffer(params->request,
1315 					exec_start, exec_len,
1316 					params->dispatch_flags);
1317 	if (ret)
1318 		return ret;
1319 
1320 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1321 
1322 	i915_gem_execbuffer_move_to_active(vmas, params->request);
1323 	i915_gem_execbuffer_retire_commands(params);
1324 
1325 	return 0;
1326 }
1327 
1328 /**
1329  * Find one BSD ring to dispatch the corresponding BSD command.
1330  * The Ring ID is returned.
1331  */
1332 static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1333 				  struct drm_file *file)
1334 {
1335 	struct drm_i915_private *dev_priv = dev->dev_private;
1336 	struct drm_i915_file_private *file_priv = file->driver_priv;
1337 
1338 	/* Check whether the file_priv is using one ring */
1339 	if (file_priv->bsd_ring)
1340 		return file_priv->bsd_ring->id;
1341 	else {
1342 		/* If not, use the ping-pong mechanism to select one ring */
1343 		int ring_id;
1344 
1345 		mutex_lock(&dev->struct_mutex);
1346 		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1347 			ring_id = VCS;
1348 			dev_priv->mm.bsd_ring_dispatch_index = 1;
1349 		} else {
1350 			ring_id = VCS2;
1351 			dev_priv->mm.bsd_ring_dispatch_index = 0;
1352 		}
1353 		file_priv->bsd_ring = &dev_priv->ring[ring_id];
1354 		mutex_unlock(&dev->struct_mutex);
1355 		return ring_id;
1356 	}
1357 }
1358 
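/*
 * By convention the batch buffer is the last object in the execbuffer list
 * supplied by userspace.
 */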
1359 static struct drm_i915_gem_object *
1360 eb_get_batch(struct eb_vmas *eb)
1361 {
1362 	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1363 
1364 	/*
1365 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
1366 	 * to negative relocation deltas. Usually that works out ok since the
1367 	 * relocated address is still positive, except when the batch is placed
1368 	 * very low in the GTT. Ensure this doesn't happen.
1369 	 *
1370 	 * Note that actual hangs have only been observed on gen7, but for
1371 	 * paranoia do it everywhere.
1372 	 */
1373 	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
1374 		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1375 
1376 	return vma->obj;
1377 }
1378 
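/*
 * Main execbuffer path shared by the legacy and execbuffer2 ioctls: validate
 * the arguments, select the target ring and context, reserve and relocate all
 * objects, optionally run the command parser over the batch, and hand the
 * request to the ring- or execlist-specific submission backend.
 */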
1379 static int
1380 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1381 		       struct drm_file *file,
1382 		       struct drm_i915_gem_execbuffer2 *args,
1383 		       struct drm_i915_gem_exec_object2 *exec)
1384 {
1385 	struct drm_i915_private *dev_priv = dev->dev_private;
1386 	struct eb_vmas *eb;
1387 	struct drm_i915_gem_object *batch_obj;
1388 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
1389 	struct intel_engine_cs *ring;
1390 	struct intel_context *ctx;
1391 	struct i915_address_space *vm;
1392 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1393 	struct i915_execbuffer_params *params = &params_master;
1394 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1395 	u32 dispatch_flags;
1396 	int ret;
1397 	bool need_relocs;
1398 
1399 	if (!i915_gem_check_execbuffer(args))
1400 		return -EINVAL;
1401 
1402 	ret = validate_exec_list(dev, exec, args->buffer_count);
1403 	if (ret)
1404 		return ret;
1405 
1406 	dispatch_flags = 0;
1407 	if (args->flags & I915_EXEC_SECURE) {
1408 		dispatch_flags |= I915_DISPATCH_SECURE;
1409 	}
1410 	if (args->flags & I915_EXEC_IS_PINNED)
1411 		dispatch_flags |= I915_DISPATCH_PINNED;
1412 
1413 	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1414 		DRM_DEBUG("execbuf with unknown ring: %d\n",
1415 			  (int)(args->flags & I915_EXEC_RING_MASK));
1416 		return -EINVAL;
1417 	}
1418 
1419 	if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
1420 	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1421 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
1422 			"bsd dispatch flags: %d\n", (int)(args->flags));
1423 		return -EINVAL;
1424 	}
1425 
1426 	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1427 		ring = &dev_priv->ring[RCS];
1428 	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1429 		if (HAS_BSD2(dev)) {
1430 			int ring_id;
1431 
1432 			switch (args->flags & I915_EXEC_BSD_MASK) {
1433 			case I915_EXEC_BSD_DEFAULT:
1434 				ring_id = gen8_dispatch_bsd_ring(dev, file);
1435 				ring = &dev_priv->ring[ring_id];
1436 				break;
1437 			case I915_EXEC_BSD_RING1:
1438 				ring = &dev_priv->ring[VCS];
1439 				break;
1440 			case I915_EXEC_BSD_RING2:
1441 				ring = &dev_priv->ring[VCS2];
1442 				break;
1443 			default:
1444 				DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
1445 					  (int)(args->flags & I915_EXEC_BSD_MASK));
1446 				return -EINVAL;
1447 			}
1448 		} else
1449 			ring = &dev_priv->ring[VCS];
1450 	} else
1451 		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1452 
1453 	if (!intel_ring_initialized(ring)) {
1454 		DRM_DEBUG("execbuf with invalid ring: %d\n",
1455 			  (int)(args->flags & I915_EXEC_RING_MASK));
1456 		return -EINVAL;
1457 	}
1458 
1459 	if (args->buffer_count < 1) {
1460 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1461 		return -EINVAL;
1462 	}
1463 
1464 	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1465 		if (!HAS_RESOURCE_STREAMER(dev)) {
1466 			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1467 			return -EINVAL;
1468 		}
1469 		if (ring->id != RCS) {
1470 			DRM_DEBUG("RS is not available on %s\n",
1471 				 ring->name);
1472 			return -EINVAL;
1473 		}
1474 
1475 		dispatch_flags |= I915_DISPATCH_RS;
1476 	}
1477 
1478 	intel_runtime_pm_get(dev_priv);
1479 
1480 	ret = i915_mutex_lock_interruptible(dev);
1481 	if (ret)
1482 		goto pre_mutex_err;
1483 
1484 	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1485 	if (IS_ERR(ctx)) {
1486 		mutex_unlock(&dev->struct_mutex);
1487 		ret = PTR_ERR(ctx);
1488 		goto pre_mutex_err;
1489 	}
1490 
1491 	i915_gem_context_reference(ctx);
1492 
1493 	if (ctx->ppgtt)
1494 		vm = &ctx->ppgtt->base;
1495 	else
1496 		vm = &dev_priv->gtt.base;
1497 
1498 	memset(&params_master, 0x00, sizeof(params_master));
1499 
1500 	eb = eb_create(args);
1501 	if (eb == NULL) {
1502 		i915_gem_context_unreference(ctx);
1503 		mutex_unlock(&dev->struct_mutex);
1504 		ret = -ENOMEM;
1505 		goto pre_mutex_err;
1506 	}
1507 
1508 	/* Look up object handles */
1509 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
1510 	if (ret)
1511 		goto err;
1512 
1513 	/* take note of the batch buffer before we might reorder the lists */
1514 	batch_obj = eb_get_batch(eb);
1515 
1516 	/* Move the objects en-masse into the GTT, evicting if necessary. */
1517 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1518 	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
1519 	if (ret)
1520 		goto err;
1521 
1522 	/* The objects are in their final locations, apply the relocations. */
1523 	if (need_relocs)
1524 		ret = i915_gem_execbuffer_relocate(eb);
1525 	if (ret) {
1526 		if (ret == -EFAULT) {
1527 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1528 								eb, exec, ctx);
1529 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1530 		}
1531 		if (ret)
1532 			goto err;
1533 	}
1534 
1535 	/* The batch must not be written by the GPU; reject self-modifying batches. */
1536 	if (batch_obj->base.pending_write_domain) {
1537 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1538 		ret = -EINVAL;
1539 		goto err;
1540 	}
1541 
1542 	params->args_batch_start_offset = args->batch_start_offset;
1543 	if (i915_needs_cmd_parser(ring) && args->batch_len) {
1544 		struct drm_i915_gem_object *parsed_batch_obj;
1545 
1546 		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
1547 						      &shadow_exec_entry,
1548 						      eb,
1549 						      batch_obj,
1550 						      args->batch_start_offset,
1551 						      args->batch_len,
1552 						      file->is_master);
1553 		if (IS_ERR(parsed_batch_obj)) {
1554 			ret = PTR_ERR(parsed_batch_obj);
1555 			goto err;
1556 		}
1557 
1558 		/*
1559 		 * parsed_batch_obj == batch_obj means batch not fully parsed:
1560 		 * Accept, but don't promote to secure.
1561 		 */
1562 
1563 		if (parsed_batch_obj != batch_obj) {
1564 			/*
1565 			 * Batch parsed and accepted:
1566 			 *
1567 			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1568 			 * bit from MI_BATCH_BUFFER_START commands issued in
1569 			 * the dispatch_execbuffer implementations. We
1570 			 * specifically don't want that set on batches the
1571 			 * command parser has accepted.
1572 			 */
1573 			dispatch_flags |= I915_DISPATCH_SECURE;
1574 			params->args_batch_start_offset = 0;
1575 			batch_obj = parsed_batch_obj;
1576 		}
1577 	}
1578 
1579 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1580 
1581 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1582 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
1583 	 * hsw should have this fixed, but bdw mucks it up again. */
1584 	if (dispatch_flags & I915_DISPATCH_SECURE) {
1585 		/*
1586 		 * So at first glance it looks freaky that we pin the batch here
1587 		 * outside of the reservation loop. But:
1588 		 * - The batch is already pinned into the relevant ppgtt, so we
1589 		 *   already have the backing storage fully allocated.
1590 		 * - No other BO uses the global gtt (well contexts, but meh),
1591 		 *   so we don't really have issues with multiple objects not
1592 		 *   fitting due to fragmentation.
1593 		 * So this is actually safe.
1594 		 */
1595 		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1596 		if (ret)
1597 			goto err;
1598 
1599 		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
1600 	} else
1601 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1602 
1603 	/* Allocate a request for this batch buffer nice and early. */
1604 	ret = i915_gem_request_alloc(ring, ctx, &params->request);
1605 	if (ret)
1606 		goto err_batch_unpin;
1607 
1608 	ret = i915_gem_request_add_to_client(params->request, file);
1609 	if (ret)
1610 		goto err_batch_unpin;
1611 
1612 	/*
1613 	 * Save assorted stuff away to pass through to *_submission().
1614 	 * NB: This data should be 'persistent' and not local as it will be
1615 	 * kept around beyond the duration of the IOCTL once the GPU
1616 	 * scheduler arrives.
1617 	 */
1618 	params->dev                     = dev;
1619 	params->file                    = file;
1620 	params->ring                    = ring;
1621 	params->dispatch_flags          = dispatch_flags;
1622 	params->batch_obj               = batch_obj;
1623 	params->ctx                     = ctx;
1624 
1625 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1626 
1627 err_batch_unpin:
1628 	/*
1629 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1630 	 * batch vma for correctness. To be less ugly and less fragile this
1631 	 * needs to be adjusted to also track the ggtt batch vma properly as
1632 	 * active.
1633 	 */
1634 	if (dispatch_flags & I915_DISPATCH_SECURE)
1635 		i915_gem_object_ggtt_unpin(batch_obj);
1636 
1637 err:
1638 	/* the request owns the ref now */
1639 	i915_gem_context_unreference(ctx);
1640 	eb_destroy(eb);
1641 
1642 	/*
1643 	 * If the request was created but not successfully submitted then it
1644 	 * must be freed again. If it was submitted then it is being tracked
1645 	 * on the active request list and no clean up is required here.
1646 	 */
1647 	if (ret && params->request)
1648 		i915_gem_request_cancel(params->request);
1649 
1650 	mutex_unlock(&dev->struct_mutex);
1651 
1652 pre_mutex_err:
1653 	/* intel_gpu_busy should also get a ref, so it will free when the device
1654 	 * is really idle. */
1655 	intel_runtime_pm_put(dev_priv);
1656 	return ret;
1657 }
1658 
1659 /*
1660  * Legacy execbuffer just creates an exec2 list from the original exec object
1661  * list array and passes it to the real function.
1662  */
1663 int
1664 i915_gem_execbuffer(struct drm_device *dev, void *data,
1665 		    struct drm_file *file)
1666 {
1667 	struct drm_i915_gem_execbuffer *args = data;
1668 	struct drm_i915_gem_execbuffer2 exec2;
1669 	struct drm_i915_gem_exec_object *exec_list = NULL;
1670 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1671 	int ret, i;
1672 
1673 	if (args->buffer_count < 1) {
1674 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1675 		return -EINVAL;
1676 	}
1677 
1678 	/* Copy in the exec list from userland */
1679 	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1680 	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1681 	if (exec_list == NULL || exec2_list == NULL) {
1682 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1683 			  args->buffer_count);
1684 		drm_free_large(exec_list);
1685 		drm_free_large(exec2_list);
1686 		return -ENOMEM;
1687 	}
1688 	ret = copy_from_user(exec_list,
1689 			     to_user_ptr(args->buffers_ptr),
1690 			     sizeof(*exec_list) * args->buffer_count);
1691 	if (ret != 0) {
1692 		DRM_DEBUG("copy %d exec entries failed %d\n",
1693 			  args->buffer_count, ret);
1694 		drm_free_large(exec_list);
1695 		drm_free_large(exec2_list);
1696 		return -EFAULT;
1697 	}
1698 
1699 	for (i = 0; i < args->buffer_count; i++) {
1700 		exec2_list[i].handle = exec_list[i].handle;
1701 		exec2_list[i].relocation_count = exec_list[i].relocation_count;
1702 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1703 		exec2_list[i].alignment = exec_list[i].alignment;
1704 		exec2_list[i].offset = exec_list[i].offset;
1705 		if (INTEL_INFO(dev)->gen < 4)
1706 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1707 		else
1708 			exec2_list[i].flags = 0;
1709 	}
1710 
1711 	exec2.buffers_ptr = args->buffers_ptr;
1712 	exec2.buffer_count = args->buffer_count;
1713 	exec2.batch_start_offset = args->batch_start_offset;
1714 	exec2.batch_len = args->batch_len;
1715 	exec2.DR1 = args->DR1;
1716 	exec2.DR4 = args->DR4;
1717 	exec2.num_cliprects = args->num_cliprects;
1718 	exec2.cliprects_ptr = args->cliprects_ptr;
1719 	exec2.flags = I915_EXEC_RENDER;
1720 	i915_execbuffer2_set_context_id(exec2, 0);
1721 
1722 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1723 	if (!ret) {
1724 		struct drm_i915_gem_exec_object __user *user_exec_list =
1725 			to_user_ptr(args->buffers_ptr);
1726 
1727 		/* Copy the new buffer offsets back to the user's exec list. */
1728 		for (i = 0; i < args->buffer_count; i++) {
1729 			exec2_list[i].offset =
1730 				gen8_canonical_addr(exec2_list[i].offset);
1731 			ret = __copy_to_user(&user_exec_list[i].offset,
1732 					     &exec2_list[i].offset,
1733 					     sizeof(user_exec_list[i].offset));
1734 			if (ret) {
1735 				ret = -EFAULT;
1736 				DRM_DEBUG("failed to copy %d exec entries "
1737 					  "back to user (%d)\n",
1738 					  args->buffer_count, ret);
1739 				break;
1740 			}
1741 		}
1742 	}
1743 
1744 	drm_free_large(exec_list);
1745 	drm_free_large(exec2_list);
1746 	return ret;
1747 }
1748 
1749 int
1750 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1751 		     struct drm_file *file)
1752 {
1753 	struct drm_i915_gem_execbuffer2 *args = data;
1754 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1755 	int ret;
1756 
1757 	if (args->buffer_count < 1 ||
1758 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1759 		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1760 		return -EINVAL;
1761 	}
1762 
1763 	if (args->rsvd2 != 0) {
1764 		DRM_DEBUG("dirty rsvd2 field\n");
1765 		return -EINVAL;
1766 	}
1767 
1768 	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1769 			     M_DRM, M_NOWAIT);
1770 	if (exec2_list == NULL)
1771 		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1772 					   args->buffer_count);
1773 	if (exec2_list == NULL) {
1774 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1775 			  args->buffer_count);
1776 		return -ENOMEM;
1777 	}
1778 	ret = copy_from_user(exec2_list,
1779 			     to_user_ptr(args->buffers_ptr),
1780 			     sizeof(*exec2_list) * args->buffer_count);
1781 	if (ret != 0) {
1782 		DRM_DEBUG("copy %d exec entries failed %d\n",
1783 			  args->buffer_count, ret);
1784 		drm_free_large(exec2_list);
1785 		return -EFAULT;
1786 	}
1787 
1788 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1789 	if (!ret) {
1790 		/* Copy the new buffer offsets back to the user's exec list. */
1791 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
1792 				   to_user_ptr(args->buffers_ptr);
1793 		int i;
1794 
1795 		for (i = 0; i < args->buffer_count; i++) {
1796 			exec2_list[i].offset =
1797 				gen8_canonical_addr(exec2_list[i].offset);
1798 			ret = __copy_to_user(&user_exec_list[i].offset,
1799 					     &exec2_list[i].offset,
1800 					     sizeof(user_exec_list[i].offset));
1801 			if (ret) {
1802 				ret = -EFAULT;
1803 				DRM_DEBUG("failed to copy %d exec entries "
1804 					  "back to user\n",
1805 					  args->buffer_count);
1806 				break;
1807 			}
1808 		}
1809 	}
1810 
1811 	drm_free_large(exec2_list);
1812 	return ret;
1813 }
1814