1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008,2010 Intel Corporation
5  */
6 
7 #include <linux/intel-iommu.h>
8 #include <linux/dma-resv.h>
9 #include <linux/sync_file.h>
10 #include <linux/uaccess.h>
11 
12 #include <drm/drm_syncobj.h>
13 
14 #include <dev/pci/pcivar.h>
15 #include <dev/pci/agpvar.h>
16 
17 #include "display/intel_frontbuffer.h"
18 
19 #include "gem/i915_gem_ioctls.h"
20 #include "gt/intel_context.h"
21 #include "gt/intel_engine_pool.h"
22 #include "gt/intel_gt.h"
23 #include "gt/intel_gt_pm.h"
24 #include "gt/intel_ring.h"
25 
26 #include "i915_drv.h"
27 #include "i915_gem_clflush.h"
28 #include "i915_gem_context.h"
29 #include "i915_gem_ioctls.h"
30 #include "i915_sw_fence_work.h"
31 #include "i915_trace.h"
32 
33 struct eb_vma {
34 	struct i915_vma *vma;
35 	unsigned int flags;
36 
37 	/** This vma's place in the execbuf reservation list */
38 	struct drm_i915_gem_exec_object2 *exec;
39 	struct list_head bind_link;
40 	struct list_head reloc_link;
41 
42 	struct hlist_node node;
43 	u32 handle;
44 };
45 
46 enum {
47 	FORCE_CPU_RELOC = 1,
48 	FORCE_GTT_RELOC,
49 	FORCE_GPU_RELOC,
50 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
51 };
52 
53 #define __EXEC_OBJECT_HAS_PIN		BIT(31)
54 #define __EXEC_OBJECT_HAS_FENCE		BIT(30)
55 #define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
56 #define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
57 #define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
58 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
59 
60 #define __EXEC_HAS_RELOC	BIT(31)
61 #define __EXEC_INTERNAL_FLAGS	(~0u << 31)
62 #define UPDATE			PIN_OFFSET_FIXED
63 
64 #define BATCH_OFFSET_BIAS (256*1024)
65 
66 #define __I915_EXEC_ILLEGAL_FLAGS \
67 	(__I915_EXEC_UNKNOWN_FLAGS | \
68 	 I915_EXEC_CONSTANTS_MASK  | \
69 	 I915_EXEC_RESOURCE_STREAMER)
70 
71 /* Catch emission of unexpected errors for CI! */
72 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
73 #undef EINVAL
74 #define EINVAL ({ \
75 	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
76 	22; \
77 })
78 #endif
79 
80 /**
81  * DOC: User command execution
82  *
83  * Userspace submits commands to be executed on the GPU as an instruction
84  * stream within a GEM object we call a batchbuffer. These instructions may
85  * refer to other GEM objects containing auxiliary state such as kernels,
86  * samplers, render targets and even secondary batchbuffers. Userspace does
87  * not know where in the GPU memory these objects reside and so before the
88  * batchbuffer is passed to the GPU for execution, those addresses in the
89  * batchbuffer and auxiliary objects are updated. This is known as relocation,
90  * or patching. To try and avoid having to relocate each object on the next
91  * execution, userspace is told the location of those objects in this pass,
92  * but this remains just a hint as the kernel may choose a new location for
93  * any object in the future.
94  *
95  * At the level of talking to the hardware, submitting a batchbuffer for the
96  * GPU to execute amounts to adding content to a buffer from which the HW
97  * command streamer is reading.
98  *
99  * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
100  *    Execlists, this command is not placed on the same buffer as the
101  *    remaining items.
102  *
103  * 2. Add a command to invalidate caches to the buffer.
104  *
105  * 3. Add a batchbuffer start command to the buffer; the start command is
106  *    essentially a token together with the GPU address of the batchbuffer
107  *    to be executed.
108  *
109  * 4. Add a pipeline flush to the buffer.
110  *
111  * 5. Add a memory write command to the buffer to record when the GPU
112  *    is done executing the batchbuffer. The memory write writes the
113  *    global sequence number of the request, ``i915_request::global_seqno``;
114  *    the i915 driver uses the current value in the register to determine
115  *    if the GPU has completed the batchbuffer.
116  *
117  * 6. Add a user interrupt command to the buffer. This command instructs
118  *    the GPU to issue an interrupt when the command, pipeline flush and
119  *    memory write are completed.
120  *
121  * 7. Inform the hardware of the additional commands added to the buffer
122  *    (by updating the tail pointer).
123  *
124  * Processing an execbuf ioctl is conceptually split up into a few phases.
125  *
126  * 1. Validation - Ensure all the pointers, handles and flags are valid.
127  * 2. Reservation - Assign GPU address space for every object
128  * 3. Relocation - Update any addresses to point to the final locations
129  * 4. Serialisation - Order the request with respect to its dependencies
130  * 5. Construction - Construct a request to execute the batchbuffer
131  * 6. Submission (at some point in the future execution)
132  *
133  * Reserving resources for the execbuf is the most complicated phase. We
134  * neither want to have to migrate the object in the address space, nor do
135  * we want to have to update any relocations pointing to this object. Ideally,
136  * we want to leave the object where it is and for all the existing relocations
137  * to match. If the object is given a new address, or if userspace thinks the
138  * object is elsewhere, we have to parse all the relocation entries and update
139  * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
140  * all the target addresses in all of its objects match the value in the
141  * relocation entries and that they all match the presumed offsets given by the
142  * list of execbuffer objects. Using this knowledge, we know that if we haven't
143  * moved any buffers, all the relocation entries are valid and we can skip
144  * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
145  * hang.) The requirements for using I915_EXEC_NO_RELOC are:
146  *
147  *      The addresses written in the objects must match the corresponding
148  *      reloc.presumed_offset which in turn must match the corresponding
149  *      execobject.offset.
150  *
151  *      Any render targets written to in the batch must be flagged with
152  *      EXEC_OBJECT_WRITE.
153  *
154  *      To avoid stalling, execobject.offset should match the current
155  *      address of that object within the active context.
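 *
 *      As an illustration only (a minimal userspace sketch using the uAPI
 *      structures from include/uapi/drm/i915_drm.h; the handles, offsets
 *      and batch size are hypothetical and error handling is elided)::
 *
 *        struct drm_i915_gem_exec_object2 obj[2] = {};
 *        struct drm_i915_gem_execbuffer2 eb2 = {};
 *
 *        obj[0].handle = target_handle;
 *        obj[0].offset = presumed_target_offset;
 *        obj[0].flags  = EXEC_OBJECT_WRITE;
 *
 *        obj[1].handle = batch_handle;
 *        obj[1].offset = presumed_batch_offset;
 *
 *        eb2.buffers_ptr  = (uintptr_t)obj;
 *        eb2.buffer_count = 2;
 *        eb2.batch_len    = batch_size;
 *        eb2.flags        = I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT;
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2);
 *
 *      If every execobject.offset (and the matching reloc.presumed_offset)
 *      still matches where the kernel placed the object, the relocation
 *      walk can be skipped entirely; otherwise the kernel falls back to
 *      patching as described below.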
156  *
157  * The reservation is done in multiple phases. First we try and keep any
158  * object already bound in its current location - as long as it meets the
159  * constraints imposed by the new execbuffer. Any object left unbound after the
160  * first pass is then fitted into any available idle space. If an object does
161  * not fit, all objects are removed from the reservation and the process rerun
162  * after sorting the objects into a priority order (more difficult to fit
163  * objects are tried first). Failing that, the entire VM is cleared and we try
164  * to fit the execbuf once last time before concluding that it simply will not
165  * fit.
166  *
167  * A small complication to all of this is that we allow userspace not only to
168  * specify an alignment and a size for the object in the address space, but
169  * we also allow userspace to specify the exact offset. Such objects are
170  * simpler to place (the location is known a priori), and all we have to do is
171  * make sure the space is available.
172  *
173  * Once all the objects are in place, patching up the buried pointers to point
174  * to the final locations is a fairly simple job of walking over the relocation
175  * entry arrays, looking up the right address and rewriting the value into
176  * the object. Simple! ... The relocation entries are stored in user memory
177  * and so to access them we have to copy them into a local buffer. That copy
178  * has to avoid taking any pagefaults as they may lead back to a GEM object
179  * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
180  * the relocation into multiple passes. First we try to do everything within an
181  * atomic context (avoid the pagefaults) which requires that we never wait. If
182  * we detect that we may wait, or if we need to fault, then we have to fallback
183  * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
184  * bells yet?) Dropping the mutex means that we lose all the state we have
185  * built up so far for the execbuf and we must reset any global data. However,
186  * we do leave the objects pinned in their final locations - which is a
187  * potential issue for concurrent execbufs. Once we have left the mutex, we can
188  * allocate and copy all the relocation entries into a large array at our
189  * leisure, reacquire the mutex, reclaim all the objects and other state and
190  * then proceed to update any incorrect addresses with the objects.
191  *
192  * As we process the relocation entries, we maintain a record of whether the
193  * object is being written to. Using NO_RELOC, we expect userspace to provide
194  * this information instead. We also check whether we can skip the relocation
195  * by comparing the expected value inside the relocation entry with the target's
196  * final address. If they differ, we have to map the current object and rewrite
197  * the 4 or 8 byte pointer within.
198  *
199  * Serialising an execbuf is quite simple according to the rules of the GEM
200  * ABI. Execution within each context is ordered by the order of submission.
201  * Writes to any GEM object are in order of submission and are exclusive. Reads
202  * from a GEM object are unordered with respect to other reads, but ordered by
203  * writes. A write submitted after a read cannot occur before the read, and
204  * similarly any read submitted after a write cannot occur before the write.
205  * Writes are ordered between engines such that only one write occurs at any
206  * time (completing any reads beforehand) - using semaphores where available
207  * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
208  * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
209  * reads before starting, and any read (either using set-domain or pread) must
210  * flush all GPU writes before starting. (Note we only employ a barrier before,
211  * we currently rely on userspace not concurrently starting a new execution
212  * whilst reading or writing to an object. This may be an advantage or not
213  * depending on how much you trust userspace not to shoot themselves in the
214  * foot.) Serialisation may just result in the request being inserted into
215  * a DAG awaiting its turn, but the simplest is to wait on the CPU until
216  * all dependencies are resolved.
217  *
218  * After all of that, it is just a matter of closing the request and handing it to
219  * the hardware (well, leaving it in a queue to be executed). However, we also
220  * offer the ability for batchbuffers to be run with elevated privileges so
221  * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
222  * Before any batch is given extra privileges we first must check that it
223  * contains no nefarious instructions: we check that each instruction is from
224  * our whitelist and all registers are also from an allowed list. We first
225  * copy the user's batchbuffer to a shadow (so that the user doesn't have
226  * access to it, either by the CPU or GPU as we scan it) and then parse each
227  * instruction. If everything is ok, we set a flag telling the hardware to run
228  * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
229  */
230 
231 struct i915_execbuffer {
232 	struct drm_i915_private *i915; /** i915 backpointer */
233 	struct drm_file *file; /** per-file lookup tables and limits */
234 	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
235 	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
236 	struct eb_vma *vma;
237 
238 	struct intel_engine_cs *engine; /** engine to queue the request to */
239 	struct intel_context *context; /* logical state for the request */
240 	struct i915_gem_context *gem_context; /** caller's context */
241 
242 	struct i915_request *request; /** our request to build */
243 	struct eb_vma *batch; /** identity of the batch obj/vma */
244 	struct i915_vma *trampoline; /** trampoline used for chaining */
245 
246 	/** actual size of execobj[] as we may extend it for the cmdparser */
247 	unsigned int buffer_count;
248 
249 	/** list of vma not yet bound during reservation phase */
250 	struct list_head unbound;
251 
252 	/** list of vma that have execobj.relocation_count */
253 	struct list_head relocs;
254 
255 	/**
256 	 * Track the most recently used object for relocations, as we
257 	 * frequently have to perform multiple relocations within the same
258 	 * obj/page
259 	 */
260 	struct reloc_cache {
261 		struct drm_mm_node node; /** temporary GTT binding */
262 		unsigned long vaddr; /** Current kmap address */
263 		unsigned long page; /** Currently mapped page index */
264 		unsigned int gen; /** Cached value of INTEL_GEN */
265 		bool use_64bit_reloc : 1;
266 		bool has_llc : 1;
267 		bool has_fence : 1;
268 		bool needs_unfenced : 1;
269 
270 		struct i915_request *rq;
271 		u32 *rq_cmd;
272 		unsigned int rq_size;
273 
274 		struct agp_map *map;
275 		bus_space_tag_t iot;
276 		bus_space_handle_t ioh;
277 	} reloc_cache;
278 
279 	u64 invalid_flags; /** Set of execobj.flags that are invalid */
280 	u32 context_flags; /** Set of execobj.flags to insert from the ctx */
281 
282 	u32 batch_start_offset; /** Location within object of batch */
283 	u32 batch_len; /** Length of batch within object */
284 	u32 batch_flags; /** Flags composed for emit_bb_start() */
285 
286 	/**
287 	 * Indicate either the size of the hashtable used to resolve
288 	 * relocation handles, or, if negative, that we are using a direct
289 	 * index into the execobj[].
290 	 */
291 	int lut_size;
292 	struct hlist_head *buckets; /** ht for relocation handles */
293 };
294 
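/*
 * Scan the batch when the engine mandates the command parser, or when the
 * engine merely uses it and userspace supplied a non-zero batch_len for us
 * to scan.
 */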
295 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
296 {
297 	return intel_engine_requires_cmd_parser(eb->engine) ||
298 		(intel_engine_using_cmd_parser(eb->engine) &&
299 		 eb->args->batch_len);
300 }
301 
302 static int eb_create(struct i915_execbuffer *eb)
303 {
304 	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
305 		unsigned int size = 1 + ilog2(eb->buffer_count);
306 
307 		/*
308 		 * Without a 1:1 association between relocation handles and
309 		 * the execobject[] index, we instead create a hashtable.
310 		 * We size it dynamically based on available memory, starting
311 		 * first with a 1:1 associative hash and scaling back until
312 		 * the allocation succeeds.
313 		 *
314 		 * Later on we use a positive lut_size to indicate we are
315 		 * using this hashtable, and a negative value to indicate a
316 		 * direct lookup.
317 		 */
318 		do {
319 			gfp_t flags;
320 
321 			/* While we can still reduce the allocation size, don't
322 			 * raise a warning and allow the allocation to fail.
323 			 * On the last pass though, we want to try as hard
324 			 * as possible to perform the allocation and warn
325 			 * if it fails.
326 			 */
327 			flags = GFP_KERNEL;
328 			if (size > 1)
329 				flags |= __GFP_NORETRY | __GFP_NOWARN;
330 
331 			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
332 					      flags);
333 			if (eb->buckets)
334 				break;
335 		} while (--size);
336 
337 		if (unlikely(!size))
338 			return -ENOMEM;
339 
340 		eb->lut_size = size;
341 	} else {
342 		eb->lut_size = -eb->buffer_count;
343 	}
344 
345 	return 0;
346 }
347 
348 static bool
349 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
350 		 const struct i915_vma *vma,
351 		 unsigned int flags)
352 {
353 	if (vma->node.size < entry->pad_to_size)
354 		return true;
355 
356 	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
357 		return true;
358 
359 	if (flags & EXEC_OBJECT_PINNED &&
360 	    vma->node.start != entry->offset)
361 		return true;
362 
363 	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
364 	    vma->node.start < BATCH_OFFSET_BIAS)
365 		return true;
366 
367 	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
368 	    (vma->node.start + vma->node.size - 1) >> 32)
369 		return true;
370 
371 	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
372 	    !i915_vma_is_map_and_fenceable(vma))
373 		return true;
374 
375 	return false;
376 }
377 
378 static inline bool
379 eb_pin_vma(struct i915_execbuffer *eb,
380 	   const struct drm_i915_gem_exec_object2 *entry,
381 	   struct eb_vma *ev)
382 {
383 	struct i915_vma *vma = ev->vma;
384 	u64 pin_flags;
385 
386 	if (vma->node.size)
387 		pin_flags = vma->node.start;
388 	else
389 		pin_flags = entry->offset & PIN_OFFSET_MASK;
390 
391 	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
392 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
393 		pin_flags |= PIN_GLOBAL;
394 
395 	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
396 		return false;
397 
398 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
399 		if (unlikely(i915_vma_pin_fence(vma))) {
400 			i915_vma_unpin(vma);
401 			return false;
402 		}
403 
404 		if (vma->fence)
405 			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
406 	}
407 
408 	ev->flags |= __EXEC_OBJECT_HAS_PIN;
409 	return !eb_vma_misplaced(entry, vma, ev->flags);
410 }
411 
412 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
413 {
414 	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
415 
416 	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
417 		__i915_vma_unpin_fence(vma);
418 
419 	__i915_vma_unpin(vma);
420 }
421 
422 static inline void
423 eb_unreserve_vma(struct eb_vma *ev)
424 {
425 	if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
426 		return;
427 
428 	__eb_unreserve_vma(ev->vma, ev->flags);
429 	ev->flags &= ~__EXEC_OBJECT_RESERVED;
430 }
431 
432 static int
433 eb_validate_vma(struct i915_execbuffer *eb,
434 		struct drm_i915_gem_exec_object2 *entry,
435 		struct i915_vma *vma)
436 {
437 	if (unlikely(entry->flags & eb->invalid_flags))
438 		return -EINVAL;
439 
440 	if (unlikely(entry->alignment &&
441 		     !is_power_of_2_u64(entry->alignment)))
442 		return -EINVAL;
443 
444 	/*
445 	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
446 	 * any non-page-aligned or non-canonical addresses.
447 	 */
448 	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
449 		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
450 		return -EINVAL;
451 
452 	/* pad_to_size was once a reserved field, so sanitize it */
453 	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
454 		if (unlikely(offset_in_page(entry->pad_to_size)))
455 			return -EINVAL;
456 	} else {
457 		entry->pad_to_size = 0;
458 	}
459 	/*
460 	 * From drm_mm perspective address space is continuous,
461 	 * so from this point we're always using non-canonical
462 	 * form internally.
463 	 */
464 	entry->offset = gen8_noncanonical_addr(entry->offset);
465 
466 	if (!eb->reloc_cache.has_fence) {
467 		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
468 	} else {
469 		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
470 		     eb->reloc_cache.needs_unfenced) &&
471 		    i915_gem_object_is_tiled(vma->obj))
472 			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
473 	}
474 
475 	if (!(entry->flags & EXEC_OBJECT_PINNED))
476 		entry->flags |= eb->context_flags;
477 
478 	return 0;
479 }
480 
481 static void
482 eb_add_vma(struct i915_execbuffer *eb,
483 	   unsigned int i, unsigned batch_idx,
484 	   struct i915_vma *vma)
485 {
486 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
487 	struct eb_vma *ev = &eb->vma[i];
488 
489 	GEM_BUG_ON(i915_vma_is_closed(vma));
490 
491 	ev->vma = i915_vma_get(vma);
492 	ev->exec = entry;
493 	ev->flags = entry->flags;
494 
495 	if (eb->lut_size > 0) {
496 		ev->handle = entry->handle;
497 		hlist_add_head(&ev->node,
498 			       &eb->buckets[hash_32(entry->handle,
499 						    eb->lut_size)]);
500 	}
501 
502 	if (entry->relocation_count)
503 		list_add_tail(&ev->reloc_link, &eb->relocs);
504 
505 	/*
506 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
507 	 * to negative relocation deltas. Usually that works out ok since the
508 	 * relocate address is still positive, except when the batch is placed
509 	 * very low in the GTT. Ensure this doesn't happen.
510 	 *
511 	 * Note that actual hangs have only been observed on gen7, but for
512 	 * paranoia do it everywhere.
513 	 */
514 	if (i == batch_idx) {
515 		if (entry->relocation_count &&
516 		    !(ev->flags & EXEC_OBJECT_PINNED))
517 			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
518 		if (eb->reloc_cache.has_fence)
519 			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
520 
521 		eb->batch = ev;
522 	}
523 
524 	if (eb_pin_vma(eb, entry, ev)) {
525 		if (entry->offset != vma->node.start) {
526 			entry->offset = vma->node.start | UPDATE;
527 			eb->args->flags |= __EXEC_HAS_RELOC;
528 		}
529 	} else {
530 		eb_unreserve_vma(ev);
531 		list_add_tail(&ev->bind_link, &eb->unbound);
532 	}
533 }
534 
535 static inline int use_cpu_reloc(const struct reloc_cache *cache,
536 				const struct drm_i915_gem_object *obj)
537 {
538 	if (!i915_gem_object_has_struct_page(obj))
539 		return false;
540 
541 	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
542 		return true;
543 
544 	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
545 		return false;
546 
547 	return (cache->has_llc ||
548 		obj->cache_dirty ||
549 		obj->cache_level != I915_CACHE_NONE);
550 }
551 
552 static int eb_reserve_vma(const struct i915_execbuffer *eb,
553 			  struct eb_vma *ev,
554 			  u64 pin_flags)
555 {
556 	struct drm_i915_gem_exec_object2 *entry = ev->exec;
557 	unsigned int exec_flags = ev->flags;
558 	struct i915_vma *vma = ev->vma;
559 	int err;
560 
561 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
562 		pin_flags |= PIN_GLOBAL;
563 
564 	/*
565 	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
566 	 * limit address to the first 4GBs for unflagged objects.
567 	 */
568 	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
569 		pin_flags |= PIN_ZONE_4G;
570 
571 	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
572 		pin_flags |= PIN_MAPPABLE;
573 
574 	if (exec_flags & EXEC_OBJECT_PINNED)
575 		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
576 	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
577 		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
578 
579 	if (drm_mm_node_allocated(&vma->node) &&
580 	    eb_vma_misplaced(entry, vma, ev->flags)) {
581 		err = i915_vma_unbind(vma);
582 		if (err)
583 			return err;
584 	}
585 
586 	err = i915_vma_pin(vma,
587 			   entry->pad_to_size, entry->alignment,
588 			   pin_flags);
589 	if (err)
590 		return err;
591 
592 	if (entry->offset != vma->node.start) {
593 		entry->offset = vma->node.start | UPDATE;
594 		eb->args->flags |= __EXEC_HAS_RELOC;
595 	}
596 
597 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
598 		err = i915_vma_pin_fence(vma);
599 		if (unlikely(err)) {
600 			i915_vma_unpin(vma);
601 			return err;
602 		}
603 
604 		if (vma->fence)
605 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
606 	}
607 
608 	ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
609 	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
610 
611 	return 0;
612 }
613 
614 static int eb_reserve(struct i915_execbuffer *eb)
615 {
616 	const unsigned int count = eb->buffer_count;
617 	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
618 	struct list_head last;
619 	struct eb_vma *ev;
620 	unsigned int i, pass;
621 	int err = 0;
622 
623 	/*
624 	 * Attempt to pin all of the buffers into the GTT.
625 	 * This is done in 3 phases:
626 	 *
627 	 * 1a. Unbind all objects that do not match the GTT constraints for
628 	 *     the execbuffer (fenceable, mappable, alignment etc).
629 	 * 1b. Increment pin count for already bound objects.
630 	 * 2.  Bind new objects.
631 	 * 3.  Decrement pin count.
632 	 *
633 	 * This avoids unnecessary unbinding of later objects in order to make
634 	 * room for the earlier objects *unless* we need to defragment.
635 	 */
636 
637 	if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
638 		return -EINTR;
639 
640 	pass = 0;
641 	do {
642 		list_for_each_entry(ev, &eb->unbound, bind_link) {
643 			err = eb_reserve_vma(eb, ev, pin_flags);
644 			if (err)
645 				break;
646 		}
647 		if (!(err == -ENOSPC || err == -EAGAIN))
648 			break;
649 
650 		/* Resort *all* the objects into priority order */
651 		INIT_LIST_HEAD(&eb->unbound);
652 		INIT_LIST_HEAD(&last);
653 		for (i = 0; i < count; i++) {
654 			unsigned int flags;
655 
656 			ev = &eb->vma[i];
657 			flags = ev->flags;
658 			if (flags & EXEC_OBJECT_PINNED &&
659 			    flags & __EXEC_OBJECT_HAS_PIN)
660 				continue;
661 
662 			eb_unreserve_vma(ev);
663 
664 			if (flags & EXEC_OBJECT_PINNED)
665 				/* Pinned must have their slot */
666 				list_add(&ev->bind_link, &eb->unbound);
667 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
668 				/* Map requires the lowest 256MiB (aperture) */
669 				list_add_tail(&ev->bind_link, &eb->unbound);
670 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
671 				/* Prioritise 4GiB region for restricted bo */
672 				list_add(&ev->bind_link, &last);
673 			else
674 				list_add_tail(&ev->bind_link, &last);
675 		}
676 		list_splice_tail(&last, &eb->unbound);
677 
678 		if (err == -EAGAIN) {
679 			mutex_unlock(&eb->i915->drm.struct_mutex);
680 			flush_workqueue(eb->i915->mm.userptr_wq);
681 			mutex_lock(&eb->i915->drm.struct_mutex);
682 			continue;
683 		}
684 
685 		switch (pass++) {
686 		case 0:
687 			break;
688 
689 		case 1:
690 			/* Too fragmented, unbind everything and retry */
691 			mutex_lock(&eb->context->vm->mutex);
692 			err = i915_gem_evict_vm(eb->context->vm);
693 			mutex_unlock(&eb->context->vm->mutex);
694 			if (err)
695 				goto unlock;
696 			break;
697 
698 		default:
699 			err = -ENOSPC;
700 			goto unlock;
701 		}
702 
703 		pin_flags = PIN_USER;
704 	} while (1);
705 
706 unlock:
707 	mutex_unlock(&eb->i915->drm.struct_mutex);
708 	return err;
709 }
710 
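/*
 * By default the batchbuffer is the last object in execobj[]; with
 * I915_EXEC_BATCH_FIRST userspace places it first instead.
 */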
711 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
712 {
713 	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
714 		return 0;
715 	else
716 		return eb->buffer_count - 1;
717 }
718 
719 static int eb_select_context(struct i915_execbuffer *eb)
720 {
721 	struct i915_gem_context *ctx;
722 
723 	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
724 	if (unlikely(!ctx))
725 		return -ENOENT;
726 
727 	eb->gem_context = ctx;
728 	if (rcu_access_pointer(ctx->vm))
729 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
730 
731 	eb->context_flags = 0;
732 	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
733 		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
734 
735 	return 0;
736 }
737 
738 static int eb_lookup_vmas(struct i915_execbuffer *eb)
739 {
740 	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
741 	struct drm_i915_gem_object *obj;
742 	unsigned int i, batch;
743 	int err;
744 
745 	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
746 		return -ENOENT;
747 
748 	INIT_LIST_HEAD(&eb->relocs);
749 	INIT_LIST_HEAD(&eb->unbound);
750 
751 	batch = eb_batch_index(eb);
752 
753 	for (i = 0; i < eb->buffer_count; i++) {
754 		u32 handle = eb->exec[i].handle;
755 		struct i915_lut_handle *lut;
756 		struct i915_vma *vma;
757 
758 		vma = radix_tree_lookup(handles_vma, handle);
759 		if (likely(vma))
760 			goto add_vma;
761 
762 		obj = i915_gem_object_lookup(eb->file, handle);
763 		if (unlikely(!obj)) {
764 			err = -ENOENT;
765 			goto err_vma;
766 		}
767 
768 		vma = i915_vma_instance(obj, eb->context->vm, NULL);
769 		if (IS_ERR(vma)) {
770 			err = PTR_ERR(vma);
771 			goto err_obj;
772 		}
773 
774 		lut = i915_lut_handle_alloc();
775 		if (unlikely(!lut)) {
776 			err = -ENOMEM;
777 			goto err_obj;
778 		}
779 
780 		err = radix_tree_insert(handles_vma, handle, vma);
781 		if (unlikely(err)) {
782 			i915_lut_handle_free(lut);
783 			goto err_obj;
784 		}
785 
786 		/* transfer ref to lut */
787 		if (!atomic_fetch_inc(&vma->open_count))
788 			i915_vma_reopen(vma);
789 		lut->handle = handle;
790 		lut->ctx = eb->gem_context;
791 
792 		i915_gem_object_lock(obj);
793 		list_add(&lut->obj_link, &obj->lut_list);
794 		i915_gem_object_unlock(obj);
795 
796 add_vma:
797 		err = eb_validate_vma(eb, &eb->exec[i], vma);
798 		if (unlikely(err))
799 			goto err_vma;
800 
801 		eb_add_vma(eb, i, batch, vma);
802 	}
803 
804 	return 0;
805 
806 err_obj:
807 	i915_gem_object_put(obj);
808 err_vma:
809 	eb->vma[i].vma = NULL;
810 	return err;
811 }
812 
813 static struct eb_vma *
814 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
815 {
816 	if (eb->lut_size < 0) {
817 		if (handle >= -eb->lut_size)
818 			return NULL;
819 		return &eb->vma[handle];
820 	} else {
821 		struct hlist_head *head;
822 		struct eb_vma *ev;
823 
824 		head = &eb->buckets[hash_32(handle, eb->lut_size)];
825 		hlist_for_each_entry(ev, head, node) {
826 			if (ev->handle == handle)
827 				return ev;
828 		}
829 		return NULL;
830 	}
831 }
832 
833 static void eb_release_vmas(const struct i915_execbuffer *eb)
834 {
835 	const unsigned int count = eb->buffer_count;
836 	unsigned int i;
837 
838 	for (i = 0; i < count; i++) {
839 		struct eb_vma *ev = &eb->vma[i];
840 		struct i915_vma *vma = ev->vma;
841 
842 		if (!vma)
843 			break;
844 
845 		eb->vma[i].vma = NULL;
846 
847 		if (ev->flags & __EXEC_OBJECT_HAS_PIN)
848 			__eb_unreserve_vma(vma, ev->flags);
849 
850 		i915_vma_put(vma);
851 	}
852 }
853 
854 static void eb_destroy(const struct i915_execbuffer *eb)
855 {
856 	GEM_BUG_ON(eb->reloc_cache.rq);
857 
858 	if (eb->lut_size > 0)
859 		kfree(eb->buckets);
860 }
861 
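/*
 * reloc->delta may be negative (see the SNA note in eb_add_vma()), so
 * sign-extend it before adding it to the target's offset and converting
 * the sum to canonical form.
 */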
862 static inline u64
863 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
864 		  const struct i915_vma *target)
865 {
866 	return gen8_canonical_addr((int)reloc->delta + target->node.start);
867 }
868 
869 static void reloc_cache_init(struct reloc_cache *cache,
870 			     struct drm_i915_private *i915)
871 {
872 	cache->page = -1;
873 	cache->vaddr = 0;
874 	/* Must be a variable in the struct to allow GCC to unroll. */
875 	cache->gen = INTEL_GEN(i915);
876 	cache->has_llc = HAS_LLC(i915);
877 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
878 	cache->has_fence = cache->gen < 4;
879 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
880 #ifdef notyet
881 	cache->node.flags = 0;
882 #else
883 	cache->node.hole_follows = 0;
884 	cache->node.allocated = 0;
885 	cache->node.scanned_block = 0;
886 #endif
887 	cache->rq = NULL;
888 	cache->rq_size = 0;
889 
890 	cache->map = i915->agph;
891 	cache->iot = i915->bst;
892 }
893 
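/*
 * reloc_cache.vaddr packs the currently mapped kernel address in its
 * page-aligned bits and the KMAP/CLFLUSH flags in the low bits;
 * unmask_page() and unmask_flags() split the two apart again.
 */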
894 static inline void *unmask_page(unsigned long p)
895 {
896 	return (void *)(uintptr_t)(p & ~PAGE_MASK);
897 }
898 
899 static inline unsigned int unmask_flags(unsigned long p)
900 {
901 	return p & PAGE_MASK;
902 }
903 
904 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
905 
906 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
907 {
908 	struct drm_i915_private *i915 =
909 		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
910 	return &i915->ggtt;
911 }
912 
913 static void reloc_gpu_flush(struct reloc_cache *cache)
914 {
915 	struct drm_i915_gem_object *obj = cache->rq->batch->obj;
916 
917 	GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
918 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
919 
920 	__i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
921 	i915_gem_object_unpin_map(obj);
922 
923 	intel_gt_chipset_flush(cache->rq->engine->gt);
924 
925 	i915_request_add(cache->rq);
926 	cache->rq = NULL;
927 }
928 
929 static void reloc_cache_reset(struct reloc_cache *cache)
930 {
931 	void *vaddr;
932 
933 	if (cache->rq)
934 		reloc_gpu_flush(cache);
935 
936 	if (!cache->vaddr)
937 		return;
938 
939 	vaddr = unmask_page(cache->vaddr);
940 	if (cache->vaddr & KMAP) {
941 		if (cache->vaddr & CLFLUSH_AFTER)
942 			mb();
943 
944 		kunmap_atomic(vaddr);
945 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
946 	} else {
947 		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
948 
949 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
950 #ifdef __linux__
951 		io_mapping_unmap_atomic((void __iomem *)vaddr);
952 #else
953 		agp_unmap_atomic(cache->map, cache->ioh);
954 #endif
955 
956 		if (drm_mm_node_allocated(&cache->node)) {
957 			ggtt->vm.clear_range(&ggtt->vm,
958 					     cache->node.start,
959 					     cache->node.size);
960 			mutex_lock(&ggtt->vm.mutex);
961 			drm_mm_remove_node(&cache->node);
962 			mutex_unlock(&ggtt->vm.mutex);
963 		} else {
964 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
965 		}
966 	}
967 
968 	cache->vaddr = 0;
969 	cache->page = -1;
970 }
971 
972 static void *reloc_kmap(struct drm_i915_gem_object *obj,
973 			struct reloc_cache *cache,
974 			unsigned long page)
975 {
976 	void *vaddr;
977 
978 	if (cache->vaddr) {
979 		kunmap_atomic(unmask_page(cache->vaddr));
980 	} else {
981 		unsigned int flushes;
982 		int err;
983 
984 		err = i915_gem_object_prepare_write(obj, &flushes);
985 		if (err)
986 			return ERR_PTR(err);
987 
988 		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
989 		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & ~PAGE_MASK);
990 
991 		cache->vaddr = flushes | KMAP;
992 		cache->node.mm = (void *)obj;
993 		if (flushes)
994 			mb();
995 	}
996 
997 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
998 	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
999 	cache->page = page;
1000 
1001 	return vaddr;
1002 }
1003 
1004 static void *reloc_iomap(struct drm_i915_gem_object *obj,
1005 			 struct reloc_cache *cache,
1006 			 unsigned long page)
1007 {
1008 	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1009 	unsigned long offset;
1010 	void *vaddr;
1011 
1012 	if (cache->vaddr) {
1013 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1014 #ifdef __linux__
1015 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
1016 #else
1017 		agp_unmap_atomic(cache->map, cache->ioh);
1018 #endif
1019 	} else {
1020 		struct i915_vma *vma;
1021 		int err;
1022 
1023 		if (i915_gem_object_is_tiled(obj))
1024 			return ERR_PTR(-EINVAL);
1025 
1026 		if (use_cpu_reloc(cache, obj))
1027 			return NULL;
1028 
1029 		i915_gem_object_lock(obj);
1030 		err = i915_gem_object_set_to_gtt_domain(obj, true);
1031 		i915_gem_object_unlock(obj);
1032 		if (err)
1033 			return ERR_PTR(err);
1034 
1035 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1036 					       PIN_MAPPABLE |
1037 					       PIN_NONBLOCK /* NOWARN */ |
1038 					       PIN_NOEVICT);
1039 		if (IS_ERR(vma)) {
1040 			memset(&cache->node, 0, sizeof(cache->node));
1041 			mutex_lock(&ggtt->vm.mutex);
1042 			err = drm_mm_insert_node_in_range
1043 				(&ggtt->vm.mm, &cache->node,
1044 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1045 				 0, ggtt->mappable_end,
1046 				 DRM_MM_INSERT_LOW);
1047 			mutex_unlock(&ggtt->vm.mutex);
1048 			if (err) /* no inactive aperture space, use cpu reloc */
1049 				return NULL;
1050 		} else {
1051 			cache->node.start = vma->node.start;
1052 			cache->node.mm = (void *)vma;
1053 		}
1054 	}
1055 
1056 	offset = cache->node.start;
1057 	if (drm_mm_node_allocated(&cache->node)) {
1058 		ggtt->vm.insert_page(&ggtt->vm,
1059 				     i915_gem_object_get_dma_address(obj, page),
1060 				     offset, I915_CACHE_NONE, 0);
1061 	} else {
1062 		offset += page << PAGE_SHIFT;
1063 	}
1064 
1065 #ifdef __linux__
1066 	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
1067 							 offset);
1068 #else
1069 	agp_map_atomic(cache->map, offset, &cache->ioh);
1070 	vaddr = bus_space_vaddr(cache->iot, cache->ioh);
1071 #endif
1072 	cache->page = page;
1073 	cache->vaddr = (unsigned long)vaddr;
1074 
1075 	return vaddr;
1076 }
1077 
1078 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
1079 			 struct reloc_cache *cache,
1080 			 unsigned long page)
1081 {
1082 	void *vaddr;
1083 
1084 	if (cache->page == page) {
1085 		vaddr = unmask_page(cache->vaddr);
1086 	} else {
1087 		vaddr = NULL;
1088 		if ((cache->vaddr & KMAP) == 0)
1089 			vaddr = reloc_iomap(obj, cache, page);
1090 		if (!vaddr)
1091 			vaddr = reloc_kmap(obj, cache, page);
1092 	}
1093 
1094 	return vaddr;
1095 }
1096 
1097 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1098 {
1099 	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
1100 		if (flushes & CLFLUSH_BEFORE) {
1101 			clflushopt(addr);
1102 			mb();
1103 		}
1104 
1105 		*addr = value;
1106 
1107 		/*
1108 		 * Writes to the same cacheline are serialised by the CPU
1109 		 * (including clflush). On the write path, we only require
1110 		 * that it hits memory in an orderly fashion and place
1111 		 * mb barriers at the start and end of the relocation phase
1112 		 * to ensure ordering of clflush wrt to the system.
1113 		 */
1114 		if (flushes & CLFLUSH_AFTER)
1115 			clflushopt(addr);
1116 	} else
1117 		*addr = value;
1118 }
1119 
1120 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
1121 {
1122 	struct drm_i915_gem_object *obj = vma->obj;
1123 	int err;
1124 
1125 	i915_vma_lock(vma);
1126 
1127 	if (obj->cache_dirty & ~obj->cache_coherent)
1128 		i915_gem_clflush_object(obj, 0);
1129 	obj->write_domain = 0;
1130 
1131 	err = i915_request_await_object(rq, vma->obj, true);
1132 	if (err == 0)
1133 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1134 
1135 	i915_vma_unlock(vma);
1136 
1137 	return err;
1138 }
1139 
1140 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1141 			     struct i915_vma *vma,
1142 			     unsigned int len)
1143 {
1144 	struct reloc_cache *cache = &eb->reloc_cache;
1145 	struct intel_engine_pool_node *pool;
1146 	struct i915_request *rq;
1147 	struct i915_vma *batch;
1148 	u32 *cmd;
1149 	int err;
1150 
1151 	pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
1152 	if (IS_ERR(pool))
1153 		return PTR_ERR(pool);
1154 
1155 	cmd = i915_gem_object_pin_map(pool->obj,
1156 				      cache->has_llc ?
1157 				      I915_MAP_FORCE_WB :
1158 				      I915_MAP_FORCE_WC);
1159 	if (IS_ERR(cmd)) {
1160 		err = PTR_ERR(cmd);
1161 		goto out_pool;
1162 	}
1163 
1164 	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
1165 	if (IS_ERR(batch)) {
1166 		err = PTR_ERR(batch);
1167 		goto err_unmap;
1168 	}
1169 
1170 	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1171 	if (err)
1172 		goto err_unmap;
1173 
1174 	rq = i915_request_create(eb->context);
1175 	if (IS_ERR(rq)) {
1176 		err = PTR_ERR(rq);
1177 		goto err_unpin;
1178 	}
1179 
1180 	err = intel_engine_pool_mark_active(pool, rq);
1181 	if (err)
1182 		goto err_request;
1183 
1184 	err = reloc_move_to_gpu(rq, vma);
1185 	if (err)
1186 		goto err_request;
1187 
1188 	err = eb->engine->emit_bb_start(rq,
1189 					batch->node.start, PAGE_SIZE,
1190 					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1191 	if (err)
1192 		goto skip_request;
1193 
1194 	i915_vma_lock(batch);
1195 	err = i915_request_await_object(rq, batch->obj, false);
1196 	if (err == 0)
1197 		err = i915_vma_move_to_active(batch, rq, 0);
1198 	i915_vma_unlock(batch);
1199 	if (err)
1200 		goto skip_request;
1201 
1202 	rq->batch = batch;
1203 	i915_vma_unpin(batch);
1204 
1205 	cache->rq = rq;
1206 	cache->rq_cmd = cmd;
1207 	cache->rq_size = 0;
1208 
1209 	/* Return with batch mapping (cmd) still pinned */
1210 	goto out_pool;
1211 
1212 skip_request:
1213 	i915_request_set_error_once(rq, err);
1214 err_request:
1215 	i915_request_add(rq);
1216 err_unpin:
1217 	i915_vma_unpin(batch);
1218 err_unmap:
1219 	i915_gem_object_unpin_map(pool->obj);
1220 out_pool:
1221 	intel_engine_pool_put(pool);
1222 	return err;
1223 }
1224 
1225 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1226 		      struct i915_vma *vma,
1227 		      unsigned int len)
1228 {
1229 	struct reloc_cache *cache = &eb->reloc_cache;
1230 	u32 *cmd;
1231 
1232 	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1233 		reloc_gpu_flush(cache);
1234 
1235 	if (unlikely(!cache->rq)) {
1236 		int err;
1237 
1238 		if (!intel_engine_can_store_dword(eb->engine))
1239 			return ERR_PTR(-ENODEV);
1240 
1241 		err = __reloc_gpu_alloc(eb, vma, len);
1242 		if (unlikely(err))
1243 			return ERR_PTR(err);
1244 	}
1245 
1246 	cmd = cache->rq_cmd + cache->rq_size;
1247 	cache->rq_size += len;
1248 
1249 	return cmd;
1250 }
1251 
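/*
 * Patch a single presumed address inside @vma at @reloc->offset. If we do
 * not already hold a CPU/GTT mapping and the object is still busy on the
 * GPU (or GPU relocations are forced), queue an MI_STORE_DWORD_IMM from
 * the GPU rather than stalling; otherwise map the page (CPU kmap or GTT
 * iomap) and rewrite the 4/8 byte value directly.
 */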
1252 static u64
1253 relocate_entry(struct i915_vma *vma,
1254 	       const struct drm_i915_gem_relocation_entry *reloc,
1255 	       struct i915_execbuffer *eb,
1256 	       const struct i915_vma *target)
1257 {
1258 	u64 offset = reloc->offset;
1259 	u64 target_offset = relocation_target(reloc, target);
1260 	bool wide = eb->reloc_cache.use_64bit_reloc;
1261 	void *vaddr;
1262 
1263 	if (!eb->reloc_cache.vaddr &&
1264 	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1265 	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
1266 		const unsigned int gen = eb->reloc_cache.gen;
1267 		unsigned int len;
1268 		u32 *batch;
1269 		u64 addr;
1270 
1271 		if (wide)
1272 			len = offset & 7 ? 8 : 5;
1273 		else if (gen >= 4)
1274 			len = 4;
1275 		else
1276 			len = 3;
1277 
1278 		batch = reloc_gpu(eb, vma, len);
1279 		if (IS_ERR(batch))
1280 			goto repeat;
1281 
1282 		addr = gen8_canonical_addr(vma->node.start + offset);
1283 		if (wide) {
1284 			if (offset & 7) {
1285 				*batch++ = MI_STORE_DWORD_IMM_GEN4;
1286 				*batch++ = lower_32_bits(addr);
1287 				*batch++ = upper_32_bits(addr);
1288 				*batch++ = lower_32_bits(target_offset);
1289 
1290 				addr = gen8_canonical_addr(addr + 4);
1291 
1292 				*batch++ = MI_STORE_DWORD_IMM_GEN4;
1293 				*batch++ = lower_32_bits(addr);
1294 				*batch++ = upper_32_bits(addr);
1295 				*batch++ = upper_32_bits(target_offset);
1296 			} else {
1297 				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1298 				*batch++ = lower_32_bits(addr);
1299 				*batch++ = upper_32_bits(addr);
1300 				*batch++ = lower_32_bits(target_offset);
1301 				*batch++ = upper_32_bits(target_offset);
1302 			}
1303 		} else if (gen >= 6) {
1304 			*batch++ = MI_STORE_DWORD_IMM_GEN4;
1305 			*batch++ = 0;
1306 			*batch++ = addr;
1307 			*batch++ = target_offset;
1308 		} else if (gen >= 4) {
1309 			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1310 			*batch++ = 0;
1311 			*batch++ = addr;
1312 			*batch++ = target_offset;
1313 		} else {
1314 			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1315 			*batch++ = addr;
1316 			*batch++ = target_offset;
1317 		}
1318 
1319 		goto out;
1320 	}
1321 
1322 repeat:
1323 	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
1324 	if (IS_ERR(vaddr))
1325 		return PTR_ERR(vaddr);
1326 
1327 	clflush_write32(vaddr + offset_in_page(offset),
1328 			lower_32_bits(target_offset),
1329 			eb->reloc_cache.vaddr);
1330 
1331 	if (wide) {
1332 		offset += sizeof(u32);
1333 		target_offset >>= 32;
1334 		wide = false;
1335 		goto repeat;
1336 	}
1337 
1338 out:
1339 	return target->node.start | UPDATE;
1340 }
1341 
1342 static u64
1343 eb_relocate_entry(struct i915_execbuffer *eb,
1344 		  struct eb_vma *ev,
1345 		  const struct drm_i915_gem_relocation_entry *reloc)
1346 {
1347 	struct drm_i915_private *i915 = eb->i915;
1348 	struct eb_vma *target;
1349 	int err;
1350 
1351 	/* we already hold a reference to all valid objects */
1352 	target = eb_get_vma(eb, reloc->target_handle);
1353 	if (unlikely(!target))
1354 		return -ENOENT;
1355 
1356 	/* Validate that the target is in a valid r/w GPU domain */
1357 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1358 		drm_dbg(&i915->drm, "reloc with multiple write domains: "
1359 			  "target %d offset %d "
1360 			  "read %08x write %08x",
1361 			  reloc->target_handle,
1362 			  (int) reloc->offset,
1363 			  reloc->read_domains,
1364 			  reloc->write_domain);
1365 		return -EINVAL;
1366 	}
1367 	if (unlikely((reloc->write_domain | reloc->read_domains)
1368 		     & ~I915_GEM_GPU_DOMAINS)) {
1369 		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
1370 			  "target %d offset %d "
1371 			  "read %08x write %08x",
1372 			  reloc->target_handle,
1373 			  (int) reloc->offset,
1374 			  reloc->read_domains,
1375 			  reloc->write_domain);
1376 		return -EINVAL;
1377 	}
1378 
1379 	if (reloc->write_domain) {
1380 		target->flags |= EXEC_OBJECT_WRITE;
1381 
1382 		/*
1383 		 * Sandybridge PPGTT errata: We need a global gtt mapping
1384 		 * for MI and pipe_control writes because the gpu doesn't
1385 		 * properly redirect them through the ppgtt for non_secure
1386 		 * batchbuffers.
1387 		 */
1388 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1389 		    IS_GEN(eb->i915, 6)) {
1390 			err = i915_vma_bind(target->vma,
1391 					    target->vma->obj->cache_level,
1392 					    PIN_GLOBAL, NULL);
1393 			if (WARN_ONCE(err,
1394 				      "Unexpected failure to bind target VMA!"))
1395 				return err;
1396 		}
1397 	}
1398 
1399 	/*
1400 	 * If the relocation already has the right value in it, no
1401 	 * more work needs to be done.
1402 	 */
1403 	if (!DBG_FORCE_RELOC &&
1404 	    gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
1405 		return 0;
1406 
1407 	/* Check that the relocation address is valid... */
1408 	if (unlikely(reloc->offset >
1409 		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1410 		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
1411 			  "target %d offset %d size %d.\n",
1412 			  reloc->target_handle,
1413 			  (int)reloc->offset,
1414 			  (int)ev->vma->size);
1415 		return -EINVAL;
1416 	}
1417 	if (unlikely(reloc->offset & 3)) {
1418 		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
1419 			  "target %d offset %d.\n",
1420 			  reloc->target_handle,
1421 			  (int)reloc->offset);
1422 		return -EINVAL;
1423 	}
1424 
1425 	/*
1426 	 * If we write into the object, we need to force the synchronisation
1427 	 * barrier, either with an asynchronous clflush or if we executed the
1428 	 * patching using the GPU (though that should be serialised by the
1429 	 * timeline). To be completely sure, and since we are required to
1430 	 * do relocations we are already stalling, disable the user's opt
1431 	 * out of our synchronisation.
1432 	 */
1433 	ev->flags &= ~EXEC_OBJECT_ASYNC;
1434 
1435 	/* and update the user's relocation entry */
1436 	return relocate_entry(ev->vma, reloc, eb, target->vma);
1437 }
1438 
1439 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
1440 {
1441 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1442 	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1443 	struct drm_i915_gem_relocation_entry __user *urelocs;
1444 	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
1445 	unsigned int remain;
1446 
1447 	urelocs = u64_to_user_ptr(entry->relocs_ptr);
1448 	remain = entry->relocation_count;
1449 	if (unlikely(remain > N_RELOC(ULONG_MAX)))
1450 		return -EINVAL;
1451 
1452 	/*
1453 	 * We must check that the entire relocation array is safe
1454 	 * to read. However, if the array is not writable the user loses
1455 	 * the updated relocation values.
1456 	 */
1457 	if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
1458 		return -EFAULT;
1459 
1460 	do {
1461 		struct drm_i915_gem_relocation_entry *r = stack;
1462 		unsigned int count =
1463 			min_t(unsigned int, remain, ARRAY_SIZE(stack));
1464 		unsigned int copied;
1465 
1466 		/*
1467 		 * This is the fast path and we cannot handle a pagefault
1468 		 * whilst holding the struct mutex lest the user pass in the
1469 		 * relocations contained within a mmaped bo. In such a case,
1470 		 * the page fault handler would call i915_gem_fault() and
1471 		 * we would try to acquire the struct mutex again. Obviously
1472 		 * this is bad and so lockdep complains vehemently.
1473 		 */
1474 		copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
1475 		if (unlikely(copied)) {
1476 			remain = -EFAULT;
1477 			goto out;
1478 		}
1479 
1480 		remain -= count;
1481 		do {
1482 			u64 offset = eb_relocate_entry(eb, ev, r);
1483 
1484 			if (likely(offset == 0)) {
1485 			} else if ((s64)offset < 0) {
1486 				remain = (int)offset;
1487 				goto out;
1488 			} else {
1489 				/*
1490 				 * Note that reporting an error now
1491 				 * leaves everything in an inconsistent
1492 				 * state as we have *already* changed
1493 				 * the relocation value inside the
1494 				 * object. As we have not updated
1495 				 * reloc.presumed_offset and will not
1496 				 * update the execobject.offset, on the
1497 				 * next call we may not rewrite the value
1498 				 * inside the object, leaving it
1499 				 * dangling and causing a GPU hang, unless
1500 				 * userspace dynamically rebuilds the
1501 				 * relocations on each execbuf rather than
1502 				 * presuming a static tree.
1503 				 *
1504 				 * We did previously check if the relocations
1505 				 * were writable (access_ok), an error now
1506 				 * would be a strange race with mprotect,
1507 				 * having already demonstrated that we
1508 				 * can read from this userspace address.
1509 				 */
1510 				offset = gen8_canonical_addr(offset & ~UPDATE);
1511 				__put_user(offset,
1512 					   &urelocs[r - stack].presumed_offset);
1513 			}
1514 		} while (r++, --count);
1515 		urelocs += ARRAY_SIZE(stack);
1516 	} while (remain);
1517 out:
1518 	reloc_cache_reset(&eb->reloc_cache);
1519 	return remain;
1520 }
1521 
1522 static int eb_relocate(struct i915_execbuffer *eb)
1523 {
1524 	int err;
1525 
1526 	mutex_lock(&eb->gem_context->mutex);
1527 	err = eb_lookup_vmas(eb);
1528 	mutex_unlock(&eb->gem_context->mutex);
1529 	if (err)
1530 		return err;
1531 
1532 	if (!list_empty(&eb->unbound)) {
1533 		err = eb_reserve(eb);
1534 		if (err)
1535 			return err;
1536 	}
1537 
1538 	/* The objects are in their final locations, apply the relocations. */
1539 	if (eb->args->flags & __EXEC_HAS_RELOC) {
1540 		struct eb_vma *ev;
1541 
1542 		list_for_each_entry(ev, &eb->relocs, reloc_link) {
1543 			err = eb_relocate_vma(eb, ev);
1544 			if (err)
1545 				return err;
1546 		}
1547 	}
1548 
1549 	return 0;
1550 }
1551 
1552 static int eb_move_to_gpu(struct i915_execbuffer *eb)
1553 {
1554 	const unsigned int count = eb->buffer_count;
1555 	struct ww_acquire_ctx acquire;
1556 	unsigned int i;
1557 	int err = 0;
1558 
1559 	ww_acquire_init(&acquire, &reservation_ww_class);
1560 
1561 	for (i = 0; i < count; i++) {
1562 		struct eb_vma *ev = &eb->vma[i];
1563 		struct i915_vma *vma = ev->vma;
1564 
1565 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
1566 		if (err == -EDEADLK) {
1567 			GEM_BUG_ON(i == 0);
1568 			do {
1569 				int j = i - 1;
1570 
1571 				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
1572 
1573 				swap(eb->vma[i],  eb->vma[j]);
1574 			} while (--i);
1575 
1576 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
1577 							       &acquire);
1578 		}
1579 		if (err)
1580 			break;
1581 	}
1582 	ww_acquire_done(&acquire);
1583 
1584 	while (i--) {
1585 		struct eb_vma *ev = &eb->vma[i];
1586 		struct i915_vma *vma = ev->vma;
1587 		unsigned int flags = ev->flags;
1588 		struct drm_i915_gem_object *obj = vma->obj;
1589 
1590 		assert_vma_held(vma);
1591 
1592 		if (flags & EXEC_OBJECT_CAPTURE) {
1593 			struct i915_capture_list *capture;
1594 
1595 			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1596 			if (capture) {
1597 				capture->next = eb->request->capture_list;
1598 				capture->vma = vma;
1599 				eb->request->capture_list = capture;
1600 			}
1601 		}
1602 
1603 		/*
1604 		 * If the GPU is not _reading_ through the CPU cache, we need
1605 		 * to make sure that any writes (both previous GPU writes from
1606 		 * before a change in snooping levels and normal CPU writes)
1607 		 * caught in that cache are flushed to main memory.
1608 		 *
1609 		 * We want to say
1610 		 *   obj->cache_dirty &&
1611 		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
1612 		 * but gcc's optimiser doesn't handle that as well and emits
1613 		 * two jumps instead of one. Maybe one day...
1614 		 */
1615 		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
1616 			if (i915_gem_clflush_object(obj, 0))
1617 				flags &= ~EXEC_OBJECT_ASYNC;
1618 		}
1619 
1620 		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
1621 			err = i915_request_await_object
1622 				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
1623 		}
1624 
1625 		if (err == 0)
1626 			err = i915_vma_move_to_active(vma, eb->request, flags);
1627 
1628 		i915_vma_unlock(vma);
1629 
1630 		__eb_unreserve_vma(vma, flags);
1631 		i915_vma_put(vma);
1632 
1633 		ev->vma = NULL;
1634 	}
1635 	ww_acquire_fini(&acquire);
1636 
1637 	if (unlikely(err))
1638 		goto err_skip;
1639 
1640 	eb->exec = NULL;
1641 
1642 	/* Unconditionally flush any chipset caches (for streaming writes). */
1643 	intel_gt_chipset_flush(eb->engine->gt);
1644 	return 0;
1645 
1646 err_skip:
1647 	i915_request_set_error_once(eb->request, err);
1648 	return err;
1649 }
1650 
1651 static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1652 {
1653 	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1654 		return -EINVAL;
1655 
1656 	/* Kernel clipping was a DRI1 misfeature */
1657 	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
1658 		if (exec->num_cliprects || exec->cliprects_ptr)
1659 			return -EINVAL;
1660 	}
1661 
1662 	if (exec->DR4 == 0xffffffff) {
1663 		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1664 		exec->DR4 = 0;
1665 	}
1666 	if (exec->DR1 || exec->DR4)
1667 		return -EINVAL;
1668 
1669 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1670 		return -EINVAL;
1671 
1672 	return 0;
1673 }
1674 
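/*
 * Zero the gen7 stream-output (SOL) write offsets, as requested by the
 * I915_EXEC_GEN7_SOL_RESET flag; only valid on the gen7 render engine.
 */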
1675 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1676 {
1677 	u32 *cs;
1678 	int i;
1679 
1680 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
1681 		drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
1682 		return -EINVAL;
1683 	}
1684 
1685 	cs = intel_ring_begin(rq, 4 * 2 + 2);
1686 	if (IS_ERR(cs))
1687 		return PTR_ERR(cs);
1688 
1689 	*cs++ = MI_LOAD_REGISTER_IMM(4);
1690 	for (i = 0; i < 4; i++) {
1691 		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1692 		*cs++ = 0;
1693 	}
1694 	*cs++ = MI_NOOP;
1695 	intel_ring_advance(rq, cs);
1696 
1697 	return 0;
1698 }
1699 
1700 static struct i915_vma *
1701 shadow_batch_pin(struct drm_i915_gem_object *obj,
1702 		 struct i915_address_space *vm,
1703 		 unsigned int flags)
1704 {
1705 	struct i915_vma *vma;
1706 	int err;
1707 
1708 	vma = i915_vma_instance(obj, vm, NULL);
1709 	if (IS_ERR(vma))
1710 		return vma;
1711 
1712 	err = i915_vma_pin(vma, 0, 0, flags);
1713 	if (err)
1714 		return ERR_PTR(err);
1715 
1716 	return vma;
1717 }
1718 
1719 struct eb_parse_work {
1720 	struct dma_fence_work base;
1721 	struct intel_engine_cs *engine;
1722 	struct i915_vma *batch;
1723 	struct i915_vma *shadow;
1724 	struct i915_vma *trampoline;
1725 	unsigned int batch_offset;
1726 	unsigned int batch_length;
1727 };
1728 
1729 static int __eb_parse(struct dma_fence_work *work)
1730 {
1731 	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
1732 
1733 	return intel_engine_cmd_parser(pw->engine,
1734 				       pw->batch,
1735 				       pw->batch_offset,
1736 				       pw->batch_length,
1737 				       pw->shadow,
1738 				       pw->trampoline);
1739 }
1740 
1741 static void __eb_parse_release(struct dma_fence_work *work)
1742 {
1743 	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
1744 
1745 	if (pw->trampoline)
1746 		i915_active_release(&pw->trampoline->active);
1747 	i915_active_release(&pw->shadow->active);
1748 	i915_active_release(&pw->batch->active);
1749 }
1750 
1751 static const struct dma_fence_work_ops eb_parse_ops = {
1752 	.name = "eb_parse",
1753 	.work = __eb_parse,
1754 	.release = __eb_parse_release,
1755 };
1756 
1757 static int eb_parse_pipeline(struct i915_execbuffer *eb,
1758 			     struct i915_vma *shadow,
1759 			     struct i915_vma *trampoline)
1760 {
1761 	struct eb_parse_work *pw;
1762 	int err;
1763 
1764 	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
1765 	if (!pw)
1766 		return -ENOMEM;
1767 
1768 	err = i915_active_acquire(&eb->batch->vma->active);
1769 	if (err)
1770 		goto err_free;
1771 
1772 	err = i915_active_acquire(&shadow->active);
1773 	if (err)
1774 		goto err_batch;
1775 
1776 	if (trampoline) {
1777 		err = i915_active_acquire(&trampoline->active);
1778 		if (err)
1779 			goto err_shadow;
1780 	}
1781 
1782 	dma_fence_work_init(&pw->base, &eb_parse_ops);
1783 
1784 	pw->engine = eb->engine;
1785 	pw->batch = eb->batch->vma;
1786 	pw->batch_offset = eb->batch_start_offset;
1787 	pw->batch_length = eb->batch_len;
1788 	pw->shadow = shadow;
1789 	pw->trampoline = trampoline;
1790 
1791 	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
1792 	if (err)
1793 		goto err_trampoline;
1794 
1795 	err = dma_resv_reserve_shared(pw->batch->resv, 1);
1796 	if (err)
1797 		goto err_batch_unlock;
1798 
1799 	/* Wait for all writes (and relocs) into the batch to complete */
1800 	err = i915_sw_fence_await_reservation(&pw->base.chain,
1801 					      pw->batch->resv, NULL, false,
1802 					      0, I915_FENCE_GFP);
1803 	if (err < 0)
1804 		goto err_batch_unlock;
1805 
1806 	/* Keep the batch alive and unwritten as we parse */
1807 	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
1808 
1809 	dma_resv_unlock(pw->batch->resv);
1810 
1811 	/* Force execution to wait for completion of the parser */
1812 	dma_resv_lock(shadow->resv, NULL);
1813 	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
1814 	dma_resv_unlock(shadow->resv);
1815 
1816 	dma_fence_work_commit(&pw->base);
1817 	return 0;
1818 
1819 err_batch_unlock:
1820 	dma_resv_unlock(pw->batch->resv);
1821 err_trampoline:
1822 	if (trampoline)
1823 		i915_active_release(&trampoline->active);
1824 err_shadow:
1825 	i915_active_release(&shadow->active);
1826 err_batch:
1827 	i915_active_release(&eb->batch->vma->active);
1828 err_free:
1829 	kfree(pw);
1830 	return err;
1831 }
1832 
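/*
 * If the batch requires scanning (see eb_use_cmdparser()), set up a shadow
 * copy: read-only in the user's ppGTT where the HW supports it, otherwise
 * pinned into the GGTT with a trailing trampoline and dispatched as a
 * secure batch.
 */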
1833 static int eb_parse(struct i915_execbuffer *eb)
1834 {
1835 	struct drm_i915_private *i915 = eb->i915;
1836 	struct intel_engine_pool_node *pool;
1837 	struct i915_vma *shadow, *trampoline;
1838 	unsigned int len;
1839 	int err;
1840 
1841 	if (!eb_use_cmdparser(eb))
1842 		return 0;
1843 
1844 	len = eb->batch_len;
1845 	if (!CMDPARSER_USES_GGTT(eb->i915)) {
1846 		/*
1847 		 * ppGTT backed shadow buffers must be mapped RO, to prevent
1848 		 * post-scan tampering.

1849 		 */
1850 		if (!eb->context->vm->has_read_only) {
1851 			drm_dbg(&i915->drm,
1852 				"Cannot prevent post-scan tampering without RO capable vm\n");
1853 			return -EINVAL;
1854 		}
1855 	} else {
1856 		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
1857 	}
1858 
1859 	pool = intel_engine_get_pool(eb->engine, len);
1860 	if (IS_ERR(pool))
1861 		return PTR_ERR(pool);
1862 
1863 	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
1864 	if (IS_ERR(shadow)) {
1865 		err = PTR_ERR(shadow);
1866 		goto err;
1867 	}
1868 	i915_gem_object_set_readonly(shadow->obj);
1869 
1870 	trampoline = NULL;
1871 	if (CMDPARSER_USES_GGTT(eb->i915)) {
1872 		trampoline = shadow;
1873 
1874 		shadow = shadow_batch_pin(pool->obj,
1875 					  &eb->engine->gt->ggtt->vm,
1876 					  PIN_GLOBAL);
1877 		if (IS_ERR(shadow)) {
1878 			err = PTR_ERR(shadow);
1879 			shadow = trampoline;
1880 			goto err_shadow;
1881 		}
1882 
1883 		eb->batch_flags |= I915_DISPATCH_SECURE;
1884 	}
1885 
1886 	err = eb_parse_pipeline(eb, shadow, trampoline);
1887 	if (err)
1888 		goto err_trampoline;
1889 
1890 	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
1891 	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
1892 	eb->batch = &eb->vma[eb->buffer_count++];
1893 
1894 	eb->trampoline = trampoline;
1895 	eb->batch_start_offset = 0;
1896 
1897 	shadow->private = pool;
1898 	return 0;
1899 
1900 err_trampoline:
1901 	if (trampoline)
1902 		i915_vma_unpin(trampoline);
1903 err_shadow:
1904 	i915_vma_unpin(shadow);
1905 err:
1906 	intel_engine_pool_put(pool);
1907 	return err;
1908 }
1909 
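/* Associate the request with the submitting client (drm_file) for bookkeeping. */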
1910 static void
1911 add_to_client(struct i915_request *rq, struct drm_file *file)
1912 {
1913 	struct drm_i915_file_private *file_priv = file->driver_priv;
1914 
1915 	rq->file_priv = file_priv;
1916 
1917 	spin_lock(&file_priv->mm.lock);
1918 	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
1919 	spin_unlock(&file_priv->mm.lock);
1920 }
1921 
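/*
 * Emit the request: serialise against the objects in use, optionally reset
 * the gen7 SOL offsets, emit the initial breadcrumb and finally the
 * batchbuffer start (plus the trampoline jump when the cmdparser added one).
 */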
1922 static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
1923 {
1924 	int err;
1925 
1926 	err = eb_move_to_gpu(eb);
1927 	if (err)
1928 		return err;
1929 
1930 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
1931 		err = i915_reset_gen7_sol_offsets(eb->request);
1932 		if (err)
1933 			return err;
1934 	}
1935 
1936 	/*
1937 	 * Once we have completed waiting for other engines (using HW
1938 	 * semaphores), we can signal that this request/batch is ready to run.
1939 	 * This allows us to determine whether the batch is still waiting on
1940 	 * the GPU or actually running, by checking the breadcrumb.
1941 	 */
1942 	if (eb->engine->emit_init_breadcrumb) {
1943 		err = eb->engine->emit_init_breadcrumb(eb->request);
1944 		if (err)
1945 			return err;
1946 	}
1947 
1948 	err = eb->engine->emit_bb_start(eb->request,
1949 					batch->node.start +
1950 					eb->batch_start_offset,
1951 					eb->batch_len,
1952 					eb->batch_flags);
1953 	if (err)
1954 		return err;
1955 
1956 	if (eb->trampoline) {
1957 		GEM_BUG_ON(eb->batch_start_offset);
1958 		err = eb->engine->emit_bb_start(eb->request,
1959 						eb->trampoline->node.start +
1960 						eb->batch_len,
1961 						0, 0);
1962 		if (err)
1963 			return err;
1964 	}
1965 
1966 	if (intel_context_nopreempt(eb->context))
1967 		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
1968 
1969 	return 0;
1970 }
1971 
1972 static int num_vcs_engines(const struct drm_i915_private *i915)
1973 {
1974 	return hweight64(INTEL_INFO(i915)->engine_mask &
1975 			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
1976 }
1977 
1978 /*
1979  * Pick one BSD (video) engine on which to dispatch the BSD command and
1980  * return its engine index. The choice is made once per file and reused.
1981  */
1982 static unsigned int
1983 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1984 			 struct drm_file *file)
1985 {
1986 	struct drm_i915_file_private *file_priv = file->driver_priv;
1987 
1988 	/* Check whether the file_priv has already selected one ring. */
1989 	if ((int)file_priv->bsd_engine < 0)
1990 		file_priv->bsd_engine =
1991 			get_random_int() % num_vcs_engines(dev_priv);
1992 
1993 	return file_priv->bsd_engine;
1994 }
1995 
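/* Default engine instance for each legacy I915_EXEC_* ring selector. */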
1996 static const enum intel_engine_id user_ring_map[] = {
1997 	[I915_EXEC_DEFAULT]	= RCS0,
1998 	[I915_EXEC_RENDER]	= RCS0,
1999 	[I915_EXEC_BLT]		= BCS0,
2000 	[I915_EXEC_BSD]		= VCS0,
2001 	[I915_EXEC_VEBOX]	= VECS0
2002 };
2003 
2004 static struct i915_request *eb_throttle(struct intel_context *ce)
2005 {
2006 	struct intel_ring *ring = ce->ring;
2007 	struct intel_timeline *tl = ce->timeline;
2008 	struct i915_request *rq;
2009 
2010 	/*
2011 	 * Completely unscientific finger-in-the-air estimates for suitable
2012 	 * maximum user request size (to avoid blocking) and then backoff.
2013 	 */
2014 	if (intel_ring_update_space(ring) >= PAGE_SIZE)
2015 		return NULL;
2016 
2017 	/*
2018 	 * Find a request such that, after waiting upon it, there will be at
2019 	 * least half the ring available. The hysteresis allows us to compete for the
2020 	 * shared ring and should mean that we sleep less often prior to
2021 	 * claiming our resources, but not so long that the ring completely
2022 	 * drains before we can submit our next request.
2023 	 */
2024 	list_for_each_entry(rq, &tl->requests, link) {
2025 		if (rq->ring != ring)
2026 			continue;
2027 
2028 		if (__intel_ring_space(rq->postfix,
2029 				       ring->emit, ring->size) > ring->size / 2)
2030 			break;
2031 	}
2032 	if (&rq->link == &tl->requests)
2033 		return NULL; /* weird, we will check again later for real */
2034 
2035 	return i915_request_get(rq);
2036 }
2037 
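/*
 * Pin the context (and with it the engine and timeline) for the duration
 * of the execbuf, first checking for a wedged/banned GPU and throttling
 * the client if its ring is running low on space.
 */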
2038 static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
2039 {
2040 	struct intel_timeline *tl;
2041 	struct i915_request *rq;
2042 	int err;
2043 
2044 	/*
2045 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2046 	 * EIO if the GPU is already wedged.
2047 	 */
2048 	err = intel_gt_terminally_wedged(ce->engine->gt);
2049 	if (err)
2050 		return err;
2051 
2052 	if (unlikely(intel_context_is_banned(ce)))
2053 		return -EIO;
2054 
2055 	/*
2056 	 * Pinning the contexts may generate requests in order to acquire
2057 	 * GGTT space, so do this first before we reserve a seqno for
2058 	 * ourselves.
2059 	 */
2060 	err = intel_context_pin(ce);
2061 	if (err)
2062 		return err;
2063 
2064 	/*
2065 	 * Take a local wakeref for preparing to dispatch the execbuf as
2066 	 * we expect to access the hardware fairly frequently in the
2067 	 * process, and require the engine to be kept awake between accesses.
2068 	 * Upon dispatch, we acquire another prolonged wakeref that we hold
2069 	 * until the timeline is idle, which in turn releases the wakeref
2070 	 * taken on the engine, and the parent device.
2071 	 */
2072 	tl = intel_context_timeline_lock(ce);
2073 	if (IS_ERR(tl)) {
2074 		err = PTR_ERR(tl);
2075 		goto err_unpin;
2076 	}
2077 
2078 	intel_context_enter(ce);
2079 	rq = eb_throttle(ce);
2080 
2081 	intel_context_timeline_unlock(tl);
2082 
2083 	if (rq) {
2084 #ifdef __linux__
2085 		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
2086 #else
2087 		bool nonblock = eb->file->filp->f_flag & FNONBLOCK;
2088 #endif
2089 		long timeout;
2090 
2091 		timeout = MAX_SCHEDULE_TIMEOUT;
2092 		if (nonblock)
2093 			timeout = 0;
2094 
2095 		timeout = i915_request_wait(rq,
2096 					    I915_WAIT_INTERRUPTIBLE,
2097 					    timeout);
2098 		i915_request_put(rq);
2099 
2100 		if (timeout < 0) {
2101 			err = nonblock ? -EWOULDBLOCK : timeout;
2102 			goto err_exit;
2103 		}
2104 	}
2105 
2106 	eb->engine = ce->engine;
2107 	eb->context = ce;
2108 	return 0;
2109 
2110 err_exit:
2111 	mutex_lock(&tl->mutex);
2112 	intel_context_exit(ce);
2113 	intel_context_timeline_unlock(tl);
2114 err_unpin:
2115 	intel_context_unpin(ce);
2116 	return err;
2117 }
2118 
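/* Undo __eb_pin_engine(): mark the context as exited and drop its pin. */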
2119 static void eb_unpin_engine(struct i915_execbuffer *eb)
2120 {
2121 	struct intel_context *ce = eb->context;
2122 	struct intel_timeline *tl = ce->timeline;
2123 
2124 	mutex_lock(&tl->mutex);
2125 	intel_context_exit(ce);
2126 	mutex_unlock(&tl->mutex);
2127 
2128 	intel_context_unpin(ce);
2129 }
2130 
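/*
 * Translate the legacy I915_EXEC_* ring selector (including the BSD
 * engine-selection bits) into an index into the context's engine map.
 * Returns -1 for an invalid selection.
 */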
2131 static unsigned int
2132 eb_select_legacy_ring(struct i915_execbuffer *eb,
2133 		      struct drm_file *file,
2134 		      struct drm_i915_gem_execbuffer2 *args)
2135 {
2136 	struct drm_i915_private *i915 = eb->i915;
2137 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2138 
2139 	if (user_ring_id != I915_EXEC_BSD &&
2140 	    (args->flags & I915_EXEC_BSD_MASK)) {
2141 		drm_dbg(&i915->drm,
2142 			"execbuf with non-BSD ring but with invalid "
2143 			"BSD dispatch flags: %d\n", (int)(args->flags));
2144 		return -1;
2145 	}
2146 
2147 	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
2148 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2149 
2150 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2151 			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
2152 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2153 			   bsd_idx <= I915_EXEC_BSD_RING2) {
2154 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
2155 			bsd_idx--;
2156 		} else {
2157 			drm_dbg(&i915->drm,
2158 				"execbuf with unknown bsd ring: %u\n",
2159 				bsd_idx);
2160 			return -1;
2161 		}
2162 
2163 		return _VCS(bsd_idx);
2164 	}
2165 
2166 	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2167 		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
2168 			user_ring_id);
2169 		return -1;
2170 	}
2171 
2172 	return user_ring_map[user_ring_id];
2173 }
2174 
2175 static int
2176 eb_pin_engine(struct i915_execbuffer *eb,
2177 	      struct drm_file *file,
2178 	      struct drm_i915_gem_execbuffer2 *args)
2179 {
2180 	struct intel_context *ce;
2181 	unsigned int idx;
2182 	int err;
2183 
2184 	if (i915_gem_context_user_engines(eb->gem_context))
2185 		idx = args->flags & I915_EXEC_RING_MASK;
2186 	else
2187 		idx = eb_select_legacy_ring(eb, file, args);
2188 
2189 	ce = i915_gem_context_get_engine(eb->gem_context, idx);
2190 	if (IS_ERR(ce))
2191 		return PTR_ERR(ce);
2192 
2193 	err = __eb_pin_engine(eb, ce);
2194 	intel_context_put(ce);
2195 
2196 	return err;
2197 }
2198 
2199 static void
2200 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2201 {
2202 	while (n--)
2203 		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
2204 	kvfree(fences);
2205 }
2206 
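/*
 * With I915_EXEC_FENCE_ARRAY, cliprects_ptr/num_cliprects carry an array
 * of drm_i915_gem_exec_fence instead: look up each syncobj handle and
 * stash its wait/signal flags in the low bits of the returned pointers.
 */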
2207 static struct drm_syncobj **
2208 get_fence_array(struct drm_i915_gem_execbuffer2 *args,
2209 		struct drm_file *file)
2210 {
2211 	const unsigned long nfences = args->num_cliprects;
2212 	struct drm_i915_gem_exec_fence __user *user;
2213 	struct drm_syncobj **fences;
2214 	unsigned long n;
2215 	int err;
2216 
2217 	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
2218 		return NULL;
2219 
2220 	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
2221 	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2222 	if (nfences > min_t(unsigned long,
2223 			    ULONG_MAX / sizeof(*user),
2224 			    SIZE_MAX / sizeof(*fences)))
2225 		return ERR_PTR(-EINVAL);
2226 
2227 	user = u64_to_user_ptr(args->cliprects_ptr);
2228 	if (!access_ok(user, nfences * sizeof(*user)))
2229 		return ERR_PTR(-EFAULT);
2230 
2231 	fences = kvmalloc_array(nfences, sizeof(*fences),
2232 				__GFP_NOWARN | GFP_KERNEL);
2233 	if (!fences)
2234 		return ERR_PTR(-ENOMEM);
2235 
2236 	for (n = 0; n < nfences; n++) {
2237 		struct drm_i915_gem_exec_fence fence;
2238 		struct drm_syncobj *syncobj;
2239 
2240 		if (__copy_from_user(&fence, user++, sizeof(fence))) {
2241 			err = -EFAULT;
2242 			goto err;
2243 		}
2244 
2245 		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
2246 			err = -EINVAL;
2247 			goto err;
2248 		}
2249 
2250 		syncobj = drm_syncobj_find(file, fence.handle);
2251 		if (!syncobj) {
2252 			DRM_DEBUG("Invalid syncobj handle provided\n");
2253 			err = -ENOENT;
2254 			goto err;
2255 		}
2256 
2257 #ifdef notyet
2258 		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2259 			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2260 #endif
2261 
2262 		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
2263 	}
2264 
2265 	return fences;
2266 
2267 err:
2268 	__free_fence_array(fences, n);
2269 	return ERR_PTR(err);
2270 }
2271 
2272 static void
2273 put_fence_array(struct drm_i915_gem_execbuffer2 *args,
2274 		struct drm_syncobj **fences)
2275 {
2276 	if (fences)
2277 		__free_fence_array(fences, args->num_cliprects);
2278 }
2279 
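/* Make the request wait upon every syncobj flagged I915_EXEC_FENCE_WAIT. */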
2280 static int
2281 await_fence_array(struct i915_execbuffer *eb,
2282 		  struct drm_syncobj **fences)
2283 {
2284 	const unsigned int nfences = eb->args->num_cliprects;
2285 	unsigned int n;
2286 	int err;
2287 
2288 	for (n = 0; n < nfences; n++) {
2289 		struct drm_syncobj *syncobj;
2290 		struct dma_fence *fence;
2291 		unsigned int flags;
2292 
2293 		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2294 		if (!(flags & I915_EXEC_FENCE_WAIT))
2295 			continue;
2296 
2297 		fence = drm_syncobj_fence_get(syncobj);
2298 		if (!fence)
2299 			return -EINVAL;
2300 
2301 		err = i915_request_await_dma_fence(eb->request, fence);
2302 		dma_fence_put(fence);
2303 		if (err < 0)
2304 			return err;
2305 	}
2306 
2307 	return 0;
2308 }
2309 
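/*
 * After the request has been committed, point every syncobj flagged
 * I915_EXEC_FENCE_SIGNAL at its fence.
 */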
2310 static void
2311 signal_fence_array(struct i915_execbuffer *eb,
2312 		   struct drm_syncobj **fences)
2313 {
2314 	const unsigned int nfences = eb->args->num_cliprects;
2315 	struct dma_fence * const fence = &eb->request->fence;
2316 	unsigned int n;
2317 
2318 	for (n = 0; n < nfences; n++) {
2319 		struct drm_syncobj *syncobj;
2320 		unsigned int flags;
2321 
2322 		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2323 		if (!(flags & I915_EXEC_FENCE_SIGNAL))
2324 			continue;
2325 
2326 		drm_syncobj_replace_fence(syncobj, fence);
2327 	}
2328 }
2329 
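/*
 * Retire completed requests along the timeline, stopping at @end or at the
 * first request that is still busy.
 */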
2330 static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
2331 {
2332 	struct i915_request *rq, *rn;
2333 
2334 	list_for_each_entry_safe(rq, rn, &tl->requests, link)
2335 		if (rq == end || !i915_request_retire(rq))
2336 			break;
2337 }
2338 
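/*
 * Commit and queue the request, applying the context's scheduling
 * attributes (with the usual boosts for new clients and semaphore-free
 * chains), then opportunistically retire older requests on the timeline.
 */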
2339 static void eb_request_add(struct i915_execbuffer *eb)
2340 {
2341 	struct i915_request *rq = eb->request;
2342 	struct intel_timeline * const tl = i915_request_timeline(rq);
2343 	struct i915_sched_attr attr = {};
2344 	struct i915_request *prev;
2345 
2346 	lockdep_assert_held(&tl->mutex);
2347 	lockdep_unpin_lock(&tl->mutex, rq->cookie);
2348 
2349 	trace_i915_request_add(rq);
2350 
2351 	prev = __i915_request_commit(rq);
2352 
2353 	/* Check that the context wasn't destroyed before submission */
2354 	if (likely(!intel_context_is_closed(eb->context))) {
2355 		attr = eb->gem_context->sched;
2356 
2357 		/*
2358 		 * Boost actual workloads past semaphores!
2359 		 *
2360 		 * With semaphores we spin on one engine waiting for another,
2361 		 * simply to reduce the latency of starting our work when
2362 		 * the signaler completes. However, if there is any other
2363 		 * work that we could be doing on this engine instead, that
2364 		 * is better utilisation and will reduce the overall duration
2365 		 * of the current work. To avoid PI-boosting a distant
2366 		 * semaphore ahead of useful work, we keep a history
2367 		 * of any semaphore use along our dependency chain.
2368 		 */
2369 		if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
2370 			attr.priority |= I915_PRIORITY_NOSEMAPHORE;
2371 
2372 		/*
2373 		 * Boost priorities to new clients (new request flows).
2374 		 *
2375 		 * Allow interactive/synchronous clients to jump ahead of
2376 		 * the bulk clients. (FQ_CODEL)
2377 		 */
2378 		if (list_empty(&rq->sched.signalers_list))
2379 			attr.priority |= I915_PRIORITY_WAIT;
2380 	} else {
2381 		/* Serialise with context_close via the add_to_timeline */
2382 		i915_request_set_error_once(rq, -ENOENT);
2383 		__i915_request_skip(rq);
2384 	}
2385 
2386 	local_bh_disable();
2387 	__i915_request_queue(rq, &attr);
2388 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
2389 
2390 	/* Try to clean up the client's timeline after submitting the request */
2391 	if (prev)
2392 		retire_requests(tl, prev);
2393 
2394 	mutex_unlock(&tl->mutex);
2395 }
2396 
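/*
 * The core of execbuf: look up and reserve every object, apply any
 * relocations, run the cmdparser when required, then build and submit the
 * request while honouring the in/out/submit fences supplied by userspace.
 */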
2397 static int
2398 i915_gem_do_execbuffer(struct drm_device *dev,
2399 		       struct drm_file *file,
2400 		       struct drm_i915_gem_execbuffer2 *args,
2401 		       struct drm_i915_gem_exec_object2 *exec,
2402 		       struct drm_syncobj **fences)
2403 {
2404 	struct drm_i915_private *i915 = to_i915(dev);
2405 	struct i915_execbuffer eb;
2406 	struct dma_fence *in_fence = NULL;
2407 	struct dma_fence *exec_fence = NULL;
2408 	struct sync_file *out_fence = NULL;
2409 	struct i915_vma *batch;
2410 	int out_fence_fd = -1;
2411 	int err;
2412 
2413 	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
2414 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
2415 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
2416 
2417 	eb.i915 = i915;
2418 	eb.file = file;
2419 	eb.args = args;
2420 	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
2421 		args->flags |= __EXEC_HAS_RELOC;
2422 
2423 	eb.exec = exec;
2424 	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
2425 	eb.vma[0].vma = NULL;
2426 
2427 	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
2428 	reloc_cache_init(&eb.reloc_cache, eb.i915);
2429 
2430 	eb.buffer_count = args->buffer_count;
2431 	eb.batch_start_offset = args->batch_start_offset;
2432 	eb.batch_len = args->batch_len;
2433 	eb.trampoline = NULL;
2434 
2435 	eb.batch_flags = 0;
2436 	if (args->flags & I915_EXEC_SECURE) {
2437 		if (INTEL_GEN(i915) >= 11)
2438 			return -ENODEV;
2439 
2440 		/* Return -EPERM to trigger fallback code on old binaries. */
2441 		if (!HAS_SECURE_BATCHES(i915))
2442 			return -EPERM;
2443 
2444 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
2445 			return -EPERM;
2446 
2447 		eb.batch_flags |= I915_DISPATCH_SECURE;
2448 	}
2449 	if (args->flags & I915_EXEC_IS_PINNED)
2450 		eb.batch_flags |= I915_DISPATCH_PINNED;
2451 
2452 	if (args->flags & I915_EXEC_FENCE_IN) {
2453 		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
2454 		if (!in_fence)
2455 			return -EINVAL;
2456 	}
2457 
2458 	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
2459 		if (in_fence) {
2460 			err = -EINVAL;
2461 			goto err_in_fence;
2462 		}
2463 
2464 		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
2465 		if (!exec_fence) {
2466 			err = -EINVAL;
2467 			goto err_in_fence;
2468 		}
2469 	}
2470 
2471 	if (args->flags & I915_EXEC_FENCE_OUT) {
2472 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
2473 		if (out_fence_fd < 0) {
2474 			err = out_fence_fd;
2475 			goto err_exec_fence;
2476 		}
2477 	}
2478 
2479 	err = eb_create(&eb);
2480 	if (err)
2481 		goto err_out_fence;
2482 
2483 	GEM_BUG_ON(!eb.lut_size);
2484 
2485 	err = eb_select_context(&eb);
2486 	if (unlikely(err))
2487 		goto err_destroy;
2488 
2489 	err = eb_pin_engine(&eb, file, args);
2490 	if (unlikely(err))
2491 		goto err_context;
2492 
2493 	err = eb_relocate(&eb);
2494 	if (err) {
2495 		/*
2496 		 * If the user expects the execobject.offset and
2497 		 * reloc.presumed_offset to be an exact match,
2498 		 * as for using NO_RELOC, then we cannot update
2499 		 * the execobject.offset until we have completed
2500 		 * relocation.
2501 		 */
2502 		args->flags &= ~__EXEC_HAS_RELOC;
2503 		goto err_vma;
2504 	}
2505 
2506 	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
2507 		drm_dbg(&i915->drm,
2508 			"Attempting to use self-modifying batch buffer\n");
2509 		err = -EINVAL;
2510 		goto err_vma;
2511 	}
2512 
2513 	if (range_overflows_t(u64,
2514 			      eb.batch_start_offset, eb.batch_len,
2515 			      eb.batch->vma->size)) {
2516 		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
2517 		err = -EINVAL;
2518 		goto err_vma;
2519 	}
2520 
2521 	if (eb.batch_len == 0)
2522 		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
2523 
2524 	err = eb_parse(&eb);
2525 	if (err)
2526 		goto err_vma;
2527 
2528 	/*
2529 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
2530 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
2531 	 * hsw should have this fixed, but bdw mucks it up again. */
2532 	batch = eb.batch->vma;
2533 	if (eb.batch_flags & I915_DISPATCH_SECURE) {
2534 		struct i915_vma *vma;
2535 
2536 		/*
2537 		 * So on first glance it looks freaky that we pin the batch here
2538 		 * outside of the reservation loop. But:
2539 		 * - The batch is already pinned into the relevant ppgtt, so we
2540 		 *   already have the backing storage fully allocated.
2541 		 * - No other BO uses the global gtt (well contexts, but meh),
2542 		 *   so we don't really have issues with multiple objects not
2543 		 *   fitting due to fragmentation.
2544 		 * So this is actually safe.
2545 		 */
2546 		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
2547 		if (IS_ERR(vma)) {
2548 			err = PTR_ERR(vma);
2549 			goto err_parse;
2550 		}
2551 
2552 		batch = vma;
2553 	}
2554 
2555 	/* All GPU relocation batches must be submitted prior to the user rq */
2556 	GEM_BUG_ON(eb.reloc_cache.rq);
2557 
2558 	/* Allocate a request for this batch buffer nice and early. */
2559 	eb.request = i915_request_create(eb.context);
2560 	if (IS_ERR(eb.request)) {
2561 		err = PTR_ERR(eb.request);
2562 		goto err_batch_unpin;
2563 	}
2564 
2565 	if (in_fence) {
2566 		err = i915_request_await_dma_fence(eb.request, in_fence);
2567 		if (err < 0)
2568 			goto err_request;
2569 	}
2570 
2571 	if (exec_fence) {
2572 		err = i915_request_await_execution(eb.request, exec_fence,
2573 						   eb.engine->bond_execute);
2574 		if (err < 0)
2575 			goto err_request;
2576 	}
2577 
2578 	if (fences) {
2579 		err = await_fence_array(&eb, fences);
2580 		if (err)
2581 			goto err_request;
2582 	}
2583 
2584 	if (out_fence_fd != -1) {
2585 		out_fence = sync_file_create(&eb.request->fence);
2586 		if (!out_fence) {
2587 			err = -ENOMEM;
2588 			goto err_request;
2589 		}
2590 	}
2591 
2592 	/*
2593 	 * Whilst this request exists, batch_obj will be on the
2594 	 * active_list, and so will hold the active reference. Only when this
2595 	 * request is retired will the batch_obj be moved onto the
2596 	 * inactive_list and lose its active reference. Hence we do not need
2597 	 * to explicitly hold another reference here.
2598 	 */
2599 	eb.request->batch = batch;
2600 	if (batch->private)
2601 		intel_engine_pool_mark_active(batch->private, eb.request);
2602 
2603 	trace_i915_request_queue(eb.request, eb.batch_flags);
2604 	err = eb_submit(&eb, batch);
2605 err_request:
2606 	add_to_client(eb.request, file);
2607 	i915_request_get(eb.request);
2608 	eb_request_add(&eb);
2609 
2610 	if (fences)
2611 		signal_fence_array(&eb, fences);
2612 
2613 	if (out_fence) {
2614 		if (err == 0) {
2615 			fd_install(out_fence_fd, out_fence->file);
2616 			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2617 			args->rsvd2 |= (u64)out_fence_fd << 32;
2618 			out_fence_fd = -1;
2619 		} else {
2620 			fput(out_fence->file);
2621 		}
2622 	}
2623 	i915_request_put(eb.request);
2624 
2625 err_batch_unpin:
2626 	if (eb.batch_flags & I915_DISPATCH_SECURE)
2627 		i915_vma_unpin(batch);
2628 err_parse:
2629 	if (batch->private)
2630 		intel_engine_pool_put(batch->private);
2631 err_vma:
2632 	if (eb.exec)
2633 		eb_release_vmas(&eb);
2634 	if (eb.trampoline)
2635 		i915_vma_unpin(eb.trampoline);
2636 	eb_unpin_engine(&eb);
2637 err_context:
2638 	i915_gem_context_put(eb.gem_context);
2639 err_destroy:
2640 	eb_destroy(&eb);
2641 err_out_fence:
2642 	if (out_fence_fd != -1)
2643 		put_unused_fd(out_fence_fd);
2644 err_exec_fence:
2645 	dma_fence_put(exec_fence);
2646 err_in_fence:
2647 	dma_fence_put(in_fence);
2648 	return err;
2649 }
2650 
2651 static size_t eb_element_size(void)
2652 {
2653 	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
2654 }
2655 
2656 static bool check_buffer_count(size_t count)
2657 {
2658 	const size_t sz = eb_element_size();
2659 
2660 	/*
2661 	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
2662 	 * array size (see eb_create()). Otherwise, we can accept an array as
2663 	 * large as can be addressed (though use large arrays at your peril)!
2664 	 */
2665 
2666 	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
2667 }
2668 
2669 /*
2670  * Legacy execbuffer just creates an exec2 list from the original exec object
2671  * list array and passes it to the real function.
2672  */
2673 int
2674 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
2675 			  struct drm_file *file)
2676 {
2677 	struct drm_i915_private *i915 = to_i915(dev);
2678 	struct drm_i915_gem_execbuffer *args = data;
2679 	struct drm_i915_gem_execbuffer2 exec2;
2680 	struct drm_i915_gem_exec_object *exec_list = NULL;
2681 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
2682 	const size_t count = args->buffer_count;
2683 	unsigned int i;
2684 	int err;
2685 
2686 	if (!check_buffer_count(count)) {
2687 		drm_dbg(&i915->drm, "execbuf with %zd buffers\n", count);
2688 		return -EINVAL;
2689 	}
2690 
2691 	exec2.buffers_ptr = args->buffers_ptr;
2692 	exec2.buffer_count = args->buffer_count;
2693 	exec2.batch_start_offset = args->batch_start_offset;
2694 	exec2.batch_len = args->batch_len;
2695 	exec2.DR1 = args->DR1;
2696 	exec2.DR4 = args->DR4;
2697 	exec2.num_cliprects = args->num_cliprects;
2698 	exec2.cliprects_ptr = args->cliprects_ptr;
2699 	exec2.flags = I915_EXEC_RENDER;
2700 	i915_execbuffer2_set_context_id(exec2, 0);
2701 
2702 	err = i915_gem_check_execbuffer(&exec2);
2703 	if (err)
2704 		return err;
2705 
2706 	/* Copy in the exec list from userland */
2707 	exec_list = kvmalloc_array(count, sizeof(*exec_list),
2708 				   __GFP_NOWARN | GFP_KERNEL);
2709 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
2710 				    __GFP_NOWARN | GFP_KERNEL);
2711 	if (exec_list == NULL || exec2_list == NULL) {
2712 		drm_dbg(&i915->drm,
2713 			"Failed to allocate exec list for %d buffers\n",
2714 			args->buffer_count);
2715 		kvfree(exec_list);
2716 		kvfree(exec2_list);
2717 		return -ENOMEM;
2718 	}
2719 	err = copy_from_user(exec_list,
2720 			     u64_to_user_ptr(args->buffers_ptr),
2721 			     sizeof(*exec_list) * count);
2722 	if (err) {
2723 		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
2724 			args->buffer_count, err);
2725 		kvfree(exec_list);
2726 		kvfree(exec2_list);
2727 		return -EFAULT;
2728 	}
2729 
2730 	for (i = 0; i < args->buffer_count; i++) {
2731 		exec2_list[i].handle = exec_list[i].handle;
2732 		exec2_list[i].relocation_count = exec_list[i].relocation_count;
2733 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
2734 		exec2_list[i].alignment = exec_list[i].alignment;
2735 		exec2_list[i].offset = exec_list[i].offset;
2736 		if (INTEL_GEN(to_i915(dev)) < 4)
2737 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
2738 		else
2739 			exec2_list[i].flags = 0;
2740 	}
2741 
2742 	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
2743 	if (exec2.flags & __EXEC_HAS_RELOC) {
2744 		struct drm_i915_gem_exec_object __user *user_exec_list =
2745 			u64_to_user_ptr(args->buffers_ptr);
2746 
2747 		/* Copy the new buffer offsets back to the user's exec list. */
2748 		for (i = 0; i < args->buffer_count; i++) {
2749 			if (!(exec2_list[i].offset & UPDATE))
2750 				continue;
2751 
2752 			exec2_list[i].offset =
2753 				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2754 			exec2_list[i].offset &= PIN_OFFSET_MASK;
2755 			if (__copy_to_user(&user_exec_list[i].offset,
2756 					   &exec2_list[i].offset,
2757 					   sizeof(user_exec_list[i].offset)))
2758 				break;
2759 		}
2760 	}
2761 
2762 	kvfree(exec_list);
2763 	kvfree(exec2_list);
2764 	return err;
2765 }
2766 
2767 int
2768 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
2769 			   struct drm_file *file)
2770 {
2771 	struct drm_i915_private *i915 = to_i915(dev);
2772 	struct drm_i915_gem_execbuffer2 *args = data;
2773 	struct drm_i915_gem_exec_object2 *exec2_list;
2774 	struct drm_syncobj **fences = NULL;
2775 	const size_t count = args->buffer_count;
2776 	int err;
2777 
2778 	if (!check_buffer_count(count)) {
2779 		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
2780 		return -EINVAL;
2781 	}
2782 
2783 	err = i915_gem_check_execbuffer(args);
2784 	if (err)
2785 		return err;
2786 
2787 	/* Allocate an extra slot for use by the command parser */
2788 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
2789 				    __GFP_NOWARN | GFP_KERNEL);
2790 	if (exec2_list == NULL) {
2791 		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
2792 			count);
2793 		return -ENOMEM;
2794 	}
2795 	if (copy_from_user(exec2_list,
2796 			   u64_to_user_ptr(args->buffers_ptr),
2797 			   sizeof(*exec2_list) * count)) {
2798 		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
2799 		kvfree(exec2_list);
2800 		return -EFAULT;
2801 	}
2802 
2803 	if (args->flags & I915_EXEC_FENCE_ARRAY) {
2804 		fences = get_fence_array(args, file);
2805 		if (IS_ERR(fences)) {
2806 			kvfree(exec2_list);
2807 			return PTR_ERR(fences);
2808 		}
2809 	}
2810 
2811 	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
2812 
2813 	/*
2814 	 * Now that we have begun execution of the batchbuffer, we ignore
2815 	 * any new error after this point. Also given that we have already
2816 	 * updated the associated relocations, we try to write out the current
2817 	 * object locations irrespective of any error.
2818 	 */
2819 	if (args->flags & __EXEC_HAS_RELOC) {
2820 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
2821 			u64_to_user_ptr(args->buffers_ptr);
2822 		unsigned int i;
2823 
2824 		/* Copy the new buffer offsets back to the user's exec list. */
2825 		/*
2826 		 * Note: count * sizeof(*user_exec_list) does not overflow,
2827 		 * because we checked 'count' in check_buffer_count().
2828 		 *
2829 		 * And this range already got effectively checked earlier
2830 		 * when we did the "copy_from_user()" above.
2831 		 */
2832 		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
2833 			goto end;
2834 
2835 		for (i = 0; i < args->buffer_count; i++) {
2836 			if (!(exec2_list[i].offset & UPDATE))
2837 				continue;
2838 
2839 			exec2_list[i].offset =
2840 				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2841 			unsafe_put_user(exec2_list[i].offset,
2842 					&user_exec_list[i].offset,
2843 					end_user);
2844 		}
2845 end_user:
2846 		user_access_end();
2847 end:;
2848 	}
2849 
2850 	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
2851 	put_fence_array(args, fences);
2852 	kvfree(exec2_list);
2853 	return err;
2854 }
2855