/*	$NetBSD: intel_engine_types.h,v 1.7 2021/12/19 11:51:59 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__

#include <linux/average.h>
#include <linux/completion.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* Legacy HW Engine ID */

#define RCS0_HW		0
#define VCS0_HW		1
#define BCS0_HW		2
#define VECS0_HW	3
#define VCS1_HW		4
#define VCS2_HW		6
#define VCS3_HW		7
#define VECS1_HW	12

/* Gen11+ HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define MAX_ENGINE_CLASS	4
#define MAX_ENGINE_INSTANCE	3

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_gt;
struct intel_ring;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which also allows us
 *    to place multiple batches at different offsets based on some
 *    criterion. That is not a requirement at the moment, but it provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
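
/*
 * Illustrative sketch: because offset and size are in dwords, converting
 * the per-context batch to a GGTT byte address looks roughly like the
 * context register setup in intel_lrc.c:
 *
 *	u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
 *
 *	regs[CTX_BB_PER_CTX_PTR] =
 *		(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(u32)) | 0x01;
 */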

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
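
/*
 * For example, _VCS(2) expands to (VCS0 + 2) == VCS2, mapping a
 * video-decode instance number to its engine id.
 */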

/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
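
/*
 * Usage sketch (illustrative): DECLARE_EWMA() in <linux/average.h>
 * generates struct ewma__engine_latency together with inline helpers:
 *
 *	struct ewma__engine_latency avg;
 *
 *	ewma__engine_latency_init(&avg);		(zero the estimator)
 *	ewma__engine_latency_add(&avg, sample);		(fold in a new sample)
 *	latency = ewma__engine_latency_read(&avg);	(weighted average)
 */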

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware for the execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

#ifdef __NetBSD__
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t submit_reg;
	bus_size_t ctrl_reg;
#else
	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;
#endif

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;
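
	/*
	 * Illustrative: intel_engine.h provides execlists_num_ports(),
	 * which recovers the port count from this field:
	 *
	 *	static inline unsigned int
	 *	execlists_num_ports(const struct intel_engine_execlists *el)
	 *	{
	 *		return el->port_mask + 1;
	 *	}
	 */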

	/**
	 * @switch_priority_hint: Second context priority.
	 *
	 * We submit multiple contexts to the HW simultaneously and would
	 * like to occasionally switch between them to emulate timeslicing.
	 * To know when timeslicing is suitable, we track the priority of
	 * the context submitted second.
	 */
	int switch_priority_hint;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request that we wanted to preempt but that has since completed, at
	 * the time of dequeuing the priority hint may no longer match the
	 * highest available request priority.
	 */
	int queue_priority_hint;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;
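
	/*
	 * Illustrative sketch of how the CS event handler walks this FIFO,
	 * where each entry is two dwords (status, context ID) and process()
	 * stands in for the real event parsing:
	 *
	 *	u8 head = execlists->csb_head;
	 *	const u8 tail = READ_ONCE(*execlists->csb_write);
	 *
	 *	while (head != tail) {
	 *		if (++head == execlists->csb_size)
	 *			head = 0;
	 *		process(&execlists->csb_status[2 * head]);
	 *	}
	 *	execlists->csb_head = head;
	 */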

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int hw_id;
	unsigned int guc_id;

	intel_engine_mask_t mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 uabi_capabilities;
	u32 context_size;
	u32 mmio_base;

	unsigned int context_tag;
#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)

	union {
		struct rb_node rbtree;
		struct llist_node llist;
		struct list_head list;
	} uabi_node;

	struct intel_sseu sseu;

	struct {
		spinlock_t lock;
		struct list_head requests;
		struct list_head hold; /* ready requests, but on hold */
	} active;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	struct {
		struct delayed_work work;
		struct i915_request *systole;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is
	 * under ideal conditions.
	 */
	struct ewma__engine_latency latency;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct intel_engine_pool pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);

		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);

		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int		(*request_alloc)(struct i915_request *rq);

	int		(*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct i915_request *rq,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
						 u32 *cs);
	unsigned int	emit_fini_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct i915_request *rq);

	/*
	 * Called on signaling of a SUBMIT_FENCE, passing along the signaling
	 * request down to the bonded pairs.
	 */
	void            (*bond_execute)(struct i915_request *rq,
					struct dma_fence *signal);

	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void		(*schedule)(struct i915_request *request,
				    const struct i915_sched_attr *attr);

	void		(*release)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL       BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
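
	/*
	 * Illustrative sketch of how the command parser consumes this hook
	 * for commands that miss its lookup tables (LENGTH_BIAS is the
	 * parser's fixed adjustment, defined in i915_cmd_parser.c as 2):
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *
	 *	if (mask == 0)
	 *		return -EINVAL;		(unrecognized command)
	 *	length_dw = (cmd_header & mask) + LENGTH_BIAS;
	 */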

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
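
	/*
	 * Illustrative: intel_engine_get_busy_time() derives the busy time
	 * under stats.lock roughly as
	 *
	 *	ktime_t total = engine->stats.total;
	 *
	 *	if (engine->stats.active)
	 *		total = ktime_add(total,
	 *				  ktime_sub(ktime_get(),
	 *					    engine->stats.start));
	 */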

	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props;
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}

#define instdone_has_slice(dev_priv___, sseu___, slice___) \
	((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))

#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
	(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
	 intel_sseu_has_subslice(sseu__, 0, subslice__))

#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
	for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
	     (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
	     (slice_) += ((subslice_) == 0)) \
		for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
			    (instdone_has_subslice(dev_priv_, sseu_, slice_, \
						    subslice_)))
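
/*
 * Usage sketch (illustrative, after intel_engine_get_instdone() in
 * intel_engine_cs.c): walk every slice/subslice present in the sseu mask
 * when capturing the extended INSTDONE registers:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
 *		instdone->sampler[slice][subslice] = read_subslice_reg(...);
 *		instdone->row[slice][subslice] = read_subslice_reg(...);
 *	}
 */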

#endif /* __INTEL_ENGINE_TYPES__ */