/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_SCHEDULER_TYPES_H_
#define _I915_SCHEDULER_TYPES_H_

#include <linux/list.h>

#include "gt/intel_engine_types.h"
#include "i915_priolist_types.h"

struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;

struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};
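
/*
 * Illustrative sketch (not part of this header's contract): bumping a
 * request above the default priority.  Exactly which helper applies the
 * attribute differs between backends and kernel versions; here we go through
 * the &i915_sched_engine->schedule hook documented further down this file.
 *
 *	struct i915_sched_attr attr = {
 *		.priority = I915_PRIORITY_NORMAL + 1,
 *	};
 *
 *	rq->engine->sched_engine->schedule(rq, &attr);
 *
 * A request carrying a greater attr.priority is selected for execution ahead
 * of (and may preempt) requests with a lower value.
 */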

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 *
 * There is no active component to the "scheduler". As we know the dependency
 * DAG of each request, we are able to insert it into a sorted queue when it
 * is ready, and are able to reorder its portion of the graph to accommodate
 * dynamic priority changes.
 *
 * Ok, there is now one active element to the "scheduler" in the backends.
 * We let a new context run for a small amount of time before re-evaluating
 * the run order. As we re-evaluate, we maintain the strict ordering of
 * dependencies, but attempt to rotate the active contexts (the current
 * context is put to the back of its priority queue, and its dependents are
 * then reshuffled). This provides minimal timeslicing and prevents a
 * userspace hog (e.g. something waiting on a user semaphore [VkEvent]) from
 * denying service to others.
 */
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	struct i915_sched_attr attr;
	unsigned int flags;
#define I915_SCHED_HAS_EXTERNAL_CHAIN	BIT(0)
	intel_engine_mask_t semaphores;
};

struct i915_dependency {
	struct i915_sched_node *signaler;
	struct i915_sched_node *waiter;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC		BIT(0)
#define I915_DEPENDENCY_EXTERNAL	BIT(1)
#define I915_DEPENDENCY_WEAK		BIT(2)
};
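
/*
 * Roughly how a single edge of the DAG is wired up (illustrative; the real
 * setup lives in i915_scheduler.c).  If request B consumes the output of
 * request A, a struct i915_dependency records the edge:
 *
 *	dep->signaler = &A->sched;	// the node being waited upon
 *	dep->waiter   = &B->sched;	// the node doing the waiting
 *
 *	// B keeps the edge on its signalers_list, threaded via signal_link
 *	list_add_rcu(&dep->signal_link, &B->sched.signalers_list);
 *	// A keeps the edge on its waiters_list, threaded via wait_link
 *	list_add(&dep->wait_link, &A->sched.waiters_list);
 *
 * Note the crossed naming: walking a request's signalers_list follows
 * signal_link and yields dependencies whose ->signaler is the upstream
 * request, matching the for_each_signaler()/for_each_waiter() helpers below.
 */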

#define for_each_waiter(p__, rq__) \
	list_for_each_entry_lockless(p__, \
				     &(rq__)->sched.waiters_list, \
				     wait_link)

#define for_each_signaler(p__, rq__) \
	list_for_each_entry_rcu(p__, \
				&(rq__)->sched.signalers_list, \
				signal_link)

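/*
 * Example usage (illustrative): walking a request's incoming edges.
 * for_each_signaler() is an RCU list walk, so the caller is expected to hold
 * rcu_read_lock() (or otherwise serialise against list updates);
 * for_each_waiter() uses the lockless variant and is typically called with
 * the scheduler lock held.
 *
 *	struct i915_dependency *p;
 *
 *	rcu_read_lock();
 *	for_each_signaler(p, rq) {
 *		struct i915_request *prev =
 *			container_of(p->signaler, struct i915_request, sched);
 *
 *		// 'prev' must complete before 'rq' is ready to run
 *	}
 *	rcu_read_unlock();
 */
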
/**
 * struct i915_sched_engine - scheduler engine
 *
 * A scheduler engine represents a submission queue with different priority
 * bands. It contains all the common state (relative to the backend) needed to
 * queue, track, and submit a request.
 *
 * At the moment this object is quite i915-specific, but it will transition
 * into a container for the drm_gpu_scheduler plus a few other variables once
 * i915 is integrated with the DRM scheduler.
 */
struct i915_sched_engine {
	/**
	 * @ref: reference count of schedule engine object
	 */
	struct kref ref;

	/**
	 * @lock: protects requests in priority lists, requests, hold and
	 * tasklet while running
	 */
	spinlock_t lock;

	/**
	 * @requests: list of requests inflight on this schedule engine
	 */
	struct list_head requests;

	/**
	 * @hold: list of ready requests, but on hold
	 */
	struct list_head hold;

	/**
	 * @tasklet: softirq tasklet for submission
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request that we wanted to preempt but which has since completed, at
	 * the time of dequeuing the priority hint may no longer match the
	 * highest available request priority.
	 */
	int queue_priority_hint;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @private_data: private data of the submission backend
	 */
	void *private_data;

	/**
	 * @destroy: destroy schedule engine / cleanup in backend
	 */
	void	(*destroy)(struct kref *kref);

	/**
	 * @disabled: check if backend has disabled submission
	 */
	bool	(*disabled)(struct i915_sched_engine *sched_engine);

	/**
	 * @kick_backend: kick backend after a request's priority has changed
	 */
	void	(*kick_backend)(const struct i915_request *rq,
				int prio);

	/**
	 * @bump_inflight_request_prio: update priority of an inflight request
	 */
	void	(*bump_inflight_request_prio)(struct i915_request *rq,
					      int prio);

	/**
	 * @retire_inflight_request_prio: indicate request is retired to
	 * priority tracking
	 */
	void	(*retire_inflight_request_prio)(struct i915_request *rq);

	/**
	 * @schedule: adjust priority of request
	 *
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void	(*schedule)(struct i915_request *request,
			    const struct i915_sched_attr *attr);
};
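
/*
 * Lifetime sketch (illustrative): the scheduler engine is reference counted
 * through @ref and released via the backend-supplied @destroy callback.
 * 'example_destroy' below is a hypothetical backend hook, not a real i915
 * function.
 *
 *	static void example_destroy(struct kref *kref)
 *	{
 *		struct i915_sched_engine *se =
 *			container_of(kref, typeof(*se), ref);
 *
 *		tasklet_kill(&se->tasklet);	// ensure submission has stopped
 *		kfree(se);
 *	}
 *
 *	kref_get(&sched_engine->ref);				// take a reference
 *	kref_put(&sched_engine->ref, sched_engine->destroy);	// drop it again
 *
 * In practice i915 wraps these in small helpers
 * (i915_sched_engine_get()/i915_sched_engine_put()).
 */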

#endif /* _I915_SCHEDULER_TYPES_H_ */