/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS

/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is
 * created, we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1
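
/*
 * Illustrative sketch (not upstream code): drm_sched_policy is a global
 * knob, so run-queue code can branch on it when picking the next entity.
 * The branch below is hypothetical and simplified; it only shows how the
 * two values are meant to be consumed.
 *
 *	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 *		entity = rb_entry(rb_first_cached(&rq->rb_tree_root),
 *				  struct drm_sched_entity, rb_tree_node);
 *	else
 *		entity = list_first_entry_or_null(&rq->entities,
 *						  struct drm_sched_entity,
 *						  list);
 */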

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq *rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for more
	 * details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has been
	 * set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler **sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
	 * this doesn't need to be atomic.
	 */
	atomic_t fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job queue.
	 */
	struct dma_fence *dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag.
	 */
	atomic_t *guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence __rcu *last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
#ifdef __linux__
	struct task_struct *last_user;
#else
	struct process *last_user;
#endif

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup in
	 * drm_sched_entity_fini().
	 */
	struct completion entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks earliest job waiting in SW queue
	 */
	ktime_t oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into time-based priority queue
	 */
	struct rb_node rb_tree_node;

};
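
/*
 * A minimal setup sketch (illustrative, not taken from any driver): an
 * entity is typically embedded in a driver-side per-file context and
 * initialized against one or more schedulers. "my_ctx", "my_ctx_init"
 * and "my_sched" are hypothetical names.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	int my_ctx_init(struct my_ctx *ctx, struct drm_gpu_scheduler *my_sched)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { my_sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list, ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 */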

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of time-based priority queue of entities for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct drm_gpu_scheduler *sched;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
	struct rb_root_cached rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signaled.
	 */
	struct dma_fence *parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;
	/**
	 * @owner: job owner for debugging
	 */
	void *owner;
};
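
/*
 * Illustrative sketch (assumed driver code, not upstream): per the
 * @finished documentation above, the finished fence is the one to hand
 * out as the job's out-fence, since it exists as soon as the job is
 * armed. Here the driver job is assumed to embed a &struct drm_sched_job
 * named "base".
 *
 *	drm_sched_job_arm(&job->base);
 *	out_fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 */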

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule the job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct list_head list;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb finish_cb;
		struct work_struct work;
	};

	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
	struct dma_fence_cb cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t submit_ts;
};
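
/*
 * Typical submission flow, as an illustrative sketch. The driver job
 * struct "my_job" and its embedded "base" member are hypothetical names;
 * only functions declared in this header are used.
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *	};
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	// Collect everything this job must wait for.
 *	ret = drm_sched_job_add_dependency(&job->base, fence);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	// Arming makes &drm_sched_job.s_fence valid; once armed, the job
 *	// must be pushed and can no longer be unwound.
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	return 0;
 *
 * err_cleanup:
 *	drm_sched_job_cleanup(&job->base);
 *	return ret;
 */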

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented by the driver. A minimal
 * implementation sketch follows the struct definition below.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue
	 * 2. Try to gracefully stop non-faulty jobs (optional)
	 * 3. Issue a GPU reset (driver-specific)
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler. One way to
	 * achieve this synchronization is to create an ordered workqueue
	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
	 * queue to drm_sched_init(), to guarantee that timeout handlers are
	 * executed sequentially. The above workflow needs to be slightly
	 * adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional)
	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs()
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start()
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
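
/*
 * Minimal implementation sketch, assuming a hypothetical "my_" driver;
 * everything hardware-specific is elided. The timeout handler follows
 * the single-scheduler workflow documented at @timedout_job above.
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = job->sched;
 *
 *		drm_sched_stop(sched, job);		// 1. park, cancel tdr
 *		my_hw_reset(sched);			// 3. driver-specific reset
 *		drm_sched_resubmit_jobs(sched);		// 4. re-submit pending jobs
 *		drm_sched_start(sched, true);		// 5. unblock the scheduler
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,		// returns the hw fence
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,		// drm_sched_job_cleanup() + kfree
 *	};
 */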

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help loadbalancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct workqueue_struct *timeout_wq;
	struct delayed_work work_tdr;
#ifdef __linux__
	struct task_struct *thread;
#else
	struct proc *thread;
#endif
	struct list_head pending_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t *score;
	atomic_t _score;
	bool ready;
	bool free_guilty;
	struct device *dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
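
/*
 * Init-time sketch (illustrative; "my_sched_ops", the 64-entry ring
 * depth and the 5 second timeout are made-up values): one scheduler is
 * typically created per hardware ring at driver load.
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64, 0, msecs_to_jiffies(5000),
 *			     NULL, NULL, "my-ring", dev);
 */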

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);
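
/*
 * Dependency helpers, sketched (assumed usage; "bo" is a hypothetical
 * GEM object): implicit synchronization against a buffer's reservation
 * object can be pulled into &drm_sched_job.dependencies with either
 * helper.
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
 *
 * or, with explicit control over which fences count:
 *
 *	ret = drm_sched_job_add_resv_dependencies(&job->base, bo->resv,
 *						  DMA_RESV_USAGE_WRITE);
 */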

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
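
/*
 * Teardown sketch (illustrative): entities must be torn down before the
 * scheduler they feed. drm_sched_entity_destroy() combines the flush
 * (bounded by MAX_WAIT_SCHED_ENTITY_Q_EMPTY) and fini steps.
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 *	...
 *	drm_sched_fini(&ring->sched);
 */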
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent);
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);
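
/*
 * Load-balancing sketch (illustrative): when an entity can run on several
 * schedulers, drm_sched_pick_best() returns the least loaded one (by
 * @score), e.g. for an entity spanning two hypothetical rings:
 *
 *	struct drm_gpu_scheduler *scheds[] = { &ring0->sched, &ring1->sched };
 *	struct drm_gpu_scheduler *best;
 *
 *	best = drm_sched_pick_best(scheds, ARRAY_SIZE(scheds));
 */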

#endif