xref: /openbsd/sys/dev/pci/drm/scheduler/sched_main.c (revision f005ef32)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects the entities
30  * from the run queue using a FIFO. The scheduler provides dependency handling
31  * features among jobs. The driver is expected to provide callback functions for
32  * backend operations to the scheduler, such as submitting a job to the hardware
33  * run queue and returning the dependencies of a job.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  *
46  * Note that once a job has been taken from the entity's queue and pushed to the
47  * hardware, i.e. the pending queue, the entity must not be referenced anymore
48  * through the job's entity pointer.
49  */
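/*
 * Illustrative sketch (editor addition, not part of the original file): a
 * minimal driver-side flow tying the pieces above together.  Names such as
 * "ring", "ctx" and "my_job" are hypothetical; only the drm_sched_* and
 * drm_sched_entity_* calls are real.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *
 *	drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, 1, NULL);
 *
 *	drm_sched_job_init(&my_job->base, &ctx->entity, owner);
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 */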
50 
51 #include <linux/kthread.h>
52 #include <linux/wait.h>
53 #include <linux/sched.h>
54 #include <linux/completion.h>
55 #include <linux/dma-resv.h>
56 #ifdef __linux__
57 #include <uapi/linux/sched/types.h>
58 #endif
59 
60 #include <drm/drm_print.h>
61 #include <drm/drm_gem.h>
62 #include <drm/drm_syncobj.h>
63 #include <drm/gpu_scheduler.h>
64 #include <drm/spsc_queue.h>
65 
66 #define CREATE_TRACE_POINTS
67 #include "gpu_scheduler_trace.h"
68 
69 #define to_drm_sched_job(sched_job)		\
70 		container_of((sched_job), struct drm_sched_job, queue_node)
71 
72 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
73 
74 /**
75  * DOC: sched_policy (int)
76  * Used to override the default entity scheduling policy in a run queue.
77  */
78 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
79 module_param_named(sched_policy, drm_sched_policy, int, 0444);
80 
81 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
82 							    const struct rb_node *b)
83 {
84 	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
85 	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
86 
87 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
88 }
89 
90 static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
91 {
92 	struct drm_sched_rq *rq = entity->rq;
93 
94 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
95 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
96 		RB_CLEAR_NODE(&entity->rb_tree_node);
97 	}
98 }
99 
100 void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
101 {
102 	/*
103 	 * Both locks need to be grabbed, one to protect against entity->rq being
104 	 * changed concurrently by drm_sched_entity_select_rq() and the other to
105 	 * protect the rb tree structure while it is updated.
106 	 */
107 	spin_lock(&entity->rq_lock);
108 	spin_lock(&entity->rq->lock);
109 
110 	drm_sched_rq_remove_fifo_locked(entity);
111 
112 	entity->oldest_job_waiting = ts;
113 
114 	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
115 		      drm_sched_entity_compare_before);
116 
117 	spin_unlock(&entity->rq->lock);
118 	spin_unlock(&entity->rq_lock);
119 }
120 
121 /**
122  * drm_sched_rq_init - initialize a given run queue struct
123  *
124  * @sched: scheduler instance to associate with this run queue
125  * @rq: scheduler run queue
126  *
127  * Initializes a scheduler runqueue.
128  */
129 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
130 			      struct drm_sched_rq *rq)
131 {
132 	mtx_init(&rq->lock, IPL_NONE);
133 	INIT_LIST_HEAD(&rq->entities);
134 	rq->rb_tree_root = RB_ROOT_CACHED;
135 	rq->current_entity = NULL;
136 	rq->sched = sched;
137 }
138 
139 /**
140  * drm_sched_rq_add_entity - add an entity
141  *
142  * @rq: scheduler run queue
143  * @entity: scheduler entity
144  *
145  * Adds a scheduler entity to the run queue.
146  */
147 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
148 			     struct drm_sched_entity *entity)
149 {
150 	if (!list_empty(&entity->list))
151 		return;
152 
153 	spin_lock(&rq->lock);
154 
155 	atomic_inc(rq->sched->score);
156 	list_add_tail(&entity->list, &rq->entities);
157 
158 	spin_unlock(&rq->lock);
159 }
160 
161 /**
162  * drm_sched_rq_remove_entity - remove an entity
163  *
164  * @rq: scheduler run queue
165  * @entity: scheduler entity
166  *
167  * Removes a scheduler entity from the run queue.
168  */
169 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
170 				struct drm_sched_entity *entity)
171 {
172 	if (list_empty(&entity->list))
173 		return;
174 
175 	spin_lock(&rq->lock);
176 
177 	atomic_dec(rq->sched->score);
178 	list_del_init(&entity->list);
179 
180 	if (rq->current_entity == entity)
181 		rq->current_entity = NULL;
182 
183 	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
184 		drm_sched_rq_remove_fifo_locked(entity);
185 
186 	spin_unlock(&rq->lock);
187 }
188 
189 /**
190  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
191  *
192  * @rq: scheduler run queue to check.
193  *
194  * Try to find a ready entity, returns NULL if none found.
195  */
196 static struct drm_sched_entity *
197 drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
198 {
199 	struct drm_sched_entity *entity;
200 
201 	spin_lock(&rq->lock);
202 
203 	entity = rq->current_entity;
204 	if (entity) {
205 		list_for_each_entry_continue(entity, &rq->entities, list) {
206 			if (drm_sched_entity_is_ready(entity)) {
207 				rq->current_entity = entity;
208 				reinit_completion(&entity->entity_idle);
209 				spin_unlock(&rq->lock);
210 				return entity;
211 			}
212 		}
213 	}
214 
215 	list_for_each_entry(entity, &rq->entities, list) {
216 
217 		if (drm_sched_entity_is_ready(entity)) {
218 			rq->current_entity = entity;
219 			reinit_completion(&entity->entity_idle);
220 			spin_unlock(&rq->lock);
221 			return entity;
222 		}
223 
224 		if (entity == rq->current_entity)
225 			break;
226 	}
227 
228 	spin_unlock(&rq->lock);
229 
230 	return NULL;
231 }
232 
233 /**
234  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
235  *
236  * @rq: scheduler run queue to check.
237  *
238  * Find oldest waiting ready entity, returns NULL if none found.
239  */
240 static struct drm_sched_entity *
241 drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
242 {
243 	struct rb_node *rb;
244 
245 	spin_lock(&rq->lock);
246 	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
247 		struct drm_sched_entity *entity;
248 
249 		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
250 		if (drm_sched_entity_is_ready(entity)) {
251 			rq->current_entity = entity;
252 			reinit_completion(&entity->entity_idle);
253 			break;
254 		}
255 	}
256 	spin_unlock(&rq->lock);
257 
258 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
259 }
260 
261 /**
262  * drm_sched_job_done - complete a job
263  * @s_job: pointer to the job which is done
264  *
265  * Finish the job's fence and wake up the worker thread.
266  */
267 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
268 {
269 	struct drm_sched_fence *s_fence = s_job->s_fence;
270 	struct drm_gpu_scheduler *sched = s_fence->sched;
271 
272 	atomic_dec(&sched->hw_rq_count);
273 	atomic_dec(sched->score);
274 
275 	trace_drm_sched_process_job(s_fence);
276 
277 	dma_fence_get(&s_fence->finished);
278 	drm_sched_fence_finished(s_fence, result);
279 	dma_fence_put(&s_fence->finished);
280 	wake_up_interruptible(&sched->wake_up_worker);
281 }
282 
283 /**
284  * drm_sched_job_done_cb - the callback for a done job
285  * @f: fence
286  * @cb: fence callbacks
287  */
288 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
289 {
290 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
291 
292 	drm_sched_job_done(s_job, f->error);
293 }
294 
295 /**
296  * drm_sched_start_timeout - start timeout for reset worker
297  *
298  * @sched: scheduler instance to start the worker for
299  *
300  * Start the timeout for the given scheduler.
301  */
302 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
303 {
304 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
305 	    !list_empty(&sched->pending_list))
306 		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
307 }
308 
309 /**
310  * drm_sched_fault - immediately start timeout handler
311  *
312  * @sched: scheduler where the timeout handling should be started.
313  *
314  * Start timeout handling immediately when the driver detects a hardware fault.
315  */
316 void drm_sched_fault(struct drm_gpu_scheduler *sched)
317 {
318 	if (sched->timeout_wq)
319 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
320 }
321 EXPORT_SYMBOL(drm_sched_fault);
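/*
 * Illustrative sketch (editor addition, hypothetical "my_ring" driver code):
 * a hardware fault interrupt handler would typically kick the timeout
 * handler immediately instead of waiting for the TDR timer to expire.
 *
 *	static irqreturn_t my_fault_irq(int irq, void *data)
 *	{
 *		struct my_ring *ring = data;
 *
 *		drm_sched_fault(&ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */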
322 
323 /**
324  * drm_sched_suspend_timeout - Suspend scheduler job timeout
325  *
326  * @sched: scheduler instance for which to suspend the timeout
327  *
328  * Suspend the delayed work timeout for the scheduler. This is done by
329  * modifying the delayed work timeout to an arbitrary large value,
330  * MAX_SCHEDULE_TIMEOUT in this case.
331  *
332  * Returns the timeout remaining
333  *
334  */
335 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
336 {
337 	unsigned long sched_timeout, now = jiffies;
338 
339 #ifdef __linux__
340 	sched_timeout = sched->work_tdr.timer.expires;
341 #else
342 	sched_timeout = sched->work_tdr.to.to_time;
343 #endif
344 
345 	/*
346 	 * Modify the timeout to an arbitrarily large value. This also prevents
347 	 * the timeout from being restarted when new submissions arrive.
348 	 */
349 	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
350 			&& time_after(sched_timeout, now))
351 		return sched_timeout - now;
352 	else
353 		return sched->timeout;
354 }
355 EXPORT_SYMBOL(drm_sched_suspend_timeout);
356 
357 /**
358  * drm_sched_resume_timeout - Resume scheduler job timeout
359  *
360  * @sched: scheduler instance for which to resume the timeout
361  * @remaining: remaining timeout
362  *
363  * Resume the delayed work timeout for the scheduler.
364  */
365 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
366 		unsigned long remaining)
367 {
368 	spin_lock(&sched->job_list_lock);
369 
370 	if (list_empty(&sched->pending_list))
371 		cancel_delayed_work(&sched->work_tdr);
372 	else
373 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
374 
375 	spin_unlock(&sched->job_list_lock);
376 }
377 EXPORT_SYMBOL(drm_sched_resume_timeout);
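/*
 * Illustrative sketch (editor addition): suspend/resume are meant to be used
 * as a pair around driver work that must not be charged against the job
 * timeout.  "ring" is a hypothetical driver structure embedding the
 * scheduler.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	... work that must not trigger a TDR ...
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */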
378 
379 static void drm_sched_job_begin(struct drm_sched_job *s_job)
380 {
381 	struct drm_gpu_scheduler *sched = s_job->sched;
382 
383 	spin_lock(&sched->job_list_lock);
384 	list_add_tail(&s_job->list, &sched->pending_list);
385 	drm_sched_start_timeout(sched);
386 	spin_unlock(&sched->job_list_lock);
387 }
388 
389 static void drm_sched_job_timedout(struct work_struct *work)
390 {
391 	struct drm_gpu_scheduler *sched;
392 	struct drm_sched_job *job;
393 	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
394 
395 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
396 
397 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
398 	spin_lock(&sched->job_list_lock);
399 	job = list_first_entry_or_null(&sched->pending_list,
400 				       struct drm_sched_job, list);
401 
402 	if (job) {
403 		/*
404 		 * Remove the bad job so it cannot be freed by concurrent
405 		 * drm_sched_cleanup_jobs. It will be reinserted after sched->thread
406 		 * is parked at which point it's safe.
407 		 */
408 		list_del_init(&job->list);
409 		spin_unlock(&sched->job_list_lock);
410 
411 		status = job->sched->ops->timedout_job(job);
412 
413 		/*
414 		 * The guilty job did complete and hence needs to be manually removed.
415 		 * See drm_sched_stop doc.
416 		 */
417 		if (sched->free_guilty) {
418 			job->sched->ops->free_job(job);
419 			sched->free_guilty = false;
420 		}
421 	} else {
422 		spin_unlock(&sched->job_list_lock);
423 	}
424 
425 	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
426 		spin_lock(&sched->job_list_lock);
427 		drm_sched_start_timeout(sched);
428 		spin_unlock(&sched->job_list_lock);
429 	}
430 }
431 
432 /**
433  * drm_sched_stop - stop the scheduler
434  *
435  * @sched: scheduler instance
436  * @bad: job which caused the time out
437  *
438  * Stop the scheduler and also remove and free all completed jobs.
439  * Note: the bad job will not be freed as it might be used later, so it is the
440  * caller's responsibility to release it manually if it is not part of the
441  * pending list any more.
442  *
443  */
444 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
445 {
446 	struct drm_sched_job *s_job, *tmp;
447 
448 	kthread_park(sched->thread);
449 
450 	/*
451 	 * Reinsert back the bad job here - now it's safe as
452 	 * drm_sched_get_cleanup_job cannot race against us and release the
453 	 * bad job at this point - we parked (waited for) any in progress
454 	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
455 	 * now until the scheduler thread is unparked.
456 	 */
457 	if (bad && bad->sched == sched)
458 		/*
459 		 * Add at the head of the queue to reflect it was the earliest
460 		 * job extracted.
461 		 */
462 		list_add(&bad->list, &sched->pending_list);
463 
464 	/*
465 	 * Iterate the job list from the last to the first entry and either deactivate
466 	 * their HW callbacks or remove them from the pending list if they have already
467 	 * signaled.
468 	 * This iteration is thread safe as the sched thread is stopped.
469 	 */
470 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
471 					 list) {
472 		if (s_job->s_fence->parent &&
473 		    dma_fence_remove_callback(s_job->s_fence->parent,
474 					      &s_job->cb)) {
475 			dma_fence_put(s_job->s_fence->parent);
476 			s_job->s_fence->parent = NULL;
477 			atomic_dec(&sched->hw_rq_count);
478 		} else {
479 			/*
480 			 * remove job from pending_list.
481 			 * Locking here is for concurrent resume timeout
482 			 */
483 			spin_lock(&sched->job_list_lock);
484 			list_del_init(&s_job->list);
485 			spin_unlock(&sched->job_list_lock);
486 
487 			/*
488 			 * Wait for job's HW fence callback to finish using s_job
489 			 * before releasing it.
490 			 *
491 			 * The job is still alive, so the fence refcount is at least 1.
492 			 */
493 			dma_fence_wait(&s_job->s_fence->finished, false);
494 
495 			/*
496 			 * We must keep bad job alive for later use during
497 			 * recovery by some of the drivers but leave a hint
498 			 * that the guilty job must be released.
499 			 */
500 			if (bad != s_job)
501 				sched->ops->free_job(s_job);
502 			else
503 				sched->free_guilty = true;
504 		}
505 	}
506 
507 	/*
508 	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
509 	 * prevents pending timeout work in progress from firing right away after
510 	 * this TDR finished and before the newly restarted jobs have had a
511 	 * chance to complete.
512 	 */
513 	cancel_delayed_work(&sched->work_tdr);
514 }
515 
516 EXPORT_SYMBOL(drm_sched_stop);
517 
518 /**
519  * drm_sched_start - recover jobs after a reset
520  *
521  * @sched: scheduler instance
522  * @full_recovery: proceed with complete sched restart
523  *
524  */
525 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
526 {
527 	struct drm_sched_job *s_job, *tmp;
528 	int r;
529 
530 	/*
531 	 * Locking the list is not required here as the sched thread is parked
532 	 * so no new jobs are being inserted or removed. Also concurrent
533 	 * GPU recovers can't run in parallel.
534 	 */
535 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
536 		struct dma_fence *fence = s_job->s_fence->parent;
537 
538 		atomic_inc(&sched->hw_rq_count);
539 
540 		if (!full_recovery)
541 			continue;
542 
543 		if (fence) {
544 			r = dma_fence_add_callback(fence, &s_job->cb,
545 						   drm_sched_job_done_cb);
546 			if (r == -ENOENT)
547 				drm_sched_job_done(s_job, fence->error);
548 			else if (r)
549 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
550 					  r);
551 		} else
552 			drm_sched_job_done(s_job, -ECANCELED);
553 	}
554 
555 	if (full_recovery) {
556 		spin_lock(&sched->job_list_lock);
557 		drm_sched_start_timeout(sched);
558 		spin_unlock(&sched->job_list_lock);
559 	}
560 
561 	kthread_unpark(sched->thread);
562 }
563 EXPORT_SYMBOL(drm_sched_start);
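/*
 * Illustrative sketch (editor addition): the usual recovery sequence in a
 * driver's &drm_sched_backend_ops.timedout_job callback is stop -> reset ->
 * (optionally) re-submit -> start.  "my_gpu_reset" is hypothetical.
 *
 *	drm_sched_stop(&ring->sched, bad_job);
 *	my_gpu_reset(ring);
 *	drm_sched_resubmit_jobs(&ring->sched);	// deprecated, see below
 *	drm_sched_start(&ring->sched, true);
 *	return DRM_GPU_SCHED_STAT_NOMINAL;
 */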
564 
565 /**
566  * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
567  *
568  * @sched: scheduler instance
569  *
570  * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
571  * recovery after a job timeout.
572  *
573  * This turned out not to work very well. First of all there are many
574  * problems with the dma_fence implementation and requirements. Either the
575  * implementation is risking deadlocks with core memory management or violating
576  * documented implementation details of the dma_fence object.
577  *
578  * Drivers can still save and restore their state for recovery operations, but
579  * we shouldn't make this a general scheduler feature around the dma_fence
580  * interface.
581  */
582 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
583 {
584 	struct drm_sched_job *s_job, *tmp;
585 	uint64_t guilty_context;
586 	bool found_guilty = false;
587 	struct dma_fence *fence;
588 
589 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
590 		struct drm_sched_fence *s_fence = s_job->s_fence;
591 
592 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
593 			found_guilty = true;
594 			guilty_context = s_job->s_fence->scheduled.context;
595 		}
596 
597 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
598 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
599 
600 		fence = sched->ops->run_job(s_job);
601 
602 		if (IS_ERR_OR_NULL(fence)) {
603 			if (IS_ERR(fence))
604 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
605 
606 			s_job->s_fence->parent = NULL;
607 		} else {
608 
609 			s_job->s_fence->parent = dma_fence_get(fence);
610 
611 			/* Drop for original kref_init */
612 			dma_fence_put(fence);
613 		}
614 	}
615 }
616 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
617 
618 /**
619  * drm_sched_job_init - init a scheduler job
620  * @job: scheduler job to init
621  * @entity: scheduler entity to use
622  * @owner: job owner for debugging
623  *
624  * Refer to drm_sched_entity_push_job() documentation
625  * for locking considerations.
626  *
627  * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
628  * successfully, even when @job is aborted before drm_sched_job_arm() is called.
629  *
630  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
631  * has died, which can mean that there's no valid runqueue for an @entity.
632  * This function returns -ENOENT in this case (which probably should be -EIO as
633  * a more meaningful return value).
634  *
635  * Returns 0 for success, negative error code otherwise.
636  */
637 int drm_sched_job_init(struct drm_sched_job *job,
638 		       struct drm_sched_entity *entity,
639 		       void *owner)
640 {
641 	if (!entity->rq)
642 		return -ENOENT;
643 
644 	job->entity = entity;
645 	job->s_fence = drm_sched_fence_alloc(entity, owner);
646 	if (!job->s_fence)
647 		return -ENOMEM;
648 
649 	INIT_LIST_HEAD(&job->list);
650 
651 	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
652 
653 	return 0;
654 }
655 EXPORT_SYMBOL(drm_sched_job_init);
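/*
 * Illustrative sketch (editor addition): the error-unwind contract described
 * above, with a hypothetical "my_job" wrapper embedding a drm_sched_job and a
 * hypothetical my_prepare_buffers() driver step.
 *
 *	ret = drm_sched_job_init(&my_job->base, entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_prepare_buffers(my_job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&my_job->base);	// job was never armed
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 */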
656 
657 /**
658  * drm_sched_job_arm - arm a scheduler job for execution
659  * @job: scheduler job to arm
660  *
661  * This arms a scheduler job for execution. Specifically it initializes the
662  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
663  * or other places that need to track the completion of this job.
664  *
665  * Refer to drm_sched_entity_push_job() documentation for locking
666  * considerations.
667  *
668  * This can only be called if drm_sched_job_init() succeeded.
669  */
670 void drm_sched_job_arm(struct drm_sched_job *job)
671 {
672 	struct drm_gpu_scheduler *sched;
673 	struct drm_sched_entity *entity = job->entity;
674 
675 	BUG_ON(!entity);
676 	drm_sched_entity_select_rq(entity);
677 	sched = entity->rq->sched;
678 
679 	job->sched = sched;
680 	job->s_priority = entity->rq - sched->sched_rq;
681 	job->id = atomic64_inc_return(&sched->job_id_count);
682 
683 	drm_sched_fence_init(job->s_fence, job->entity);
684 }
685 EXPORT_SYMBOL(drm_sched_job_arm);
686 
687 /**
688  * drm_sched_job_add_dependency - adds the fence as a job dependency
689  * @job: scheduler job to add the dependencies to
690  * @fence: the dma_fence to add to the list of dependencies.
691  *
692  * Note that @fence is consumed in both the success and error cases.
693  *
694  * Returns:
695  * 0 on success, or an error on failing to expand the array.
696  */
697 int drm_sched_job_add_dependency(struct drm_sched_job *job,
698 				 struct dma_fence *fence)
699 {
700 	struct dma_fence *entry;
701 	unsigned long index;
702 	u32 id = 0;
703 	int ret;
704 
705 	if (!fence)
706 		return 0;
707 
708 	/* Deduplicate if we already depend on a fence from the same context.
709 	 * This lets the size of the array of deps scale with the number of
710 	 * engines involved, rather than the number of BOs.
711 	 */
712 	xa_for_each(&job->dependencies, index, entry) {
713 		if (entry->context != fence->context)
714 			continue;
715 
716 		if (dma_fence_is_later(fence, entry)) {
717 			dma_fence_put(entry);
718 			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
719 		} else {
720 			dma_fence_put(fence);
721 		}
722 		return 0;
723 	}
724 
725 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
726 	if (ret != 0)
727 		dma_fence_put(fence);
728 
729 	return ret;
730 }
731 EXPORT_SYMBOL(drm_sched_job_add_dependency);
732 
733 /**
734  * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
735  * @job: scheduler job to add the dependencies to
736  * @file: drm file private pointer
737  * @handle: syncobj handle to lookup
738  * @point: timeline point
739  *
740  * This adds the fence matching the given syncobj to @job.
741  *
742  * Returns:
743  * 0 on success, or an error on failing to expand the array.
744  */
745 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
746 					 struct drm_file *file,
747 					 u32 handle,
748 					 u32 point)
749 {
750 	struct dma_fence *fence;
751 	int ret;
752 
753 	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
754 	if (ret)
755 		return ret;
756 
757 	return drm_sched_job_add_dependency(job, fence);
758 }
759 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
760 
761 /**
762  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
763  * @job: scheduler job to add the dependencies to
764  * @resv: the dma_resv object to get the fences from
765  * @usage: the dma_resv_usage to use to filter the fences
766  *
767  * This adds all fences matching the given usage from @resv to @job.
768  * Must be called with the @resv lock held.
769  *
770  * Returns:
771  * 0 on success, or an error on failing to expand the array.
772  */
773 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
774 					struct dma_resv *resv,
775 					enum dma_resv_usage usage)
776 {
777 	struct dma_resv_iter cursor;
778 	struct dma_fence *fence;
779 	int ret;
780 
781 	dma_resv_assert_held(resv);
782 
783 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
784 		/* Make sure to grab an additional ref on the added fence */
785 		dma_fence_get(fence);
786 		ret = drm_sched_job_add_dependency(job, fence);
787 		if (ret) {
788 			dma_fence_put(fence);
789 			return ret;
790 		}
791 	}
792 	return 0;
793 }
794 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
795 
796 /**
797  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
798  *   dependencies
799  * @job: scheduler job to add the dependencies to
800  * @obj: the gem object to add new dependencies from.
801  * @write: whether the job might write the object (so we need to depend on
802  * shared fences in the reservation object).
803  *
804  * This should be called after drm_gem_lock_reservations() on your array of
805  * GEM objects used in the job but before updating the reservations with your
806  * own fences.
807  *
808  * Returns:
809  * 0 on success, or an error on failing to expand the array.
810  */
811 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
812 					    struct drm_gem_object *obj,
813 					    bool write)
814 {
815 	return drm_sched_job_add_resv_dependencies(job, obj->resv,
816 						   dma_resv_usage_rw(write));
817 }
818 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
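/*
 * Illustrative sketch (editor addition): dependencies are collected after
 * drm_sched_job_init() and before the job is armed and pushed.  The BO loop
 * and the "bos"/"num_bos"/"err_cleanup" names are hypothetical.
 *
 *	for (i = 0; i < num_bos; i++) {
 *		ret = drm_sched_job_add_implicit_dependencies(&my_job->base,
 *							      bos[i], write);
 *		if (ret)
 *			goto err_cleanup;
 *	}
 */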
819 
820 /**
821  * drm_sched_job_cleanup - clean up scheduler job resources
822  * @job: scheduler job to clean up
823  *
824  * Cleans up the resources allocated with drm_sched_job_init().
825  *
826  * Drivers should call this from their error unwind code if @job is aborted
827  * before drm_sched_job_arm() is called.
828  *
829  * After that point of no return @job is committed to be executed by the
830  * scheduler, and this function should be called from the
831  * &drm_sched_backend_ops.free_job callback.
832  */
833 void drm_sched_job_cleanup(struct drm_sched_job *job)
834 {
835 	struct dma_fence *fence;
836 	unsigned long index;
837 
838 	if (kref_read(&job->s_fence->finished.refcount)) {
839 		/* drm_sched_job_arm() has been called */
840 		dma_fence_put(&job->s_fence->finished);
841 	} else {
842 		/* aborted job before committing to run it */
843 		drm_sched_fence_free(job->s_fence);
844 	}
845 
846 	job->s_fence = NULL;
847 
848 	xa_for_each(&job->dependencies, index, fence) {
849 		dma_fence_put(fence);
850 	}
851 	xa_destroy(&job->dependencies);
852 
853 }
854 EXPORT_SYMBOL(drm_sched_job_cleanup);
855 
856 /**
857  * drm_sched_can_queue -- Can we queue more to the hardware?
858  * @sched: scheduler instance
859  *
860  * Return true if we can push more jobs to the hw, otherwise false.
861  */
862 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
863 {
864 	return atomic_read(&sched->hw_rq_count) <
865 		sched->hw_submission_limit;
866 }
867 
868 /**
869  * drm_sched_wakeup_if_can_queue - Wake up the scheduler
870  * @sched: scheduler instance
871  *
872  * Wake up the scheduler if we can queue jobs.
873  */
874 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
875 {
876 	if (drm_sched_can_queue(sched))
877 		wake_up_interruptible(&sched->wake_up_worker);
878 }
879 
880 /**
881  * drm_sched_select_entity - Select next entity to process
882  *
883  * @sched: scheduler instance
884  *
885  * Returns the entity to process or NULL if none are found.
886  */
887 static struct drm_sched_entity *
888 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
889 {
890 	struct drm_sched_entity *entity;
891 	int i;
892 
893 	if (!drm_sched_can_queue(sched))
894 		return NULL;
895 
896 	/* Kernel run queue has higher priority than normal run queue */
897 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
898 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
899 			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
900 			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
901 		if (entity)
902 			break;
903 	}
904 
905 	return entity;
906 }
907 
908 /**
909  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
910  *
911  * @sched: scheduler instance
912  *
913  * Returns the next finished job from the pending list (if there is one)
914  * ready to be destroyed.
915  */
916 static struct drm_sched_job *
917 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
918 {
919 	struct drm_sched_job *job, *next;
920 
921 	spin_lock(&sched->job_list_lock);
922 
923 	job = list_first_entry_or_null(&sched->pending_list,
924 				       struct drm_sched_job, list);
925 
926 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
927 		/* remove job from pending_list */
928 		list_del_init(&job->list);
929 
930 		/* cancel this job's TO timer */
931 		cancel_delayed_work(&sched->work_tdr);
932 		/* make the scheduled timestamp more accurate */
933 		next = list_first_entry_or_null(&sched->pending_list,
934 						typeof(*next), list);
935 
936 		if (next) {
937 			next->s_fence->scheduled.timestamp =
938 				dma_fence_timestamp(&job->s_fence->finished);
939 			/* start TO timer for next job */
940 			drm_sched_start_timeout(sched);
941 		}
942 	} else {
943 		job = NULL;
944 	}
945 
946 	spin_unlock(&sched->job_list_lock);
947 
948 	return job;
949 }
950 
951 /**
952  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
953  * @sched_list: list of drm_gpu_schedulers
954  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
955  *
956  * Returns a pointer to the sched with the least load or NULL if none of the
957  * drm_gpu_schedulers are ready
958  */
959 struct drm_gpu_scheduler *
960 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
961 		     unsigned int num_sched_list)
962 {
963 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
964 	int i;
965 	unsigned int min_score = UINT_MAX, num_score;
966 
967 	for (i = 0; i < num_sched_list; ++i) {
968 		sched = sched_list[i];
969 
970 		if (!sched->ready) {
971 			DRM_WARN("scheduler %s is not ready, skipping",
972 				 sched->name);
973 			continue;
974 		}
975 
976 		num_score = atomic_read(sched->score);
977 		if (num_score < min_score) {
978 			min_score = num_score;
979 			picked_sched = sched;
980 		}
981 	}
982 
983 	return picked_sched;
984 }
985 EXPORT_SYMBOL(drm_sched_pick_best);
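/*
 * Illustrative sketch (editor addition): drm_sched_pick_best() is used for
 * load balancing across a scheduler list (see drm_sched_entity_select_rq()),
 * but it can also be called directly; "sched_list"/"num_scheds" are whatever
 * the driver maintains.
 *
 *	struct drm_gpu_scheduler *sched;
 *
 *	sched = drm_sched_pick_best(sched_list, num_scheds);
 *	if (!sched)
 *		return -ENODEV;
 */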
986 
987 /**
988  * drm_sched_blocked - check if the scheduler is blocked
989  *
990  * @sched: scheduler instance
991  *
992  * Returns true if blocked, otherwise false.
993  */
994 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
995 {
996 	if (kthread_should_park()) {
997 		kthread_parkme();
998 		return true;
999 	}
1000 
1001 	return false;
1002 }
1003 
1004 /**
1005  * drm_sched_main - main scheduler thread
1006  *
1007  * @param: scheduler instance
1008  *
1009  * Returns 0.
1010  */
1011 static int drm_sched_main(void *param)
1012 {
1013 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
1014 	int r;
1015 
1016 #ifdef __linux__
1017 	sched_set_fifo_low(current);
1018 #endif
1019 
1020 	while (!kthread_should_stop()) {
1021 		struct drm_sched_entity *entity = NULL;
1022 		struct drm_sched_fence *s_fence;
1023 		struct drm_sched_job *sched_job;
1024 		struct dma_fence *fence;
1025 		struct drm_sched_job *cleanup_job = NULL;
1026 
1027 		wait_event_interruptible(sched->wake_up_worker,
1028 					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
1029 					 (!drm_sched_blocked(sched) &&
1030 					  (entity = drm_sched_select_entity(sched))) ||
1031 					 kthread_should_stop());
1032 
1033 		if (cleanup_job)
1034 			sched->ops->free_job(cleanup_job);
1035 
1036 		if (!entity)
1037 			continue;
1038 
1039 		sched_job = drm_sched_entity_pop_job(entity);
1040 
1041 		if (!sched_job) {
1042 			complete_all(&entity->entity_idle);
1043 			continue;
1044 		}
1045 
1046 		s_fence = sched_job->s_fence;
1047 
1048 		atomic_inc(&sched->hw_rq_count);
1049 		drm_sched_job_begin(sched_job);
1050 
1051 		trace_drm_run_job(sched_job, entity);
1052 		fence = sched->ops->run_job(sched_job);
1053 		complete_all(&entity->entity_idle);
1054 		drm_sched_fence_scheduled(s_fence, fence);
1055 
1056 		if (!IS_ERR_OR_NULL(fence)) {
1057 			/* Drop for original kref_init of the fence */
1058 			dma_fence_put(fence);
1059 
1060 			r = dma_fence_add_callback(fence, &sched_job->cb,
1061 						   drm_sched_job_done_cb);
1062 			if (r == -ENOENT)
1063 				drm_sched_job_done(sched_job, fence->error);
1064 			else if (r)
1065 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
1066 					  r);
1067 		} else {
1068 			drm_sched_job_done(sched_job, IS_ERR(fence) ?
1069 					   PTR_ERR(fence) : 0);
1070 		}
1071 
1072 		wake_up(&sched->job_scheduled);
1073 	}
1074 	return 0;
1075 }
1076 
1077 /**
1078  * drm_sched_init - Init a gpu scheduler instance
1079  *
1080  * @sched: scheduler instance
1081  * @ops: backend operations for this scheduler
1082  * @hw_submission: number of hw submissions that can be in flight
1083  * @hang_limit: number of times to allow a job to hang before dropping it
1084  * @timeout: timeout value in jiffies for the scheduler
1085  * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
1086  *		used
1087  * @score: optional score atomic shared with other schedulers
1088  * @name: name used for debugging
1089  * @dev: target &struct device
1090  *
1091  * Return 0 on success, otherwise error code.
1092  */
1093 int drm_sched_init(struct drm_gpu_scheduler *sched,
1094 		   const struct drm_sched_backend_ops *ops,
1095 		   unsigned hw_submission, unsigned hang_limit,
1096 		   long timeout, struct workqueue_struct *timeout_wq,
1097 		   atomic_t *score, const char *name, struct device *dev)
1098 {
1099 	int i, ret;
1100 	sched->ops = ops;
1101 	sched->hw_submission_limit = hw_submission;
1102 	sched->name = name;
1103 	sched->timeout = timeout;
1104 	sched->timeout_wq = timeout_wq ? : system_wq;
1105 	sched->hang_limit = hang_limit;
1106 	sched->score = score ? score : &sched->_score;
1107 	sched->dev = dev;
1108 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
1109 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
1110 
1111 	init_waitqueue_head(&sched->wake_up_worker);
1112 	init_waitqueue_head(&sched->job_scheduled);
1113 	INIT_LIST_HEAD(&sched->pending_list);
1114 	mtx_init(&sched->job_list_lock, IPL_NONE);
1115 	atomic_set(&sched->hw_rq_count, 0);
1116 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1117 	atomic_set(&sched->_score, 0);
1118 	atomic64_set(&sched->job_id_count, 0);
1119 
1120 	/* Each scheduler will run on a separate kernel thread */
1121 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
1122 	if (IS_ERR(sched->thread)) {
1123 		ret = PTR_ERR(sched->thread);
1124 		sched->thread = NULL;
1125 		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
1126 		return ret;
1127 	}
1128 
1129 	sched->ready = true;
1130 	return 0;
1131 }
1132 EXPORT_SYMBOL(drm_sched_init);
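/*
 * Illustrative sketch (editor addition): a typical per-ring initialization
 * with hypothetical values; the actual numbers are driver policy.
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission
 *			     3,				// hang_limit
 *			     msecs_to_jiffies(10000),	// timeout
 *			     NULL,			// timeout_wq: use system_wq
 *			     NULL,			// score: use internal counter
 *			     ring->name, dev);
 *	if (ret)
 *		return ret;
 */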
1133 
1134 /**
1135  * drm_sched_fini - Destroy a gpu scheduler
1136  *
1137  * @sched: scheduler instance
1138  *
1139  * Tears down and cleans up the scheduler.
1140  */
1141 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1142 {
1143 	struct drm_sched_entity *s_entity;
1144 	int i;
1145 
1146 	if (sched->thread)
1147 		kthread_stop(sched->thread);
1148 
1149 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
1150 		struct drm_sched_rq *rq = &sched->sched_rq[i];
1151 
1152 		spin_lock(&rq->lock);
1153 		list_for_each_entry(s_entity, &rq->entities, list)
1154 			/*
1155 			 * Prevents reinsertion and marks job_queue as idle,
1156 			 * it will be removed from rq in drm_sched_entity_fini
1157 			 * eventually
1158 			 */
1159 			s_entity->stopped = true;
1160 		spin_unlock(&rq->lock);
1161 
1162 	}
1163 
1164 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1165 	wake_up_all(&sched->job_scheduled);
1166 
1167 	/* Confirm no work left behind accessing device structures */
1168 	cancel_delayed_work_sync(&sched->work_tdr);
1169 
1170 	sched->ready = false;
1171 }
1172 EXPORT_SYMBOL(drm_sched_fini);
1173 
1174 /**
1175  * drm_sched_increase_karma - Update sched_entity guilty flag
1176  *
1177  * @bad: The job guilty of time out
1178  *
1179  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1180  * limit of the scheduler then the respective sched entity is marked guilty and
1181  * jobs from it will not be scheduled further
1182  */
1183 void drm_sched_increase_karma(struct drm_sched_job *bad)
1184 {
1185 	int i;
1186 	struct drm_sched_entity *tmp;
1187 	struct drm_sched_entity *entity;
1188 	struct drm_gpu_scheduler *sched = bad->sched;
1189 
1190 	/* don't change @bad's karma if it's from KERNEL RQ,
1191 	 * because a GPU hang can sometimes corrupt kernel jobs (like VM updating
1192 	 * jobs), but keep in mind that kernel jobs are always considered good.
1193 	 */
1194 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1195 		atomic_inc(&bad->karma);
1196 
1197 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
1198 		     i++) {
1199 			struct drm_sched_rq *rq = &sched->sched_rq[i];
1200 
1201 			spin_lock(&rq->lock);
1202 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1203 				if (bad->s_fence->scheduled.context ==
1204 				    entity->fence_context) {
1205 					if (entity->guilty)
1206 						atomic_set(entity->guilty, 1);
1207 					break;
1208 				}
1209 			}
1210 			spin_unlock(&rq->lock);
1211 			if (&entity->list != &rq->entities)
1212 				break;
1213 		}
1214 	}
1215 }
1216 EXPORT_SYMBOL(drm_sched_increase_karma);
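/*
 * Illustrative sketch (editor addition): karma is normally bumped at the top
 * of a driver's timedout_job callback, before the stop/reset/start sequence
 * sketched after drm_sched_start() above.  "my_timedout_job" is hypothetical.
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		drm_sched_increase_karma(job);
 *		...
 *	}
 */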
1217