xref: /openbsd/sys/dev/pci/drm/scheduler/sched_main.c (revision 62d8ac78)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects entities
30  * from the run queue using a FIFO. The scheduler provides dependency handling
31  * features among jobs. The driver is supposed to provide callback functions to
32  * the scheduler for backend operations, like submitting a job to the hardware
33  * run queue or returning the dependencies of a job.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW,HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  *
46  * Note that once a job has been taken from the entity's queue and pushed to
47  * the hardware, i.e. the pending queue, the entity must not be referenced
48  * any more through the job's entity pointer.
49  */
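/*
 * For illustration, a minimal sketch of how a driver might wire up the
 * backend operations and create one scheduler per ring plus an entity.
 * The foo_* names are hypothetical driver code, not part of this file:
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		// Push the job's commands to the hardware ring and return
 *		// the hardware fence that signals on completion.
 *		return foo_hw_submit(to_foo_job(sched_job));
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		// Engine reset; see drm_sched_stop()/drm_sched_start() below.
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 *
 *	// One scheduler per hardware run queue...
 *	drm_sched_init(&foo->sched, &foo_sched_ops, 64, 0,
 *		       msecs_to_jiffies(10000), NULL, NULL, "foo_ring", dev);
 *
 *	// ...and one entity per userspace context:
 *	struct drm_gpu_scheduler *sched = &foo->sched;
 *	drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      &sched, 1, NULL);
 */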
50 
51 #include <linux/kthread.h>
52 #include <linux/wait.h>
53 #include <linux/sched.h>
54 #include <linux/completion.h>
55 #include <linux/dma-resv.h>
56 #ifdef __linux__
57 #include <uapi/linux/sched/types.h>
58 #endif
59 
60 #include <drm/drm_print.h>
61 #include <drm/drm_gem.h>
62 #include <drm/drm_syncobj.h>
63 #include <drm/gpu_scheduler.h>
64 #include <drm/spsc_queue.h>
65 
66 #define CREATE_TRACE_POINTS
67 #include "gpu_scheduler_trace.h"
68 
69 #define to_drm_sched_job(sched_job)		\
70 		container_of((sched_job), struct drm_sched_job, queue_node)
71 
72 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
73 
74 /**
75  * DOC: sched_policy (int)
76  * Used to override the default entity scheduling policy in a run queue.
77  */
78 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
79 module_param_named(sched_policy, drm_sched_policy, int, 0444);
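/*
 * Assuming the scheduler is built as the usual gpu_sched module, the policy
 * can thus be overridden at boot time, e.g. with "gpu_sched.sched_policy=0"
 * on the kernel command line to select round-robin instead of FIFO.
 */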
80 
81 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
82 							    const struct rb_node *b)
83 {
84 	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
85 	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
86 
87 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
88 }
89 
90 static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
91 {
92 	struct drm_sched_rq *rq = entity->rq;
93 
94 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
95 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
96 		RB_CLEAR_NODE(&entity->rb_tree_node);
97 	}
98 }
99 
100 void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
101 {
102 	/*
103 	 * Both locks need to be grabbed: one to protect against the entity's
104 	 * rq changing under us in a concurrent drm_sched_entity_select_rq(),
105 	 * and the other to update the rb tree structure.
106 	 */
107 	spin_lock(&entity->rq_lock);
108 	spin_lock(&entity->rq->lock);
109 
110 	drm_sched_rq_remove_fifo_locked(entity);
111 
112 	entity->oldest_job_waiting = ts;
113 
114 	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
115 		      drm_sched_entity_compare_before);
116 
117 	spin_unlock(&entity->rq->lock);
118 	spin_unlock(&entity->rq_lock);
119 }
120 
121 /**
122  * drm_sched_rq_init - initialize a given run queue struct
123  *
124  * @sched: scheduler instance to associate with this run queue
125  * @rq: scheduler run queue
126  *
127  * Initializes a scheduler runqueue.
128  */
129 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
130 			      struct drm_sched_rq *rq)
131 {
132 	mtx_init(&rq->lock, IPL_NONE);
133 	INIT_LIST_HEAD(&rq->entities);
134 	rq->rb_tree_root = RB_ROOT_CACHED;
135 	rq->current_entity = NULL;
136 	rq->sched = sched;
137 }
138 
139 /**
140  * drm_sched_rq_add_entity - add an entity
141  *
142  * @rq: scheduler run queue
143  * @entity: scheduler entity
144  *
145  * Adds a scheduler entity to the run queue.
146  */
147 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
148 			     struct drm_sched_entity *entity)
149 {
150 	if (!list_empty(&entity->list))
151 		return;
152 
153 	spin_lock(&rq->lock);
154 
155 	atomic_inc(rq->sched->score);
156 	list_add_tail(&entity->list, &rq->entities);
157 
158 	spin_unlock(&rq->lock);
159 }
160 
161 /**
162  * drm_sched_rq_remove_entity - remove an entity
163  *
164  * @rq: scheduler run queue
165  * @entity: scheduler entity
166  *
167  * Removes a scheduler entity from the run queue.
168  */
169 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
170 				struct drm_sched_entity *entity)
171 {
172 	if (list_empty(&entity->list))
173 		return;
174 
175 	spin_lock(&rq->lock);
176 
177 	atomic_dec(rq->sched->score);
178 	list_del_init(&entity->list);
179 
180 	if (rq->current_entity == entity)
181 		rq->current_entity = NULL;
182 
183 	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
184 		drm_sched_rq_remove_fifo_locked(entity);
185 
186 	spin_unlock(&rq->lock);
187 }
188 
189 /**
190  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
191  *
192  * @rq: scheduler run queue to check.
193  *
194  * Try to find a ready entity, returns NULL if none found.
195  */
196 static struct drm_sched_entity *
197 drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
198 {
199 	struct drm_sched_entity *entity;
200 
201 	spin_lock(&rq->lock);
202 
203 	entity = rq->current_entity;
204 	if (entity) {
205 		list_for_each_entry_continue(entity, &rq->entities, list) {
206 			if (drm_sched_entity_is_ready(entity)) {
207 				rq->current_entity = entity;
208 				reinit_completion(&entity->entity_idle);
209 				spin_unlock(&rq->lock);
210 				return entity;
211 			}
212 		}
213 	}
214 
215 	list_for_each_entry(entity, &rq->entities, list) {
216 
217 		if (drm_sched_entity_is_ready(entity)) {
218 			rq->current_entity = entity;
219 			reinit_completion(&entity->entity_idle);
220 			spin_unlock(&rq->lock);
221 			return entity;
222 		}
223 
224 		if (entity == rq->current_entity)
225 			break;
226 	}
227 
228 	spin_unlock(&rq->lock);
229 
230 	return NULL;
231 }
232 
233 /**
234  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
235  *
236  * @rq: scheduler run queue to check.
237  *
238  * Find oldest waiting ready entity, returns NULL if none found.
239  */
240 static struct drm_sched_entity *
241 drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
242 {
243 	struct rb_node *rb;
244 
245 	spin_lock(&rq->lock);
246 	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
247 		struct drm_sched_entity *entity;
248 
249 		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
250 		if (drm_sched_entity_is_ready(entity)) {
251 			rq->current_entity = entity;
252 			reinit_completion(&entity->entity_idle);
253 			break;
254 		}
255 	}
256 	spin_unlock(&rq->lock);
257 
258 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
259 }
260 
261 /**
262  * drm_sched_job_done - complete a job
263  * @s_job: pointer to the job which is done
 * @result: the job's completion status
264  *
265  * Finish the job's fence and wake up the worker thread.
266  */
267 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
268 {
269 	struct drm_sched_fence *s_fence = s_job->s_fence;
270 	struct drm_gpu_scheduler *sched = s_fence->sched;
271 
272 	atomic_dec(&sched->hw_rq_count);
273 	atomic_dec(sched->score);
274 
275 	trace_drm_sched_process_job(s_fence);
276 
277 	dma_fence_get(&s_fence->finished);
278 	drm_sched_fence_finished(s_fence, result);
279 	dma_fence_put(&s_fence->finished);
280 	wake_up_interruptible(&sched->wake_up_worker);
281 }
282 
283 /**
284  * drm_sched_job_done_cb - the callback for a done job
285  * @f: fence
286  * @cb: fence callbacks
287  */
288 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
289 {
290 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
291 
292 	drm_sched_job_done(s_job, f->error);
293 }
294 
295 /**
296  * drm_sched_start_timeout - start timeout for reset worker
297  *
298  * @sched: scheduler instance to start the worker for
299  *
300  * Start the timeout for the given scheduler.
301  */
302 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
303 {
304 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
305 	    !list_empty(&sched->pending_list))
306 		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
307 }
308 
309 /**
310  * drm_sched_fault - immediately start timeout handler
311  *
312  * @sched: scheduler where the timeout handling should be started.
313  *
314  * Start timeout handling immediately when the driver detects a hardware fault.
315  */
316 void drm_sched_fault(struct drm_gpu_scheduler *sched)
317 {
318 	if (sched->timeout_wq)
319 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
320 }
321 EXPORT_SYMBOL(drm_sched_fault);
322 
323 /**
324  * drm_sched_suspend_timeout - Suspend scheduler job timeout
325  *
326  * @sched: scheduler instance for which to suspend the timeout
327  *
328  * Suspend the delayed work timeout for the scheduler. This is done by
329  * modifying the delayed work timeout to an arbitrary large value,
330  * MAX_SCHEDULE_TIMEOUT in this case.
331  *
332  * Returns the timeout remaining
333  *
334  */
335 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
336 {
337 	unsigned long sched_timeout, now = jiffies;
338 
339 #ifdef __linux__
340 	sched_timeout = sched->work_tdr.timer.expires;
341 #else
342 	sched_timeout = sched->work_tdr.to.to_time;
343 #endif
344 
345 	/*
346 	 * Modify the timeout to an arbitrarily large value. This also prevents
347 	 * the timeout from being restarted when new submissions arrive.
348 	 */
349 	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
350 			&& time_after(sched_timeout, now))
351 		return sched_timeout - now;
352 	else
353 		return sched->timeout;
354 }
355 EXPORT_SYMBOL(drm_sched_suspend_timeout);
356 
357 /**
358  * drm_sched_resume_timeout - Resume scheduler job timeout
359  *
360  * @sched: scheduler instance for which to resume the timeout
361  * @remaining: remaining timeout
362  *
363  * Resume the delayed work timeout for the scheduler.
364  */
365 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
366 		unsigned long remaining)
367 {
368 	spin_lock(&sched->job_list_lock);
369 
370 	if (list_empty(&sched->pending_list))
371 		cancel_delayed_work(&sched->work_tdr);
372 	else
373 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
374 
375 	spin_unlock(&sched->job_list_lock);
376 }
377 EXPORT_SYMBOL(drm_sched_resume_timeout);
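/*
 * A sketch of how a driver might pair the two calls above so that a long,
 * driver-initiated operation (e.g. a preemption request) is not mistaken
 * for a hang; the foo_* names are hypothetical:
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&foo->sched);
 *
 *	foo_preempt_and_save_context(foo);	// may legitimately take long
 *
 *	drm_sched_resume_timeout(&foo->sched, remaining);
 */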
378 
379 static void drm_sched_job_begin(struct drm_sched_job *s_job)
380 {
381 	struct drm_gpu_scheduler *sched = s_job->sched;
382 
383 	spin_lock(&sched->job_list_lock);
384 	list_add_tail(&s_job->list, &sched->pending_list);
385 	drm_sched_start_timeout(sched);
386 	spin_unlock(&sched->job_list_lock);
387 }
388 
389 static void drm_sched_job_timedout(struct work_struct *work)
390 {
391 	struct drm_gpu_scheduler *sched;
392 	struct drm_sched_job *job;
393 	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
394 
395 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
396 
397 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
398 	spin_lock(&sched->job_list_lock);
399 	job = list_first_entry_or_null(&sched->pending_list,
400 				       struct drm_sched_job, list);
401 
402 	if (job) {
403 		/*
404 		 * Remove the bad job so it cannot be freed by concurrent
405 		 * drm_sched_cleanup_jobs. It will be reinserted after
406 		 * sched->thread is parked, at which point it's safe.
407 		 */
408 		list_del_init(&job->list);
409 		spin_unlock(&sched->job_list_lock);
410 
411 		status = job->sched->ops->timedout_job(job);
412 
413 		/*
414 		 * The guilty job completed and hence needs to be manually
415 		 * removed. See the drm_sched_stop() documentation.
416 		 */
417 		if (sched->free_guilty) {
418 			job->sched->ops->free_job(job);
419 			sched->free_guilty = false;
420 		}
421 	} else {
422 		spin_unlock(&sched->job_list_lock);
423 	}
424 
425 	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
426 		spin_lock(&sched->job_list_lock);
427 		drm_sched_start_timeout(sched);
428 		spin_unlock(&sched->job_list_lock);
429 	}
430 }
431 
432 /**
433  * drm_sched_stop - stop the scheduler
434  *
435  * @sched: scheduler instance
436  * @bad: job which caused the time out
437  *
438  * Stops the scheduler and also removes and frees all completed jobs.
439  * Note: the bad job will not be freed, as it might be used later, and so
440  * it is the caller's responsibility to release it manually if it is no
441  * longer part of the pending list.
442  *
443  */
444 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
445 {
446 	struct drm_sched_job *s_job, *tmp;
447 
448 	kthread_park(sched->thread);
449 
450 	/*
451 	 * Reinsert the bad job here - it is now safe, as
452 	 * drm_sched_get_cleanup_job cannot race against us and release the
453 	 * bad job at this point: we parked (waited for) any in-progress
454 	 * (earlier) cleanups, and drm_sched_get_cleanup_job will not be
455 	 * called again until the scheduler thread is unparked.
456 	 */
457 	if (bad && bad->sched == sched)
458 		/*
459 		 * Add at the head of the queue to reflect it was the earliest
460 		 * job extracted.
461 		 */
462 		list_add(&bad->list, &sched->pending_list);
463 
464 	/*
465 	 * Iterate the job list from later to earlier and either deactivate
466 	 * the jobs' HW callbacks or remove them from the pending list if they
467 	 * have already signaled.
468 	 * This iteration is thread safe as the sched thread is stopped.
469 	 */
470 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
471 					 list) {
472 		if (s_job->s_fence->parent &&
473 		    dma_fence_remove_callback(s_job->s_fence->parent,
474 					      &s_job->cb)) {
475 			dma_fence_put(s_job->s_fence->parent);
476 			s_job->s_fence->parent = NULL;
477 			atomic_dec(&sched->hw_rq_count);
478 		} else {
479 			/*
480 			 * remove job from pending_list.
481 			 * Locking here is for concurrent resume timeout
482 			 */
483 			spin_lock(&sched->job_list_lock);
484 			list_del_init(&s_job->list);
485 			spin_unlock(&sched->job_list_lock);
486 
487 			/*
488 			 * Wait for job's HW fence callback to finish using s_job
489 			 * before releasing it.
490 			 *
491 			 * The job is still alive, so the fence refcount is at least 1.
492 			 */
493 			dma_fence_wait(&s_job->s_fence->finished, false);
494 
495 			/*
496 			 * We must keep the bad job alive for later use during
497 			 * recovery by some of the drivers, but leave a hint
498 			 * that the guilty job must be released.
499 			 */
500 			if (bad != s_job)
501 				sched->ops->free_job(s_job);
502 			else
503 				sched->free_guilty = true;
504 		}
505 	}
506 
507 	/*
508 	 * Stop the pending timer in flight, as we rearm it in drm_sched_start.
509 	 * This prevents in-progress timeout work from firing right after this
510 	 * TDR has finished and before the newly restarted jobs have had a
511 	 * chance to complete.
512 	 */
513 	cancel_delayed_work(&sched->work_tdr);
514 }
515 
516 EXPORT_SYMBOL(drm_sched_stop);
517 
518 /**
519  * drm_sched_start - recover jobs after a reset
520  *
521  * @sched: scheduler instance
522  * @full_recovery: proceed with complete sched restart
523  *
524  */
525 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
526 {
527 	struct drm_sched_job *s_job, *tmp;
528 	int r;
529 
530 	/*
531 	 * Locking the list is not required here as the sched thread is parked
532 	 * so no new jobs are being inserted or removed. Also concurrent
533 	 * GPU recovers can't run in parallel.
534 	 */
535 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
536 		struct dma_fence *fence = s_job->s_fence->parent;
537 
538 		atomic_inc(&sched->hw_rq_count);
539 
540 		if (!full_recovery)
541 			continue;
542 
543 		if (fence) {
544 			r = dma_fence_add_callback(fence, &s_job->cb,
545 						   drm_sched_job_done_cb);
546 			if (r == -ENOENT)
547 				drm_sched_job_done(s_job, fence->error);
548 			else if (r)
549 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
550 					  r);
551 		} else
552 			drm_sched_job_done(s_job, -ECANCELED);
553 	}
554 
555 	if (full_recovery) {
556 		spin_lock(&sched->job_list_lock);
557 		drm_sched_start_timeout(sched);
558 		spin_unlock(&sched->job_list_lock);
559 	}
560 
561 	kthread_unpark(sched->thread);
562 }
563 EXPORT_SYMBOL(drm_sched_start);
564 
565 /**
566  * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
567  *
568  * @sched: scheduler instance
569  *
570  * Re-submitting jobs was a concept AMD came up with as a cheap way to
571  * implement recovery after a job timeout.
572  *
573  * This turned out not to work very well. First of all, there are many
574  * problems with the dma_fence implementation and requirements. Either the
575  * implementation risks deadlocks with core memory management or violates
576  * documented implementation details of the dma_fence object.
577  *
578  * Drivers can still save and restore their state for recovery operations, but
579  * we shouldn't make this a general scheduler feature around the dma_fence
580  * interface.
581  */
582 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
583 {
584 	struct drm_sched_job *s_job, *tmp;
585 	uint64_t guilty_context;
586 	bool found_guilty = false;
587 	struct dma_fence *fence;
588 
589 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
590 		struct drm_sched_fence *s_fence = s_job->s_fence;
591 
592 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
593 			found_guilty = true;
594 			guilty_context = s_job->s_fence->scheduled.context;
595 		}
596 
597 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
598 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
599 
600 		fence = sched->ops->run_job(s_job);
601 
602 		if (IS_ERR_OR_NULL(fence)) {
603 			if (IS_ERR(fence))
604 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
605 
606 			s_job->s_fence->parent = NULL;
607 		} else {
608 
609 			s_job->s_fence->parent = dma_fence_get(fence);
610 
611 			/* Drop for original kref_init */
612 			dma_fence_put(fence);
613 		}
614 	}
615 }
616 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
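/*
 * For reference, the classic timeout recovery sequence built from the
 * helpers above - including the now-discouraged resubmit step - looks
 * roughly like the following, typically run from
 * &drm_sched_backend_ops.timedout_job; foo_hw_reset() is a hypothetical
 * driver function:
 *
 *	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);	// park thread, prune pending list
 *		drm_sched_increase_karma(bad);	// mark the guilty entity
 *		foo_hw_reset(sched);		// driver-specific engine reset
 *		drm_sched_resubmit_jobs(sched);	// deprecated, see above
 *		drm_sched_start(sched, true);	// rearm callbacks, unpark thread
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */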
617 
618 /**
619  * drm_sched_job_init - init a scheduler job
620  * @job: scheduler job to init
621  * @entity: scheduler entity to use
622  * @owner: job owner for debugging
623  *
624  * Refer to drm_sched_entity_push_job() documentation
625  * for locking considerations.
626  *
627  * Drivers must make sure to call drm_sched_job_cleanup() if this function
628  * returns successfully, even if @job is aborted before drm_sched_job_arm().
629  *
630  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
631  * has died, which can mean that there's no valid runqueue for an @entity.
632  * This function returns -ENOENT in this case (which probably should be -EIO
633  * as a more meaningful return value).
634  *
635  * Returns 0 for success, negative error code otherwise.
636  */
637 int drm_sched_job_init(struct drm_sched_job *job,
638 		       struct drm_sched_entity *entity,
639 		       void *owner)
640 {
641 	if (!entity->rq)
642 		return -ENOENT;
643 
644 	/*
645 	 * We don't know for sure how the user has allocated the struct. Thus,
646 	 * zero it so that disallowed (i.e., too early) use of pointers that
647 	 * this function does not set is guaranteed to lead to a NULL pointer
648 	 * dereference instead of undefined behavior.
649 	 */
650 	memset(job, 0, sizeof(*job));
651 
652 	job->entity = entity;
653 	job->s_fence = drm_sched_fence_alloc(entity, owner);
654 	if (!job->s_fence)
655 		return -ENOMEM;
656 
657 	INIT_LIST_HEAD(&job->list);
658 
659 	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
660 
661 	return 0;
662 }
663 EXPORT_SYMBOL(drm_sched_job_init);
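/*
 * A minimal sketch of the submission flow this implies, including the error
 * unwind that drm_sched_job_cleanup() exists for; the foo_* job wrapper and
 * dependency helper are hypothetical:
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, foo_file);
 *	if (ret)
 *		return ret;
 *
 *	ret = foo_add_dependencies(job);	// drm_sched_job_add_*() calls
 *	if (ret) {
 *		// aborted before drm_sched_job_arm(): unwind manually
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);		// point of no return
 *	drm_sched_entity_push_job(&job->base);
 */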
664 
665 /**
666  * drm_sched_job_arm - arm a scheduler job for execution
667  * @job: scheduler job to arm
668  *
669  * This arms a scheduler job for execution. Specifically it initializes the
670  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
671  * or other places that need to track the completion of this job.
672  *
673  * Refer to drm_sched_entity_push_job() documentation for locking
674  * considerations.
675  *
676  * This can only be called if drm_sched_job_init() succeeded.
677  */
678 void drm_sched_job_arm(struct drm_sched_job *job)
679 {
680 	struct drm_gpu_scheduler *sched;
681 	struct drm_sched_entity *entity = job->entity;
682 
683 	BUG_ON(!entity);
684 	drm_sched_entity_select_rq(entity);
685 	sched = entity->rq->sched;
686 
687 	job->sched = sched;
688 	job->s_priority = entity->rq - sched->sched_rq;
689 	job->id = atomic64_inc_return(&sched->job_id_count);
690 
691 	drm_sched_fence_init(job->s_fence, job->entity);
692 }
693 EXPORT_SYMBOL(drm_sched_job_arm);
694 
695 /**
696  * drm_sched_job_add_dependency - adds the fence as a job dependency
697  * @job: scheduler job to add the dependencies to
698  * @fence: the dma_fence to add to the list of dependencies.
699  *
700  * Note that @fence is consumed in both the success and error cases.
701  *
702  * Returns:
703  * 0 on success, or an error on failing to expand the array.
704  */
705 int drm_sched_job_add_dependency(struct drm_sched_job *job,
706 				 struct dma_fence *fence)
707 {
708 	struct dma_fence *entry;
709 	unsigned long index;
710 	u32 id = 0;
711 	int ret;
712 
713 	if (!fence)
714 		return 0;
715 
716 	/* Deduplicate if we already depend on a fence from the same context.
717 	 * This lets the size of the array of deps scale with the number of
718 	 * engines involved, rather than the number of BOs.
719 	 */
720 	xa_for_each(&job->dependencies, index, entry) {
721 		if (entry->context != fence->context)
722 			continue;
723 
724 		if (dma_fence_is_later(fence, entry)) {
725 			dma_fence_put(entry);
726 			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
727 		} else {
728 			dma_fence_put(fence);
729 		}
730 		return 0;
731 	}
732 
733 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
734 	if (ret != 0)
735 		dma_fence_put(fence);
736 
737 	return ret;
738 }
739 EXPORT_SYMBOL(drm_sched_job_add_dependency);
740 
741 /**
742  * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
743  * @job: scheduler job to add the dependencies to
744  * @file: drm file private pointer
745  * @handle: syncobj handle to lookup
746  * @point: timeline point
747  *
748  * This adds the fence matching the given syncobj to @job.
749  *
750  * Returns:
751  * 0 on success, or an error on failing to expand the array.
752  */
753 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
754 					 struct drm_file *file,
755 					 u32 handle,
756 					 u32 point)
757 {
758 	struct dma_fence *fence;
759 	int ret;
760 
761 	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
762 	if (ret)
763 		return ret;
764 
765 	return drm_sched_job_add_dependency(job, fence);
766 }
767 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
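/*
 * E.g. an IOCTL handler with an "in" syncobj handle in its (hypothetical)
 * UAPI args could make the job wait for it with:
 *
 *	ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
 *						   args->in_syncobj, 0);
 */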
768 
769 /**
770  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
771  * @job: scheduler job to add the dependencies to
772  * @resv: the dma_resv object to get the fences from
773  * @usage: the dma_resv_usage to use to filter the fences
774  *
775  * This adds all fences matching the given usage from @resv to @job.
776  * Must be called with the @resv lock held.
777  *
778  * Returns:
779  * 0 on success, or an error on failing to expand the array.
780  */
781 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
782 					struct dma_resv *resv,
783 					enum dma_resv_usage usage)
784 {
785 	struct dma_resv_iter cursor;
786 	struct dma_fence *fence;
787 	int ret;
788 
789 	dma_resv_assert_held(resv);
790 
791 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
792 		/* Make sure to grab an additional ref on the added fence */
793 		dma_fence_get(fence);
794 		ret = drm_sched_job_add_dependency(job, fence);
795 		if (ret) {
796 			dma_fence_put(fence);
797 			return ret;
798 		}
799 	}
800 	return 0;
801 }
802 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
803 
804 /**
805  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
806  *   dependencies
807  * @job: scheduler job to add the dependencies to
808  * @obj: the gem object to add new dependencies from.
809  * @write: whether the job might write the object (so we need to depend on
810  * shared fences in the reservation object).
811  *
812  * This should be called after drm_gem_lock_reservations() on your array of
813  * GEM objects used in the job but before updating the reservations with your
814  * own fences.
815  *
816  * Returns:
817  * 0 on success, or an error on failing to expand the array.
818  */
819 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
820 					    struct drm_gem_object *obj,
821 					    bool write)
822 {
823 	return drm_sched_job_add_resv_dependencies(job, obj->resv,
824 						   dma_resv_usage_rw(write));
825 }
826 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
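/*
 * A sketch of the intended call order around the reservation object,
 * assuming a single BO (obj) for brevity, with job already armed so that
 * job->s_fence is initialized, and eliding dma_resv_reserve_fences():
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	ret = drm_sched_job_add_implicit_dependencies(job, obj, true);
 *	if (!ret)
 *		dma_resv_add_fence(obj->resv, &job->s_fence->finished,
 *				   DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(obj->resv);
 */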
827 
828 /**
829  * drm_sched_job_cleanup - clean up scheduler job resources
830  * @job: scheduler job to clean up
831  *
832  * Cleans up the resources allocated with drm_sched_job_init().
833  *
834  * Drivers should call this from their error unwind code if @job is aborted
835  * before drm_sched_job_arm() is called.
836  *
837  * After that point of no return @job is committed to be executed by the
838  * scheduler, and this function should be called from the
839  * &drm_sched_backend_ops.free_job callback.
840  */
841 void drm_sched_job_cleanup(struct drm_sched_job *job)
842 {
843 	struct dma_fence *fence;
844 	unsigned long index;
845 
846 	if (kref_read(&job->s_fence->finished.refcount)) {
847 		/* drm_sched_job_arm() has been called */
848 		dma_fence_put(&job->s_fence->finished);
849 	} else {
850 		/* aborted job before committing to run it */
851 		drm_sched_fence_free(job->s_fence);
852 	}
853 
854 	job->s_fence = NULL;
855 
856 	xa_for_each(&job->dependencies, index, fence) {
857 		dma_fence_put(fence);
858 	}
859 	xa_destroy(&job->dependencies);
860 
861 }
862 EXPORT_SYMBOL(drm_sched_job_cleanup);
863 
864 /**
865  * drm_sched_can_queue -- Can we queue more to the hardware?
866  * @sched: scheduler instance
867  *
868  * Return true if we can push more jobs to the hw, otherwise false.
869  */
870 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
871 {
872 	return atomic_read(&sched->hw_rq_count) <
873 		sched->hw_submission_limit;
874 }
875 
876 /**
877  * drm_sched_wakeup_if_can_queue - Wake up the scheduler
878  * @sched: scheduler instance
879  *
880  * Wake up the scheduler if we can queue jobs.
881  */
882 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
883 {
884 	if (drm_sched_can_queue(sched))
885 		wake_up_interruptible(&sched->wake_up_worker);
886 }
887 
888 /**
889  * drm_sched_select_entity - Select next entity to process
890  *
891  * @sched: scheduler instance
892  *
893  * Returns the entity to process or NULL if none are found.
894  */
895 static struct drm_sched_entity *
896 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
897 {
898 	struct drm_sched_entity *entity;
899 	int i;
900 
901 	if (!drm_sched_can_queue(sched))
902 		return NULL;
903 
904 	/* Kernel run queue has higher priority than normal run queue */
905 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
906 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
907 			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
908 			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
909 		if (entity)
910 			break;
911 	}
912 
913 	return entity;
914 }
915 
916 /**
917  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
918  *
919  * @sched: scheduler instance
920  *
921  * Returns the next finished job from the pending list (if there is one),
922  * ready to be destroyed.
923  */
924 static struct drm_sched_job *
925 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
926 {
927 	struct drm_sched_job *job, *next;
928 
929 	spin_lock(&sched->job_list_lock);
930 
931 	job = list_first_entry_or_null(&sched->pending_list,
932 				       struct drm_sched_job, list);
933 
934 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
935 		/* remove job from pending_list */
936 		list_del_init(&job->list);
937 
938 		/* cancel this job's TO timer */
939 		cancel_delayed_work(&sched->work_tdr);
940 		/* make the scheduled timestamp more accurate */
941 		next = list_first_entry_or_null(&sched->pending_list,
942 						typeof(*next), list);
943 
944 		if (next) {
945 			next->s_fence->scheduled.timestamp =
946 				dma_fence_timestamp(&job->s_fence->finished);
947 			/* start TO timer for next job */
948 			drm_sched_start_timeout(sched);
949 		}
950 	} else {
951 		job = NULL;
952 	}
953 
954 	spin_unlock(&sched->job_list_lock);
955 
956 	return job;
957 }
958 
959 /**
960  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
961  * @sched_list: list of drm_gpu_schedulers
962  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
963  *
964  * Returns a pointer to the sched with the least load, or NULL if none of
965  * the drm_gpu_schedulers are ready.
966  */
967 struct drm_gpu_scheduler *
968 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
969 		     unsigned int num_sched_list)
970 {
971 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
972 	int i;
973 	unsigned int min_score = UINT_MAX, num_score;
974 
975 	for (i = 0; i < num_sched_list; ++i) {
976 		sched = sched_list[i];
977 
978 		if (!sched->ready) {
979 			DRM_WARN("scheduler %s is not ready, skipping",
980 				 sched->name);
981 			continue;
982 		}
983 
984 		num_score = atomic_read(sched->score);
985 		if (num_score < min_score) {
986 			min_score = num_score;
987 			picked_sched = sched;
988 		}
989 	}
990 
991 	return picked_sched;
992 }
993 EXPORT_SYMBOL(drm_sched_pick_best);
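/*
 * This helper is what backs load balancing for entities initialized with
 * more than one scheduler: drm_sched_entity_select_rq() calls it to move an
 * idle entity to the least loaded ring. A driver opts in simply by passing
 * the whole list at entity init time, roughly (foo_* names hypothetical):
 *
 *	drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      foo->ring_scheds, foo->num_rings, NULL);
 */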
994 
995 /**
996  * drm_sched_blocked - check if the scheduler is blocked
997  *
998  * @sched: scheduler instance
999  *
1000  * Returns true if blocked, otherwise false.
1001  */
1002 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
1003 {
1004 	if (kthread_should_park()) {
1005 		kthread_parkme();
1006 		return true;
1007 	}
1008 
1009 	return false;
1010 }
1011 
1012 /**
1013  * drm_sched_main - main scheduler thread
1014  *
1015  * @param: scheduler instance
1016  *
1017  * Returns 0.
1018  */
1019 static int drm_sched_main(void *param)
1020 {
1021 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
1022 	int r;
1023 
1024 #ifdef __linux__
1025 	sched_set_fifo_low(current);
1026 #endif
1027 
1028 	while (!kthread_should_stop()) {
1029 		struct drm_sched_entity *entity = NULL;
1030 		struct drm_sched_fence *s_fence;
1031 		struct drm_sched_job *sched_job;
1032 		struct dma_fence *fence;
1033 		struct drm_sched_job *cleanup_job = NULL;
1034 
1035 		wait_event_interruptible(sched->wake_up_worker,
1036 					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
1037 					 (!drm_sched_blocked(sched) &&
1038 					  (entity = drm_sched_select_entity(sched))) ||
1039 					 kthread_should_stop());
1040 
1041 		if (cleanup_job)
1042 			sched->ops->free_job(cleanup_job);
1043 
1044 		if (!entity)
1045 			continue;
1046 
1047 		sched_job = drm_sched_entity_pop_job(entity);
1048 
1049 		if (!sched_job) {
1050 			complete_all(&entity->entity_idle);
1051 			continue;
1052 		}
1053 
1054 		s_fence = sched_job->s_fence;
1055 
1056 		atomic_inc(&sched->hw_rq_count);
1057 		drm_sched_job_begin(sched_job);
1058 
1059 		trace_drm_run_job(sched_job, entity);
1060 		fence = sched->ops->run_job(sched_job);
1061 		complete_all(&entity->entity_idle);
1062 		drm_sched_fence_scheduled(s_fence, fence);
1063 
1064 		if (!IS_ERR_OR_NULL(fence)) {
1065 			/* Drop for original kref_init of the fence */
1066 			dma_fence_put(fence);
1067 
1068 			r = dma_fence_add_callback(fence, &sched_job->cb,
1069 						   drm_sched_job_done_cb);
1070 			if (r == -ENOENT)
1071 				drm_sched_job_done(sched_job, fence->error);
1072 			else if (r)
1073 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
1074 					  r);
1075 		} else {
1076 			drm_sched_job_done(sched_job, IS_ERR(fence) ?
1077 					   PTR_ERR(fence) : 0);
1078 		}
1079 
1080 		wake_up(&sched->job_scheduled);
1081 	}
1082 	return 0;
1083 }
1084 
1085 /**
1086  * drm_sched_init - Init a gpu scheduler instance
1087  *
1088  * @sched: scheduler instance
1089  * @ops: backend operations for this scheduler
1090  * @hw_submission: number of hw submissions that can be in flight
1091  * @hang_limit: number of times to allow a job to hang before dropping it
1092  * @timeout: timeout value in jiffies for the scheduler
1093  * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
1094  *		used
1095  * @score: optional score atomic shared with other schedulers
1096  * @name: name used for debugging
1097  * @dev: target &struct device
1098  *
1099  * Return 0 on success, otherwise error code.
1100  */
1101 int drm_sched_init(struct drm_gpu_scheduler *sched,
1102 		   const struct drm_sched_backend_ops *ops,
1103 		   unsigned hw_submission, unsigned hang_limit,
1104 		   long timeout, struct workqueue_struct *timeout_wq,
1105 		   atomic_t *score, const char *name, struct device *dev)
1106 {
1107 	int i, ret;
1108 	sched->ops = ops;
1109 	sched->hw_submission_limit = hw_submission;
1110 	sched->name = name;
1111 	sched->timeout = timeout;
1112 	sched->timeout_wq = timeout_wq ? : system_wq;
1113 	sched->hang_limit = hang_limit;
1114 	sched->score = score ? score : &sched->_score;
1115 	sched->dev = dev;
1116 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
1117 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
1118 
1119 	init_waitqueue_head(&sched->wake_up_worker);
1120 	init_waitqueue_head(&sched->job_scheduled);
1121 	INIT_LIST_HEAD(&sched->pending_list);
1122 	mtx_init(&sched->job_list_lock, IPL_NONE);
1123 	atomic_set(&sched->hw_rq_count, 0);
1124 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1125 	atomic_set(&sched->_score, 0);
1126 	atomic64_set(&sched->job_id_count, 0);
1127 
1128 	/* Each scheduler will run on a separate kernel thread */
1129 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
1130 	if (IS_ERR(sched->thread)) {
1131 		ret = PTR_ERR(sched->thread);
1132 		sched->thread = NULL;
1133 		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
1134 		return ret;
1135 	}
1136 
1137 	sched->ready = true;
1138 	return 0;
1139 }
1140 EXPORT_SYMBOL(drm_sched_init);
1141 
1142 /**
1143  * drm_sched_fini - Destroy a gpu scheduler
1144  *
1145  * @sched: scheduler instance
1146  *
1147  * Tears down and cleans up the scheduler.
1148  */
1149 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1150 {
1151 	struct drm_sched_entity *s_entity;
1152 	int i;
1153 
1154 	if (sched->thread)
1155 		kthread_stop(sched->thread);
1156 
1157 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
1158 		struct drm_sched_rq *rq = &sched->sched_rq[i];
1159 
1160 		spin_lock(&rq->lock);
1161 		list_for_each_entry(s_entity, &rq->entities, list)
1162 			/*
1163 			 * Prevents reinsertion and marks job_queue as idle;
1164 			 * it will be removed from the rq in
1165 			 * drm_sched_entity_fini() eventually.
1166 			 */
1167 			s_entity->stopped = true;
1168 		spin_unlock(&rq->lock);
1169 
1170 	}
1171 
1172 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1173 	wake_up_all(&sched->job_scheduled);
1174 
1175 	/* Confirm no work left behind accessing device structures */
1176 	cancel_delayed_work_sync(&sched->work_tdr);
1177 
1178 	sched->ready = false;
1179 }
1180 EXPORT_SYMBOL(drm_sched_fini);
1181 
1182 /**
1183  * drm_sched_increase_karma - Update sched_entity guilty flag
1184  *
1185  * @bad: The job guilty of time out
1186  *
1187  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1188  * limit of the scheduler then the respective sched entity is marked guilty and
1189  * jobs from it will not be scheduled any further.
1190  */
1191 void drm_sched_increase_karma(struct drm_sched_job *bad)
1192 {
1193 	int i;
1194 	struct drm_sched_entity *tmp;
1195 	struct drm_sched_entity *entity;
1196 	struct drm_gpu_scheduler *sched = bad->sched;
1197 
1198 	/* don't change @bad's karma if it's from the KERNEL RQ, because a GPU
1199 	 * hang can sometimes corrupt kernel jobs (like VM updating jobs), but
1200 	 * keep in mind that kernel jobs are always considered good.
1201 	 */
1202 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1203 		atomic_inc(&bad->karma);
1204 
1205 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
1206 		     i++) {
1207 			struct drm_sched_rq *rq = &sched->sched_rq[i];
1208 
1209 			spin_lock(&rq->lock);
1210 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1211 				if (bad->s_fence->scheduled.context ==
1212 				    entity->fence_context) {
1213 					if (entity->guilty)
1214 						atomic_set(entity->guilty, 1);
1215 					break;
1216 				}
1217 			}
1218 			spin_unlock(&rq->lock);
1219 			if (&entity->list != &rq->entities)
1220 				break;
1221 		}
1222 	}
1223 }
1224 EXPORT_SYMBOL(drm_sched_increase_karma);
1225