xref: /linux/drivers/gpu/drm/scheduler/sched_main.c (revision 44f57d78)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects entities
30  * from the run queue using a FIFO. The scheduler also provides dependency
31  * handling between jobs. The driver is expected to provide callback functions
32  * for backend operations to the scheduler, such as submitting a job to the
33  * hardware run queue or returning the dependencies of a job.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  */
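/*
 * Illustrative sketch, not part of the original file: roughly how a driver
 * might wire the backend operations described above into a scheduler and feed
 * it jobs. The callback names match the overview; my_hw_submit(),
 * my_hw_reset(), my_next_dependency(), to_my_job(), ring and the numeric
 * limits are hypothetical placeholders.
 *
 *	static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
 *					       struct drm_sched_entity *entity)
 *	{
 *		// Return the next fence the job still has to wait for, or NULL.
 *		return my_next_dependency(to_my_job(sched_job));
 *	}
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		// Push the job to the hardware ring and return its HW fence.
 *		return my_hw_submit(to_my_job(sched_job));
 *	}
 *
 *	static void my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		// Called from the TDR worker when the job ran too long.
 *		my_hw_reset(to_my_job(sched_job));
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	// One scheduler instance per hardware run queue:
 *	r = drm_sched_init(&ring->sched, &my_sched_ops, 32, 3,
 *			   msecs_to_jiffies(10000), ring->name);
 */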
46 
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <uapi/linux/sched/types.h>
51 #include <drm/drmP.h>
52 #include <drm/gpu_scheduler.h>
53 #include <drm/spsc_queue.h>
54 
55 #define CREATE_TRACE_POINTS
56 #include "gpu_scheduler_trace.h"
57 
58 #define to_drm_sched_job(sched_job)		\
59 		container_of((sched_job), struct drm_sched_job, queue_node)
60 
61 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
62 
63 /**
64  * drm_sched_rq_init - initialize a given run queue struct
65  * @sched: scheduler instance the run queue belongs to
66  * @rq: scheduler run queue
67  *
68  * Initializes a scheduler runqueue.
69  */
70 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
71 			      struct drm_sched_rq *rq)
72 {
73 	spin_lock_init(&rq->lock);
74 	INIT_LIST_HEAD(&rq->entities);
75 	rq->current_entity = NULL;
76 	rq->sched = sched;
77 }
78 
79 /**
80  * drm_sched_rq_add_entity - add an entity
81  *
82  * @rq: scheduler run queue
83  * @entity: scheduler entity
84  *
85  * Adds a scheduler entity to the run queue.
86  */
87 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
88 			     struct drm_sched_entity *entity)
89 {
90 	if (!list_empty(&entity->list))
91 		return;
92 	spin_lock(&rq->lock);
93 	list_add_tail(&entity->list, &rq->entities);
94 	spin_unlock(&rq->lock);
95 }
96 
97 /**
98  * drm_sched_rq_remove_entity - remove an entity
99  *
100  * @rq: scheduler run queue
101  * @entity: scheduler entity
102  *
103  * Removes a scheduler entity from the run queue.
104  */
105 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
106 				struct drm_sched_entity *entity)
107 {
108 	if (list_empty(&entity->list))
109 		return;
110 	spin_lock(&rq->lock);
111 	list_del_init(&entity->list);
112 	if (rq->current_entity == entity)
113 		rq->current_entity = NULL;
114 	spin_unlock(&rq->lock);
115 }
116 
117 /**
118  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
119  *
120  * @rq: scheduler run queue to check.
121  *
122  * Try to find a ready entity, returns NULL if none found.
123  */
124 static struct drm_sched_entity *
125 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
126 {
127 	struct drm_sched_entity *entity;
128 
129 	spin_lock(&rq->lock);
130 
131 	entity = rq->current_entity;
132 	if (entity) {
133 		list_for_each_entry_continue(entity, &rq->entities, list) {
134 			if (drm_sched_entity_is_ready(entity)) {
135 				rq->current_entity = entity;
136 				spin_unlock(&rq->lock);
137 				return entity;
138 			}
139 		}
140 	}
141 
142 	list_for_each_entry(entity, &rq->entities, list) {
143 
144 		if (drm_sched_entity_is_ready(entity)) {
145 			rq->current_entity = entity;
146 			spin_unlock(&rq->lock);
147 			return entity;
148 		}
149 
150 		if (entity == rq->current_entity)
151 			break;
152 	}
153 
154 	spin_unlock(&rq->lock);
155 
156 	return NULL;
157 }
158 
159 /**
160  * drm_sched_dependency_optimized
161  *
162  * @fence: the dependency fence
163  * @entity: the entity which depends on the above fence
164  *
165  * Returns true if the dependency can be optimized and false otherwise
166  */
167 bool drm_sched_dependency_optimized(struct dma_fence *fence,
168 				    struct drm_sched_entity *entity)
169 {
170 	struct drm_gpu_scheduler *sched = entity->rq->sched;
171 	struct drm_sched_fence *s_fence;
172 
173 	if (!fence || dma_fence_is_signaled(fence))
174 		return false;
175 	if (fence->context == entity->fence_context)
176 		return true;
177 	s_fence = to_drm_sched_fence(fence);
178 	if (s_fence && s_fence->sched == sched)
179 		return true;
180 
181 	return false;
182 }
183 EXPORT_SYMBOL(drm_sched_dependency_optimized);
184 
185 /**
186  * drm_sched_start_timeout - start timeout for reset worker
187  *
188  * @sched: scheduler instance to start the worker for
189  *
190  * Start the timeout for the given scheduler.
191  */
192 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
193 {
194 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
195 	    !list_empty(&sched->ring_mirror_list))
196 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
197 }
198 
199 /**
200  * drm_sched_fault - immediately start timeout handler
201  *
202  * @sched: scheduler where the timeout handling should be started.
203  *
204  * Start timeout handling immediately when the driver detects a hardware fault.
205  */
206 void drm_sched_fault(struct drm_gpu_scheduler *sched)
207 {
208 	mod_delayed_work(system_wq, &sched->work_tdr, 0);
209 }
210 EXPORT_SYMBOL(drm_sched_fault);
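/*
 * Illustrative sketch (hypothetical driver code, not from this file): a fault
 * interrupt handler can kick timeout handling right away instead of waiting
 * for the full timeout to elapse; my_irq_is_fault() is a made-up helper.
 *
 *	if (my_irq_is_fault(irq_status))
 *		drm_sched_fault(&ring->sched);
 */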
211 
212 /**
213  * drm_sched_suspend_timeout - Suspend scheduler job timeout
214  *
215  * @sched: scheduler instance for which to suspend the timeout
216  *
217  * Suspend the delayed work timeout for the scheduler. This is done by
218  * modifying the delayed work timeout to an arbitrary large value,
219  * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
220  * called from an IRQ context.
221  *
222  * Returns the timeout remaining
223  *
224  */
225 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
226 {
227 	unsigned long sched_timeout, now = jiffies;
228 
229 	sched_timeout = sched->work_tdr.timer.expires;
230 
231 	/*
232 	 * Modify the timeout to an arbitrarily large value. This also prevents
233 	 * the timeout to be restarted when new submissions arrive
234 	 */
235 	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
236 			&& time_after(sched_timeout, now))
237 		return sched_timeout - now;
238 	else
239 		return sched->timeout;
240 }
241 EXPORT_SYMBOL(drm_sched_suspend_timeout);
242 
243 /**
244  * drm_sched_resume_timeout - Resume scheduler job timeout
245  *
246  * @sched: scheduler instance for which to resume the timeout
247  * @remaining: remaining timeout
248  *
249  * Resume the delayed work timeout for the scheduler. Note that
250  * this function can be called from an IRQ context.
251  */
252 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
253 		unsigned long remaining)
254 {
255 	unsigned long flags;
256 
257 	spin_lock_irqsave(&sched->job_list_lock, flags);
258 
259 	if (list_empty(&sched->ring_mirror_list))
260 		cancel_delayed_work(&sched->work_tdr);
261 	else
262 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
263 
264 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
265 }
266 EXPORT_SYMBOL(drm_sched_resume_timeout);
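/*
 * Illustrative sketch (hypothetical usage, not from this file): work that must
 * not be accounted against the job timeout can be bracketed by the two helpers
 * above; my_do_slow_maintenance() is a made-up placeholder.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	my_do_slow_maintenance(sched);
 *	drm_sched_resume_timeout(sched, remaining);
 */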
267 
268 /* drm_sched_job_finish() is called after the job's hw fence has signaled.
269  */
270 static void drm_sched_job_finish(struct work_struct *work)
271 {
272 	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
273 						   finish_work);
274 	struct drm_gpu_scheduler *sched = s_job->sched;
275 	unsigned long flags;
276 
277 	/*
278 	 * Canceling the timeout without removing our job from the ring mirror
279 	 * list is safe, as we will only end up in this worker if our job's
280 	 * finished fence has been signaled. So even if another worker
281 	 * manages to find this job as the next job in the list, the fence
282 	 * signaled check below will prevent the timeout from being restarted.
283 	 */
284 	cancel_delayed_work_sync(&sched->work_tdr);
285 
286 	spin_lock_irqsave(&sched->job_list_lock, flags);
287 	/* queue TDR for next job */
288 	drm_sched_start_timeout(sched);
289 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
290 
291 	sched->ops->free_job(s_job);
292 }
293 
294 static void drm_sched_job_begin(struct drm_sched_job *s_job)
295 {
296 	struct drm_gpu_scheduler *sched = s_job->sched;
297 	unsigned long flags;
298 
299 	spin_lock_irqsave(&sched->job_list_lock, flags);
300 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
301 	drm_sched_start_timeout(sched);
302 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
303 }
304 
305 static void drm_sched_job_timedout(struct work_struct *work)
306 {
307 	struct drm_gpu_scheduler *sched;
308 	struct drm_sched_job *job;
309 	unsigned long flags;
310 
311 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
312 	job = list_first_entry_or_null(&sched->ring_mirror_list,
313 				       struct drm_sched_job, node);
314 
315 	if (job)
316 		job->sched->ops->timedout_job(job);
317 
318 	spin_lock_irqsave(&sched->job_list_lock, flags);
319 	drm_sched_start_timeout(sched);
320 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
321 }
322 
323 /**
324  * drm_sched_increase_karma - Update sched_entity guilty flag
325  *
326  * @bad: The job guilty of the time out
327  *
328  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
329  * limit of the scheduler then the respective sched entity is marked guilty and
330  * jobs from it will not be scheduled further.
331  */
332 void drm_sched_increase_karma(struct drm_sched_job *bad)
333 {
334 	int i;
335 	struct drm_sched_entity *tmp;
336 	struct drm_sched_entity *entity;
337 	struct drm_gpu_scheduler *sched = bad->sched;
338 
339 	/* don't increase @bad's karma if it's from the KERNEL RQ,
340 	 * because a GPU hang can sometimes corrupt kernel jobs (like VM updating
341 	 * jobs), but keep in mind that kernel jobs are always considered good.
342 	 */
343 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
344 		atomic_inc(&bad->karma);
345 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
346 		     i++) {
347 			struct drm_sched_rq *rq = &sched->sched_rq[i];
348 
349 			spin_lock(&rq->lock);
350 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
351 				if (bad->s_fence->scheduled.context ==
352 				    entity->fence_context) {
353 					if (atomic_read(&bad->karma) >
354 					    bad->sched->hang_limit)
355 						if (entity->guilty)
356 							atomic_set(entity->guilty, 1);
357 					break;
358 				}
359 			}
360 			spin_unlock(&rq->lock);
361 			if (&entity->list != &rq->entities)
362 				break;
363 		}
364 	}
365 }
366 EXPORT_SYMBOL(drm_sched_increase_karma);
367 
368 /**
369  * drm_sched_stop - stop the scheduler
370  *
371  * @sched: scheduler instance
372  *
373  */
374 void drm_sched_stop(struct drm_gpu_scheduler *sched)
375 {
376 	struct drm_sched_job *s_job;
377 	unsigned long flags;
378 	struct dma_fence *last_fence = NULL;
379 
380 	kthread_park(sched->thread);
381 
382 	/*
383 	 * Verify all the signaled jobs in the mirror list are removed from the
384 	 * ring by waiting for the latest job to enter the list. This should
385 	 * ensure that all the previous jobs that were in flight have already
386 	 * signaled and removed themselves from the list.
387 	 */
388 	spin_lock_irqsave(&sched->job_list_lock, flags);
389 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
390 		if (s_job->s_fence->parent &&
391 		    dma_fence_remove_callback(s_job->s_fence->parent,
392 					      &s_job->cb)) {
393 			dma_fence_put(s_job->s_fence->parent);
394 			s_job->s_fence->parent = NULL;
395 			atomic_dec(&sched->hw_rq_count);
396 		} else {
397 			last_fence = dma_fence_get(&s_job->s_fence->finished);
398 			break;
399 		}
400 	}
401 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
402 
403 	if (last_fence) {
404 		dma_fence_wait(last_fence, false);
405 		dma_fence_put(last_fence);
406 	}
407 }
409 EXPORT_SYMBOL(drm_sched_stop);
410 
411 /**
412  * drm_sched_start - recover jobs after a reset
413  *
414  * @sched: scheduler instance
415  * @full_recovery: proceed with complete sched restart
416  */
417 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
418 {
419 	struct drm_sched_job *s_job, *tmp;
420 	int r;
421 
422 	if (!full_recovery)
423 		goto unpark;
424 
425 	/*
426 	 * Locking the list is not required here as the sched thread is parked,
427 	 * so no new jobs are being pushed to the HW, and in drm_sched_stop we
428 	 * flushed all the jobs that were still in the mirror list but had
429 	 * already signaled and removed themselves from the list. Also,
430 	 * concurrent GPU recoveries can't run in parallel.
431 	 */
432 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
433 		struct dma_fence *fence = s_job->s_fence->parent;
434 
435 		if (fence) {
436 			r = dma_fence_add_callback(fence, &s_job->cb,
437 						   drm_sched_process_job);
438 			if (r == -ENOENT)
439 				drm_sched_process_job(fence, &s_job->cb);
440 			else if (r)
441 				DRM_ERROR("fence add callback failed (%d)\n",
442 					  r);
443 		} else
444 			drm_sched_process_job(NULL, &s_job->cb);
445 	}
446 
447 	drm_sched_start_timeout(sched);
448 
449 unpark:
450 	kthread_unpark(sched->thread);
451 }
452 EXPORT_SYMBOL(drm_sched_start);
453 
454 /**
455  * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
456  *
457  * @sched: scheduler instance
458  *
459  */
460 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
461 {
462 	struct drm_sched_job *s_job, *tmp;
463 	uint64_t guilty_context;
464 	bool found_guilty = false;
465 
466 	/* TODO: Do we need a spinlock here? */
467 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
468 		struct drm_sched_fence *s_fence = s_job->s_fence;
469 
470 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
471 			found_guilty = true;
472 			guilty_context = s_job->s_fence->scheduled.context;
473 		}
474 
475 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
476 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
477 
478 		s_job->s_fence->parent = sched->ops->run_job(s_job);
479 		atomic_inc(&sched->hw_rq_count);
480 	}
481 }
482 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
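/*
 * Illustrative sketch of the recovery sequence these helpers are designed for,
 * assuming a hypothetical my_hw_reset(); the exact flow is driver specific:
 *
 *	drm_sched_stop(sched);			// park thread, detach HW fences
 *	if (bad_job)
 *		drm_sched_increase_karma(bad_job);
 *	my_hw_reset(my_dev);			// driver specific hardware reset
 *	drm_sched_resubmit_jobs(sched);		// re-run jobs from the mirror list
 *	drm_sched_start(sched, true);		// restart timeout, unpark thread
 */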
483 
484 /**
485  * drm_sched_job_init - init a scheduler job
486  *
487  * @job: scheduler job to init
488  * @entity: scheduler entity to use
489  * @owner: job owner for debugging
490  *
491  * Refer to drm_sched_entity_push_job() documentation
492  * for locking considerations.
493  *
494  * Returns 0 for success, negative error code otherwise.
495  */
496 int drm_sched_job_init(struct drm_sched_job *job,
497 		       struct drm_sched_entity *entity,
498 		       void *owner)
499 {
500 	struct drm_gpu_scheduler *sched;
501 
502 	drm_sched_entity_select_rq(entity);
503 	if (!entity->rq)
504 		return -ENOENT;
505 
506 	sched = entity->rq->sched;
507 
508 	job->sched = sched;
509 	job->entity = entity;
510 	job->s_priority = entity->rq - sched->sched_rq;
511 	job->s_fence = drm_sched_fence_create(entity, owner);
512 	if (!job->s_fence)
513 		return -ENOMEM;
514 	job->id = atomic64_inc_return(&sched->job_id_count);
515 
516 	INIT_WORK(&job->finish_work, drm_sched_job_finish);
517 	INIT_LIST_HEAD(&job->node);
518 
519 	return 0;
520 }
521 EXPORT_SYMBOL(drm_sched_job_init);
522 
523 /**
524  * drm_sched_job_cleanup - clean up scheduler job resources
525  *
526  * @job: scheduler job to clean up
527  */
528 void drm_sched_job_cleanup(struct drm_sched_job *job)
529 {
530 	dma_fence_put(&job->s_fence->finished);
531 	job->s_fence = NULL;
532 }
533 EXPORT_SYMBOL(drm_sched_job_cleanup);
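/*
 * Illustrative sketch (hypothetical driver submit path, not from this file),
 * assuming a struct my_job that embeds struct drm_sched_job as member "base":
 *
 *	r = drm_sched_job_init(&job->base, entity, file_priv);
 *	if (r)
 *		return r;
 *
 *	// ... attach buffers and dependencies to the job here ...
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 *
 * On the teardown side, drivers typically call drm_sched_job_cleanup() from
 * their free_job callback before releasing their own job structure.
 */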
534 
535 /**
536  * drm_sched_ready - is the scheduler ready
537  *
538  * @sched: scheduler instance
539  *
540  * Return true if we can push more jobs to the hw, otherwise false.
541  */
542 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
543 {
544 	return atomic_read(&sched->hw_rq_count) <
545 		sched->hw_submission_limit;
546 }
547 
548 /**
549  * drm_sched_wakeup - Wake up the scheduler when it is ready
550  *
551  * @sched: scheduler instance
552  *
553  */
554 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
555 {
556 	if (drm_sched_ready(sched))
557 		wake_up_interruptible(&sched->wake_up_worker);
558 }
559 
560 /**
561  * drm_sched_select_entity - Select next entity to process
562  *
563  * @sched: scheduler instance
564  *
565  * Returns the entity to process or NULL if none are found.
566  */
567 static struct drm_sched_entity *
568 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
569 {
570 	struct drm_sched_entity *entity;
571 	int i;
572 
573 	if (!drm_sched_ready(sched))
574 		return NULL;
575 
576 	/* Kernel run queue has higher priority than normal run queue */
577 	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
578 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
579 		if (entity)
580 			break;
581 	}
582 
583 	return entity;
584 }
585 
586 /**
587  * drm_sched_process_job - process a job
588  *
589  * @f: fence
590  * @cb: fence callbacks
591  *
592  * Called after job has finished execution.
593  */
594 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
595 {
596 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
597 	struct drm_sched_fence *s_fence = s_job->s_fence;
598 	struct drm_gpu_scheduler *sched = s_fence->sched;
599 	unsigned long flags;
600 
601 	cancel_delayed_work(&sched->work_tdr);
602 
603 	atomic_dec(&sched->hw_rq_count);
604 	atomic_dec(&sched->num_jobs);
605 
606 	spin_lock_irqsave(&sched->job_list_lock, flags);
607 	/* remove job from ring_mirror_list */
608 	list_del_init(&s_job->node);
609 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
610 
611 	drm_sched_fence_finished(s_fence);
612 
613 	trace_drm_sched_process_job(s_fence);
614 	wake_up_interruptible(&sched->wake_up_worker);
615 
616 	schedule_work(&s_job->finish_work);
617 }
618 
619 /**
620  * drm_sched_blocked - check if the scheduler is blocked
621  *
622  * @sched: scheduler instance
623  *
624  * Returns true if blocked, otherwise false.
625  */
626 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
627 {
628 	if (kthread_should_park()) {
629 		kthread_parkme();
630 		return true;
631 	}
632 
633 	return false;
634 }
635 
636 /**
637  * drm_sched_main - main scheduler thread
638  *
639  * @param: scheduler instance
640  *
641  * Returns 0.
642  */
643 static int drm_sched_main(void *param)
644 {
645 	struct sched_param sparam = {.sched_priority = 1};
646 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
647 	int r;
648 
649 	sched_setscheduler(current, SCHED_FIFO, &sparam);
650 
651 	while (!kthread_should_stop()) {
652 		struct drm_sched_entity *entity = NULL;
653 		struct drm_sched_fence *s_fence;
654 		struct drm_sched_job *sched_job;
655 		struct dma_fence *fence;
656 
657 		wait_event_interruptible(sched->wake_up_worker,
658 					 (!drm_sched_blocked(sched) &&
659 					  (entity = drm_sched_select_entity(sched))) ||
660 					 kthread_should_stop());
661 
662 		if (!entity)
663 			continue;
664 
665 		sched_job = drm_sched_entity_pop_job(entity);
666 		if (!sched_job)
667 			continue;
668 
669 		s_fence = sched_job->s_fence;
670 
671 		atomic_inc(&sched->hw_rq_count);
672 		drm_sched_job_begin(sched_job);
673 
674 		fence = sched->ops->run_job(sched_job);
675 		drm_sched_fence_scheduled(s_fence);
676 
677 		if (fence) {
678 			s_fence->parent = dma_fence_get(fence);
679 			r = dma_fence_add_callback(fence, &sched_job->cb,
680 						   drm_sched_process_job);
681 			if (r == -ENOENT)
682 				drm_sched_process_job(fence, &sched_job->cb);
683 			else if (r)
684 				DRM_ERROR("fence add callback failed (%d)\n",
685 					  r);
686 			dma_fence_put(fence);
687 		} else
688 			drm_sched_process_job(NULL, &sched_job->cb);
689 
690 		wake_up(&sched->job_scheduled);
691 	}
692 	return 0;
693 }
694 
695 /**
696  * drm_sched_init - Init a gpu scheduler instance
697  *
698  * @sched: scheduler instance
699  * @ops: backend operations for this scheduler
700  * @hw_submission: number of hw submissions that can be in flight
701  * @hang_limit: number of times to allow a job to hang before dropping it
702  * @timeout: timeout value in jiffies for the scheduler
703  * @name: name used for debugging
704  *
705  * Return 0 on success, otherwise an error code.
706  */
707 int drm_sched_init(struct drm_gpu_scheduler *sched,
708 		   const struct drm_sched_backend_ops *ops,
709 		   unsigned hw_submission,
710 		   unsigned hang_limit,
711 		   long timeout,
712 		   const char *name)
713 {
714 	int i, ret;
715 	sched->ops = ops;
716 	sched->hw_submission_limit = hw_submission;
717 	sched->name = name;
718 	sched->timeout = timeout;
719 	sched->hang_limit = hang_limit;
720 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
721 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
722 
723 	init_waitqueue_head(&sched->wake_up_worker);
724 	init_waitqueue_head(&sched->job_scheduled);
725 	INIT_LIST_HEAD(&sched->ring_mirror_list);
726 	spin_lock_init(&sched->job_list_lock);
727 	atomic_set(&sched->hw_rq_count, 0);
728 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
729 	atomic_set(&sched->num_jobs, 0);
730 	atomic64_set(&sched->job_id_count, 0);
731 
732 	/* Each scheduler will run on a separate kernel thread */
733 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
734 	if (IS_ERR(sched->thread)) {
735 		ret = PTR_ERR(sched->thread);
736 		sched->thread = NULL;
737 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
738 		return ret;
739 	}
740 
741 	sched->ready = true;
742 	return 0;
743 }
744 EXPORT_SYMBOL(drm_sched_init);
745 
746 /**
747  * drm_sched_fini - Destroy a gpu scheduler
748  *
749  * @sched: scheduler instance
750  *
751  * Tears down and cleans up the scheduler.
752  */
753 void drm_sched_fini(struct drm_gpu_scheduler *sched)
754 {
755 	if (sched->thread)
756 		kthread_stop(sched->thread);
757 
758 	sched->ready = false;
759 }
760 EXPORT_SYMBOL(drm_sched_fini);
761