/*	$NetBSD: sched_entity.c,v 1.7 2021/12/24 15:26:35 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_entity.c,v 1.7 2021/12/24 15:26:35 riastradh Exp $");

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Initialize a context entity used by the scheduler
 * when submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
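
/*
 * Usage sketch (illustrative, not part of this file): a driver with a single
 * scheduler instance would typically embed the entity in per-context state
 * and initialize it once at context creation.  The names my_ctx and my_sched
 * below are hypothetical.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity,
 *				  DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */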

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	assert_spin_locked(&entity->rq->sched->job_list_lock);

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq from the sched_list with the
 * least load
 *
 * @entity: scheduler entity
 *
 * Returns a pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_score = UINT_MAX, num_score;
	int i;

	for (i = 0; i < entity->num_sched_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->sched_list[i];

		if (!entity->sched_list[i]->ready) {
			DRM_WARN("scheduler %s is not ready, skipping\n", sched->name);
			continue;
		}

		num_score = atomic_read(&sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			rq = &entity->sched_list[i]->sched_rq[entity->priority];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
#ifdef __NetBSD__
	struct proc *last_user;
#else
	struct task_struct *last_user;
#endif
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
#ifdef __NetBSD__
	spin_lock(&sched->job_list_lock);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &sched->job_scheduled,
	    &sched->job_list_lock,
	    drm_sched_entity_is_idle(entity));
	spin_unlock(&sched->job_list_lock);
#else
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}
#endif

	/* For a killed process, disable any further IB enqueue right now. */
#ifdef __NetBSD__
	last_user = cmpxchg(&entity->last_user, curproc, NULL);
	if ((!last_user || last_user == curproc) &&
	    (curproc->p_sflag & PS_WEXIT))
#else
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
#endif
	{
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
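
/*
 * Teardown sketch (illustrative): a driver that wants its own bound on the
 * flush can run the two fini phases by hand instead of using
 * drm_sched_entity_destroy() below; my_ctx is hypothetical driver state.
 *
 *	long timeout = msecs_to_jiffies(100);
 *
 *	drm_sched_entity_flush(&my_ctx->entity, timeout);
 *	drm_sched_entity_fini(&my_ctx->entity);
 */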

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	spin_lock_destroy(&entity->rq_lock);

	/* Consumption of existing IBs wasn't completed.  Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	destroy_completion(&entity->entity_idle);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
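
/*
 * Common-path sketch (illustrative): most drivers just call this wrapper at
 * context teardown; my_ctx is hypothetical.
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */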

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	spin_lock(&entity->rq->sched->job_list_lock);
	drm_sched_wakeup(entity->rq->sched);
	spin_unlock(&entity->rq->sched->job_list_lock);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
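
/*
 * Usage sketch (illustrative): a driver exposing a per-context priority
 * control might map a user request onto a scheduler priority like this;
 * the my_ctx state and the wants_high flag are hypothetical.
 *
 *	enum drm_sched_priority prio = wants_high ?
 *	    DRM_SCHED_PRIORITY_HIGH_HW : DRM_SCHED_PRIORITY_NORMAL;
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity, prio);
 */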

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job which
		 * belongs to the same entity; we can ignore fences from
		 * ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled. */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty. */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
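
/*
 * Consumption sketch (simplified, based on the scheduler core of this
 * vintage): the scheduler's main thread pairs this with
 * drm_sched_entity_is_ready(), roughly selecting a ready entity, popping
 * one job and handing it to the driver's run_job() hook:
 *
 *	entity = drm_sched_select_entity(sched);
 *	sched_job = entity ? drm_sched_entity_pop_job(entity) : NULL;
 *	if (sched_job)
 *		fence = sched->ops->run_job(sched_job);
 */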

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	rq = drm_sched_entity_get_free_sched(entity);
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}
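
/*
 * Call-site sketch (hedged, based on the scheduler core of this vintage):
 * drm_sched_job_init() invokes this before a job's fences are created,
 * roughly as
 *
 *	drm_sched_entity_select_rq(entity);
 *	if (!entity->rq)
 *		return -ENOENT;
 *
 * so rebalancing only happens on an empty, idle entity, which is exactly
 * what the two early returns above enforce.
 */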

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: to guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
#ifdef __NetBSD__
	WRITE_ONCE(entity->last_user, curproc);
#else
	WRITE_ONCE(entity->last_user, current->group_leader);
#endif
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* The first job wakes up the scheduler. */
	if (first) {
		/* Add the entity to the run queue. */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		spin_lock(&entity->rq->sched->job_list_lock);
		drm_sched_wakeup(entity->rq->sched);
		spin_unlock(&entity->rq->sched->job_list_lock);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
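
/*
 * Submission sketch (illustrative): the ordering note above means init and
 * push must not interleave between submitters sharing an entity, so a driver
 * would typically do both under one lock.  my_ctx and its submit_lock are
 * hypothetical.
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &my_ctx->entity, my_ctx);
 *	if (r == 0)
 *		drm_sched_entity_push_job(&job->base, &my_ctx->entity);
 *	mutex_unlock(&my_ctx->submit_lock);
 */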