/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS

struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1
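/*
 * Example, for illustration only: the priority levels above are meant to be
 * usable as array indices, which is why they start at 0 and why
 * DRM_SCHED_PRIORITY_COUNT can size such arrays, e.g.
 * &drm_gpu_scheduler.sched_rq further below. A hypothetical driver-side
 * helper (example_get_rq() is not a real function) could look like this:
 *
 *	static struct drm_sched_rq *
 *	example_get_rq(struct drm_gpu_scheduler *sched,
 *		       enum drm_sched_priority prio)
 *	{
 *		if (prio < DRM_SCHED_PRIORITY_MIN ||
 *		    prio >= DRM_SCHED_PRIORITY_COUNT)
 *			return NULL;
 *		return &sched->sched_rq[prio];
 *	}
 */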
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking; this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job
	 * queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag, as provided by the driver in
	 * drm_sched_entity_init().
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup
	 * in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks the earliest job waiting in the SW queue.
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time-based priority
	 * queue.
	 */
	struct rb_node			rb_tree_node;

};
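/*
 * Example, for illustration only: a driver usually embeds one
 * &struct drm_sched_entity per context or software queue and sets it up with
 * drm_sched_entity_init(), declared further below. Everything prefixed with
 * "example_" is hypothetical driver code:
 *
 *	int example_context_init(struct example_context *ctx,
 *				 struct drm_gpu_scheduler *sched)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list, ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 *
 * Teardown would go through drm_sched_entity_destroy(), or through
 * drm_sched_entity_flush() followed by drm_sched_entity_fini() when the two
 * steps need to happen at different times.
 */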
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities used for
 *                FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
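/*
 * Example, for illustration only: as the @finished documentation above
 * explains, &drm_sched_fence.finished is what a driver would typically expose
 * as the out-fence of a submission, since it already exists right after
 * drm_sched_job_init(). Assuming "job" is an initialized
 * &struct drm_sched_job and "out_sync" is a userspace-provided
 * &struct drm_syncobj (both hypothetical here):
 *
 *	drm_syncobj_replace_fence(out_sync, &job->s_fence->finished);
 *
 * The hardware fence returned from &drm_sched_backend_ops.run_job is not
 * suitable for this, because it is only created once the job's dependencies
 * have resolved.
 */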
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule the job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};
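/*
 * Example, for illustration only: the usual submission flow for a
 * &struct drm_sched_job, using the functions declared further below.
 * "example_job" embeds a &struct drm_sched_job as "base"; the "ejob", "ctx"
 * and "in_fence" identifiers are hypothetical and error handling is reduced
 * to the minimum:
 *
 *	int ret;
 *
 *	ret = drm_sched_job_init(&ejob->base, &ctx->entity, ejob);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_dependency(&ejob->base,
 *					   dma_fence_get(in_fence));
 *	if (ret) {
 *		drm_sched_job_cleanup(&ejob->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&ejob->base);
 *	drm_sched_entity_push_job(&ejob->base);
 */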
/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented in the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency:
	 *
	 * Called when the scheduler is considering scheduling this job next,
	 * to get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * If a driver exclusively uses drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies() this can be omitted and
	 * left as NULL.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
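/*
 * Example, for illustration only: the backend operations are usually defined
 * as a static const table in the driver and handed to drm_sched_init().
 * Everything prefixed with "example_" is hypothetical driver code:
 *
 *	static const struct drm_sched_backend_ops example_sched_ops = {
 *		.run_job = example_run_job,
 *		.timedout_job = example_timedout_job,
 *		.free_job = example_free_job,
 *	};
 *
 * Here example_run_job() would return the hardware fence for the submission,
 * example_timedout_job() would implement the reset workflow described above
 * and return an &enum drm_gpu_sched_stat value, and example_free_job() would
 * release the job once its finished fence has signaled. @dependency is left
 * NULL, which is fine for drivers that exclusively use
 * drm_sched_job_add_dependency() and
 * drm_sched_job_add_implicit_dependencies().
 */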
/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
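/*
 * Example, for illustration only: a scheduler instance is typically created
 * per hardware ring at driver load with drm_sched_init(), declared above, and
 * torn down with drm_sched_fini(). The numbers and the "ring"/"dev"
 * identifiers are hypothetical, and example_sched_ops is the table from the
 * sketch above:
 *
 *	ret = drm_sched_init(&ring->sched, &example_sched_ops,
 *			     64, 1, msecs_to_jiffies(500),
 *			     NULL, NULL, ring->name, dev);
 *	if (ret)
 *		return ret;
 *
 * Passing NULL for the score pointer makes the scheduler use its internal
 * @_score (see above); passing NULL for the timeout workqueue makes it fall
 * back to the system workqueue.
 */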
struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif