// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

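/*
 * Allocate a fence on the timeline of the given job slot. The fence shares
 * the slot's job_lock and takes the next seqno from the slot's fence
 * context, so fences on a slot are totally ordered.
 */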
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

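/*
 * Map a job's requirement flags to the hardware job slot it should be
 * queued on.
 */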
static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

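/*
 * Program the _NEXT registers of a job slot and kick the job off: take a
 * runtime PM reference, grab an address space, write the job chain head,
 * affinity and configuration, then issue JS_COMMAND_START. The matching
 * PM put and address space release happen in the IRQ handler (or the
 * timeout path) once the job is done.
 */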
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		pm_runtime_put_sync_autosuspend(pfdev->dev);
		return;
	}

	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
	panfrost_devfreq_record_busy(pfdev);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
				job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

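/*
 * Snapshot the exclusive (write) fence of each BO so the scheduler can wait
 * on it as an implicit dependency before running the job.
 */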
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

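/*
 * Install the job's render-done fence as the new exclusive fence on every
 * BO, so later users of those buffers wait for this job to finish.
 */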
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

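/*
 * Hand a job over to the scheduler. The BO reservations are held across
 * drm_sched_job_init()/drm_sched_entity_push_job() so that collecting the
 * implicit fences and publishing the render-done fence can't race with
 * other submitters, and sched_lock serializes pushes device-wide.
 */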
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					    &acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

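/*
 * Final kref release: drop every reference the job still holds (in-fences,
 * implicit fences, done fences, GEM mappings and objects) and free it.
 */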
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put_unlocked(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

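/*
 * Scheduler dependency callback: return one unsignaled fence at a time,
 * explicit in-fences first and then the per-BO implicit fences, NULLing
 * each slot out so a fence is handed back exactly once. Returning NULL
 * tells the scheduler the job is ready to run.
 */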
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

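/*
 * run_job callback: all dependencies have signaled, so create the slot's
 * hardware fence, stash it as the job's done_fence and submit the job
 * chain to the hardware. The returned fence is what the scheduler waits
 * on for completion.
 */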
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

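/*
 * Clear any stale interrupt state, then unmask the DONE and FAIL
 * interrupts for every job slot.
 */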
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

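/*
 * Timeout handling: the first slot to time out takes reset_lock (the
 * others bail out via mutex_trylock()), stops all slot schedulers while
 * waiting out concurrent timeout work, drops the PM references of
 * in-flight jobs, resets the GPU, then resubmits pending jobs and
 * restarts the schedulers.
 */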
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	unsigned long flags;
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	if (!mutex_trylock(&pfdev->reset_lock))
		return;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

		drm_sched_stop(sched, sched_job);
		if (js != i)
			/* Ensure any timeouts on other slots have finished */
			cancel_delayed_work_sync(&sched->work_tdr);
	}

	drm_sched_increase_karma(sched_job);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_devfreq_record_idle(pfdev);
	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* restart scheduler after GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};

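/*
 * Job interrupt handler: JOB_INT_STAT has one DONE and one FAIL bit per
 * slot. Faults are logged and reported to the scheduler; completed jobs
 * have their done_fence signaled under job_lock, which also releases the
 * address space and the runtime PM reference taken at submit time.
 */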
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
				panfrost_devfreq_record_idle(pfdev);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

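/*
 * One-time init: allocate the per-slot state, request the "job" interrupt
 * and create one scheduler per slot with a hardware queue depth of 1 and
 * a 500ms job timeout.
 */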
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, "job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

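/* Tear down in reverse: mask all job interrupts, then destroy the schedulers. */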
void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}

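/*
 * Per-file setup: give the client one scheduler entity per job slot so
 * its jobs can be queued on any slot.
 */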
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

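/* Per-file teardown: destroy the client's scheduler entities. */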
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

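/*
 * Report whether the job frontend is idle: no devfreq busy refcount and
 * nothing in any slot's hardware queue (the runtime-suspend path checks
 * this before powering the device down).
 */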
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	/* Check whether the hardware is idle */
	if (atomic_read(&pfdev->devfreq.busy_count))
		return false;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}