/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
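
/*
 * A minimal usage sketch (illustrative only, not driver code): a caller
 * that has just submitted work on @ring emits a fence behind it and can
 * then block until the GPU has passed that point:
 *
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f, 0);	// fence trails the commands
 *	if (!r) {
 *		r = dma_fence_wait(f, false);	// CPU side of the sync
 *		dma_fence_put(f);		// drop our reference
 *	}
 */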

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;

static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
87
88 /**
89 * amdgpu_fence_write - write a fence value
90 *
91 * @ring: ring the fence is associated with
92 * @seq: sequence number to write
93 *
94 * Writes a fence value to memory (all asics).
95 */
amdgpu_fence_write(struct amdgpu_ring * ring,u32 seq)96 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
97 {
98 struct amdgpu_fence_driver *drv = &ring->fence_drv;
99
100 if (drv->cpu_addr)
101 *drv->cpu_addr = cpu_to_le32(seq);
102 }

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: fence creation flags; AMDGPU_FENCE_FLAG_INT is OR'd in
 * unconditionally so that completion raises an interrupt
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			/* The slot still holds an older fence; wait for it
			 * to signal before recycling the slot.
			 */
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
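
/*
 * Illustrative call-site sketch (assumed shape, not a verbatim caller):
 * fence emission happens while the ring buffer is being filled, between
 * ring alloc and commit, which is why concurrent calls are impossible:
 *
 *	r = amdgpu_ring_alloc(ring, num_dw);	// reserve ring space
 *	...					// emit command packets
 *	r = amdgpu_fence_emit(ring, &fence, 0);	// fence after the packets
 *	amdgpu_ring_commit(ring);		// kick off execution
 */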

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling; no dma_fence object is created.
 * Returns 0 on success, -EINVAL if @s is NULL.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
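
/*
 * Illustrative pairing (a sketch, with timeout_usecs a caller-chosen
 * budget): polling emission is meant for contexts that cannot sleep,
 * where the caller busy-waits on the returned sequence number instead
 * of waiting on a dma_fence:
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq))
 *		amdgpu_fence_wait_polling(ring, seq, timeout_usecs);
 */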

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value.  Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);
	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
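
/*
 * Worked example with illustrative numbers: if num_fences_mask == 31,
 * last_seq == 30 and the newly read seq == 33, the masked walk above
 * visits slots 31, 0 and 1 (31 & 31, 32 & 31, 33 & 31), signaling each
 * stored fence exactly once before the masked counters meet.
 */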

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to retrieve the ring
 *
 * Checks for fence activity in case an interrupt was missed.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for the seq number
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-waits until the given sequence number has signaled on the
 * requested ring or the timeout expires (all asics).
 * Returns the remaining timeout if the sequence number signaled in
 * time, 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
		/* the signed difference is wraparound-safe for seq numbers */
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
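
/*
 * Worked example with illustrative numbers: if last_seq == 0xfffffffe
 * and sync_seq has wrapped to 0x1, then
 * emitted = 0x100000000 - 0xfffffffe + 0x1 = 0x3, so the lower 32 bits
 * correctly report three outstanding fences across the 32-bit wrap.
 */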

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->datasize, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	if (irq_src)
		amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016jx, "
		"cpu addr 0x%p\n", ring->idx,
		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	lockinit(&ring->fence_drv.lock, "agrfdl", 0, LK_CANRECURSE);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to set up the GPU scheduler for the KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* in the non-sriov case, no timeout is enforced on compute rings */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		    && !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
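
/*
 * Sizing note (worked example): with num_hw_submission == 32 the fences
 * array above holds 64 slots and num_fences_mask == 0x3f, so the slot
 * for a sequence number is simply seq & 0x3f; the power-of-two check at
 * the top of amdgpu_fence_driver_init_ring() is what keeps that masking
 * valid.
 */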

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for the gpu to finish processing the current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay the GPU reset until resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring whose latest fence should be signaled
 *
 * Writes the current sync_seq as the fence value so that all emitted
 * fences read back as completed, then processes them.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with the fence lock held; it arms the
 * fallback timer so the fence is signaled even if the completion
 * interrupt is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};
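
/*
 * Illustrative control flow (a sketch of the dma_fence contract, not
 * extra driver code): dma_fence_wait() on an unsignaled amdgpu fence
 * first invokes .enable_signaling, which arms the fallback timer above,
 * then sleeps until amdgpu_fence_process() calls dma_fence_signal();
 * the final dma_fence_put() lands in .release, which defers the actual
 * kmem_cache_free() to an RCU grace period via amdgpu_fence_free().
 */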

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL, true);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev,
						amdgpu_debugfs_fence_list_sriov,
						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}