/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM, M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
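	/* back the dma-fence with the shared fence_queue lock and a
	 * per-ring fence context (rdev->fence_context + ring) so fences
	 * on one ring stay ordered
	 */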
	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
		       &rdev->fence_queue.lock,
		       rdev->fence_context + ring,
		       seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so the unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = dma_fence_signal_locked(&fence->base);

		if (!ret)
			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	} else
		DMA_FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop, new fences need
	 * to be signaled continuously, i.e. radeon_fence_read needs to
	 * return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
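		/* the hardware only stores the low 32 bits; extend to 64 bits
		 * with the upper half of last_seq, and if the result appears
		 * to have gone backwards assume a 32-bit wraparound and take
		 * the upper half from last_emitted instead
		 */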
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop again we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
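	/* recover the ring index from this driver's position in the
	 * per-ring fence_drv array
	 */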
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016lx last fence id 0x%016lx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

static bool radeon_fence_is_signaled(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signaling on fence
 * @fence: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

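	/* arm the wait-queue callback; radeon_fence_check_signaled() will
	 * signal the fence once last_seq catches up with fence->seq
	 */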
	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	dma_fence_get(f);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		int ret;

		ret = dma_fence_signal(&fence->base);
		if (!ret)
			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers.  Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}

/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns the remaining time if the fence has passed, 0 when
 * the wait timed out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;
	int r_sig;

	/*
	 * This function should not be called on !radeon fences.
	 * If this is the case, it would mean this function can
	 * also be called on radeon fences belonging to another card.
	 * exclusive_lock is not held in that case.
	 */
	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
		return dma_fence_wait(&fence->base, intr);

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0) {
		return r;
	}

	r_sig = dma_fence_signal(&fence->base);
	if (!r_sig)
		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
	if (r > 0) {
		return 0;
	} else {
		return r;
	}
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	u64 seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		dma_fence_put(&tmp->base);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
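	/* use the writeback page (or the area behind the UVD firmware)
	 * when possible; otherwise fall back to a scratch register
	 */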
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of a GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default: WARN_ON_ONCE(1); return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

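	/* if the callback cannot be added the fence has already signaled,
	 * so return the full remaining timeout
	 */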
	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}

const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};