xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/amdgpu_sync.c (revision 097a140d)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;		/* entry in the sync object's fence hash */
	struct dma_fence	*fence;		/* fence to sync to */
	bool	explicit;			/* whether this is an explicit dependency */
};

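/* Sync entries come from a kmem_cache on Linux and from a pool(9) on OpenBSD. */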
static struct pool amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
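
/*
 * Typical lifecycle (illustrative sketch only, not lifted from a particular
 * caller; adev, resv and owner come from the caller's context): gather
 * dependencies into the sync object, consume them, then free it.
 *
 *	struct amdgpu_sync sync;
 *	struct dma_fence *f;
 *	bool explicit;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
 *	while ((f = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		... wait on or schedule against f, then drop the reference ...
 *		dma_fence_put(f);
 *	}
 *	amdgpu_sync_free(&sync);
 */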

/**
 * amdgpu_sync_same_dev - test if fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether this is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
				  bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve explicit flag to not lose pipeline sync */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: if this is an explicit dependency
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
		      bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

#ifdef __linux__
	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
#else
	e = pool_get(&amdgpu_sync_slab, PR_WAITOK);
#endif
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence, false);
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to all fences in the reservation object that are relevant for the
 * given owner and sync mode.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = dma_resv_get_excl(resv);
	r = amdgpu_sync_fence(sync, f, false);

	flist = dma_resv_get_list(resv);
	if (!flist || r)
		return r;

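	/* Then check each shared fence against the owner and the sync mode */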
	for (i = 0; i < flist->shared_count; ++i) {
		void *fence_owner;

		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));

		fence_owner = amdgpu_sync_get_owner(f);

		/* Always sync to moves, no matter what */
		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
			r = amdgpu_sync_fence(sync, f, false);
			if (r)
				break;
		}

		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Ignore fences depending on the sync mode */
		switch (mode) {
		case AMDGPU_SYNC_ALWAYS:
			break;

		case AMDGPU_SYNC_NE_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner == owner)
				continue;
			break;

		case AMDGPU_SYNC_EQ_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner != owner)
				continue;
			break;

		case AMDGPU_SYNC_EXPLICIT:
			continue;
		}

		r = amdgpu_sync_fence(sync, f, false);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
#ifdef __linux__
			kmem_cache_free(amdgpu_sync_slab, e);
#else
			pool_put(&amdgpu_sync_slab, e);
#endif
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: set to true if the returned fence is an explicit dependency
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
#ifdef __linux__
		kmem_cache_free(amdgpu_sync_slab, e);
#else
		pool_put(&amdgpu_sync_slab, e);
#endif

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
#ifdef __linux__
			kmem_cache_free(amdgpu_sync_slab, e);
#else
			pool_put(&amdgpu_sync_slab, e);
#endif
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}

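/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: whether the wait should be interruptible
 *
 * Waits for every remembered fence to signal, dropping each entry once its
 * fence has completed. Returns 0 on success or a negative error code if a
 * wait fails or is interrupted.
 */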
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
#ifdef __linux__
		kmem_cache_free(amdgpu_sync_slab, e);
#else
		pool_put(&amdgpu_sync_slab, e);
#endif
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
#ifdef __linux__
		kmem_cache_free(amdgpu_sync_slab, e);
#else
		pool_put(&amdgpu_sync_slab, e);
#endif
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Create the allocator used for sync entries.
 */
int amdgpu_sync_init(void)
{
#ifdef __linux__
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;
#else
	pool_init(&amdgpu_sync_slab, sizeof(struct amdgpu_sync_entry),
	    CACHELINESIZE, IPL_TTY, 0, "amdgpu_sync", NULL);
#endif

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Destroy the allocator used for sync entries.
 */
void amdgpu_sync_fini(void)
{
#ifdef __linux__
	kmem_cache_destroy(amdgpu_sync_slab);
#else
	pool_destroy(&amdgpu_sync_slab);
#endif
}