/* openbsd/sys/dev/pci/drm/amd/amdgpu/amdgpu_sync.c (revision 8e8da360) */
// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence-chain.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};
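
/*
 * Entries are hashed by dma_fence context (see amdgpu_sync_fence() below),
 * so a sync object keeps at most one fence per context and replaces it with
 * the later fence when another one from the same context is added.
 */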

static struct pool amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
}
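
/*
 * Typical lifecycle of a sync object (illustrative sketch, not code from
 * this file; "fence" stands for any dma_fence the caller obtained):
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(&sync, fence);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */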

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}
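
/*
 * Note that dma_fence_is_later() compares sequence numbers and is only
 * meaningful for fences from the same context; amdgpu_sync_add_later()
 * below guarantees that by matching f->context first.
 */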

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success, -ENOMEM if no
 * entry could be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f))
		return 0;

#ifdef __linux__
	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
#else
	e = pool_get(&amdgpu_sync_slab, PR_WAITOK);
#endif
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/* Determine based on the owner and mode if we should sync to a fence or not */
static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
				   enum amdgpu_sync_mode mode,
				   void *owner, struct dma_fence *f)
{
	void *fence_owner = amdgpu_sync_get_owner(f);

	/* Always sync to moves, no matter what */
	if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
		return true;

	/* We only want to trigger KFD eviction fences on
	 * evict or move jobs. Skip KFD fences otherwise.
	 */
	if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
	    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
		return false;

	/* Never sync to VM updates either. */
	if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
	    owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
	    owner != AMDGPU_FENCE_OWNER_KFD)
		return false;

	/* Ignore fences depending on the sync mode */
	switch (mode) {
	case AMDGPU_SYNC_ALWAYS:
		return true;

	case AMDGPU_SYNC_NE_OWNER:
		if (amdgpu_sync_same_dev(adev, f) &&
		    fence_owner == owner)
			return false;
		break;

	case AMDGPU_SYNC_EQ_OWNER:
		if (amdgpu_sync_same_dev(adev, f) &&
		    fence_owner != owner)
			return false;
		break;

	case AMDGPU_SYNC_EXPLICIT:
		return false;
	}

	WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
	     "Adding eviction fence to sync obj");
	return true;
}
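
/*
 * Summary of the switch above:
 *
 *	AMDGPU_SYNC_ALWAYS	sync to every fence that passed the owner
 *				checks
 *	AMDGPU_SYNC_NE_OWNER	skip fences from the same device and the
 *				same owner
 *	AMDGPU_SYNC_EQ_OWNER	skip fences from the same device but a
 *				different owner
 *	AMDGPU_SYNC_EXPLICIT	skip everything except the undefined owner
 *				(move) fences handled before the switch
 */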

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to all fences in the reservation object that match the sync mode and
 * owner.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;
	int r;

	if (resv == NULL)
		return -EINVAL;

	/* TODO: Use DMA_RESV_USAGE_READ here */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
		dma_fence_chain_for_each(f, f) {
			struct dma_fence *tmp = dma_fence_chain_contained(f);

			if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
				r = amdgpu_sync_fence(sync, f);
				dma_fence_put(f);
				if (r)
					return r;
				break;
			}
		}
	}
	return 0;
}
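
/*
 * For dma_fence_chain containers the inner loop above walks the chain from
 * the newest link backwards; adding the first link that needs syncing is
 * enough, since a chain link only signals once all older links have
 * signaled as well.
 */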

/* Free the entry back to the slab */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
	hash_del(&e->node);
	dma_fence_put(e->fence);
#ifdef __linux__
	kmem_cache_free(amdgpu_sync_slab, e);
#else
	pool_put(&amdgpu_sync_slab, e);
#endif
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			amdgpu_sync_entry_free(e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}
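
/*
 * Jobs on one ring are executed in submission order, so for a fence from
 * the ring we are about to submit to it is sufficient that its job has been
 * scheduled; this is why the scheduled fence instead of the finished fence
 * is returned above.
 */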

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
#ifdef __linux__
		kmem_cache_free(amdgpu_sync_slab, e);
#else
		pool_put(&amdgpu_sync_slab, e);
#endif

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f);
			if (r)
				return r;
		} else {
			amdgpu_sync_entry_free(e);
		}
	}

	return 0;
}

/**
 * amdgpu_sync_push_to_job - push fences into job
 * @sync: sync object to get the fences from
 * @job: job to push the fences into
 *
 * Add all unsignaled fences from sync to job.
 */
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (dma_fence_is_signaled(f)) {
			amdgpu_sync_entry_free(e);
			continue;
		}

		dma_fence_get(f);
		r = drm_sched_job_add_dependency(&job->base, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}
	return 0;
}

/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true, the waits are interruptible
 *
 * Wait for all fences in the sync object to signal, freeing each entry once
 * its fence has signaled.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		amdgpu_sync_entry_free(e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node)
		amdgpu_sync_entry_free(e);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
#ifdef __linux__
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;
#else
	pool_init(&amdgpu_sync_slab, sizeof(struct amdgpu_sync_entry),
	    CACHELINESIZE, IPL_TTY, 0, "amdgpu_sync", NULL);
#endif

	return 0;
}
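
/*
 * On OpenBSD the Linux slab cache is replaced by a pool(9) allocator; the
 * #ifdef __linux__ blocks in this file keep the two allocation paths side
 * by side.
 */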

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
#ifdef __linux__
	kmem_cache_destroy(amdgpu_sync_slab);
#else
	pool_destroy(&amdgpu_sync_slab);
#endif
}