xref: /dragonfly/sys/dev/drm/radeon/radeon_sa.c (revision ad85b67d)
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after the last allocation is the oldest
 * bo we allocated, and thus the first one that should no longer be in
 * use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the
 * closest done bo, if one exists. If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and retry as soon as any of those fences completes.
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_sa.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
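
/*
 * Worked example (hypothetical sizes): after allocating A, B and C in
 * that order, olist is A -> B -> C and "hole" points at C. A new
 * request is first tried in the gap [C->eoffset, manager size). If it
 * does not fit there, radeon_sa_bo_next_hole() reclaims the closest
 * already-signaled bo after the hole and we retry; only when nothing
 * is signaled do we block on the fences collected along the way.
 */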

#include <drm/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain, u32 flags)
{
	int i, r;

	lockinit(&sa_manager->wq_lock, "drsamwm", 0, LK_CANRECURSE);
	cv_init(&sa_manager->wq, "drm__radeon_sa_manager__wq");
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, flags, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}
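
/*
 * Typical caller (sketch, modeled on the Linux radeon IB pool; the
 * exact size and flags used in this tree may differ):
 *
 *	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
 *				      RADEON_IB_POOL_SIZE * 64 * 1024,
 *				      RADEON_GPU_PAGE_SIZE,
 *				      RADEON_GEM_DOMAIN_GTT, 0);
 */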

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
	cv_destroy(&sa_manager->wq);
	lockuninit(&sa_manager->wq_lock);
}
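
/*
 * Pin the backing bo into its domain and map it: gpu_addr and cpu_ptr
 * become the base addresses that sub-allocation offsets are applied
 * to (see the debugfs dump at the end of this file, which prints
 * soffset + gpu_addr).
 */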

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}
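
/*
 * Unlink a sub-allocation from both the offset-ordered list and its
 * ring's fence list, stepping the hole pointer back when it pointed
 * at this bo, then free it.
 */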

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}
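
/*
 * The hole spans [soffset, eoffset): soffset is the end offset of the
 * bo the hole pointer designates (0 when it designates the list head),
 * eoffset is the start offset of the next bo in the list (the manager
 * size when the hole sits at the end of the buffer).
 */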

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}
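
/*
 * Alignment example for the "wasted" computation below: with
 * soffset = 100 and align = 64, wasted = (64 - 100 % 64) % 64 = 28,
 * so the allocation starts at offset 128; the outer modulo makes
 * wasted zero when soffset is already aligned.
 */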

static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check whether there is either a fence we can wait for or enough
 * free memory to satisfy the allocation directly.
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
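
/*
 * Try to move the hole past already-signaled allocations. Returns true
 * when the hole moved and the caller should retry the allocation,
 * false when every candidate bo is still fenced; in that case fences[]
 * holds the first unsignaled fence of each ring (NULL for rings with
 * none pending) for the caller to wait on.
 */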

static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo closest
	 * to the current hole
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		   so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
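
/*
 * Allocate size bytes from the manager; align must not exceed the
 * manager's alignment. The inner loop frees what is reclaimable and
 * tries the hole, then skips ahead via radeon_sa_bo_next_hole(); the
 * outer loop waits on the collected fences, or on the manager's
 * condvar when there is nothing to wait for, and starts over.
 */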

int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), M_DRM,
			 M_WAITOK | M_ZERO);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				lockmgr(&sa_manager->wq_lock, LK_RELEASE);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		lockmgr(&sa_manager->wq_lock, LK_RELEASE);
		r = radeon_fence_wait_any(rdev, fences, false);
		lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			while (!radeon_sa_event(sa_manager, size, align)) {
				r = -cv_wait_sig(&sa_manager->wq,
						 &sa_manager->wq_lock);
				if (r != 0)
					break;
			}
		}

	} while (!r);

	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}
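
/*
 * Typical lifecycle (sketch; radeon_sa_bo_gpu_addr() is the radeon.h
 * helper that adds soffset to the manager's gpu_addr):
 *
 *	struct radeon_sa_bo *sa_bo;
 *	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 256, 256);
 *	... emit GPU commands using radeon_sa_bo_gpu_addr(sa_bo) ...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 */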

void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	cv_broadcast(&sa_manager->wq);
	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
}
#endif