/*	$OpenBSD: radeon_sa.c,v 1.8 2015/04/12 03:54:10 jsg Exp $	*/
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated,
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each
 * ring. We just wait for any of those fences to complete.
 */
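/* Illustrative sketch (editorial, not part of the driver): with a
 * 1024 byte manager and three live allocations, olist and hole could
 * look like:
 *
 *   olist:  head -> [A 0..256] -> [B 256..512] -> [C 512..768]
 *   hole:   &C->olist, so the free span is [768..1024)
 *
 * A new request is first tried in [768..1024).  If it does not fit
 * and A's fence has signaled, A is removed and the hole can wrap back
 * to the list head, making [0..256) usable again.
 */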
#include <dev/pci/drm/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

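/*
 * Editorial usage sketch, not taken from this file; "mgr" and the
 * sizes are hypothetical:
 *
 *	struct radeon_sa_manager mgr;
 *	int r;
 *
 *	r = radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024, 256,
 *				      RADEON_GEM_DOMAIN_GTT);
 *	if (r == 0)
 *		r = radeon_sa_bo_manager_start(rdev, &mgr);
 *	...
 *	radeon_sa_bo_manager_suspend(rdev, &mgr);
 *	radeon_sa_bo_manager_fini(rdev, &mgr);
 */
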
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

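/* Worked example for the two helpers above: if the hole sits on a bo
 * with eoffset == 256 and the next entry in olist has soffset == 512,
 * then hole_soffset() returns 256 and hole_eoffset() returns 512,
 * i.e. the hole spans 256 free bytes.  With the hole at the list head,
 * soffset is 0; with nothing after it, eoffset is sa_manager->size.
 */
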
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

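/* The alignment math above on concrete numbers: with soffset == 100
 * and align == 64, wasted = (64 - (100 % 64)) % 64 = 28, so a request
 * of size 128 needs 156 free bytes and lands at offset 128.  When
 * soffset is already aligned, the outer "% align" makes wasted 0.
 */
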
/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * after the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		   so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

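/* The wrap-around scoring above on concrete numbers: with size == 1024
 * and the hole ending at soffset == 900, a signaled sa_bo at offset
 * 950 scores 950 - 900 = 50, while one at offset 100 wraps to
 * 100 + 1024 - 900 = 224; the smallest score wins, and since "best"
 * starts at 2 * size, any candidate beats the initial value.
 */
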
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r, error;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT && block) {
			r = 0;
			while (r == 0) {
				if (radeon_sa_event(sa_manager, size, align))
					break;
				error = msleep(&sa_manager->wq, &sa_manager->wq.lock,
				    PZERO | PCATCH, "samgr", 0);
				if (error == ERESTART)
					error = EINTR; /* XXX */
				r = -error;
			}

		} else if (r == -ENOENT) {
			r = -ENOMEM;
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

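/*
 * Editorial pairing sketch, not taken from this file; "mgr" and the
 * fence are hypothetical:
 *
 *	struct radeon_sa_bo *sa_bo;
 *	int r;
 *
 *	r = radeon_sa_bo_new(rdev, &mgr, &sa_bo, 2048, 256, true);
 *	if (r == 0) {
 *		... submit work referencing the sub-allocation ...
 *		radeon_sa_bo_free(rdev, &sa_bo, fence);
 *	}
 *
 * A fence that has not signaled yet parks the sa_bo on that ring's
 * fence list; a NULL or already signaled fence frees it immediately.
 */
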
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif