/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole" and always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after the last allocation is the oldest
 * bo we allocated and thus the first one that should no longer be in
 * use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the
 * closest completed bo, if one exists. If none exists and we are not
 * asked to block, we report failure to allocate.
 *
 * If we are asked to block, we collect the oldest fence of each ring
 * and wait for any one of those fences to complete.
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_sa.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
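/*
 * Illustrative layout (added note, not from the original source): olist
 * keeps allocations in offset order and "hole" points at the last one
 * handed out, so the free space being allocated from always sits right
 * behind the hole, while the bos after it (wrapping past the end) are
 * the oldest and get recycled first:
 *
 *   0                                        size
 *   +-----+-----+-----+...............+-----+
 *   | bo3 | bo4 | bo5 |     free      | bo2 |
 *   +-----+-----+-----+...............+-----+
 *                ^hole                 ^oldest, freed first
 */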

#include <drm/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain, u32 flags)
{
	int i, r;

	lockinit(&sa_manager->wq_lock, "drm__radeon_sa_manager_wq_mtx", 0,
		 LK_CANRECURSE);
	cv_init(&sa_manager->wq, "drm__radeon_sa_manager__wq");
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, flags, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
	cv_destroy(&sa_manager->wq);
	lockuninit(&sa_manager->wq_lock);
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

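	/* Walk forward from the hole and free every consecutive
	 * allocation whose fence has already signaled; stop at the
	 * first one still in flight so olist keeps its offset order.
	 */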
	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

/* start offset of the hole: the end of the bo "hole" points at,
 * or 0 when the hole is at the head of the list
 */
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

/* end offset of the hole: the start of the next bo,
 * or the manager size when the hole reaches the end of the buffer
 */
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

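		/* flist is filled in submission order, so the first
		 * entry carries the oldest fence on this ring; if it
		 * has not signaled yet, nothing behind it has either.
		 */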
		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), M_DRM,
			 M_WAITOK | M_ZERO);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				lockmgr(&sa_manager->wq_lock, LK_RELEASE);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		lockmgr(&sa_manager->wq_lock, LK_RELEASE);
		r = radeon_fence_wait_any(rdev, fences, false);
		lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
		/* if we have nothing to wait for, block until space frees up */
		if (r == -ENOENT) {
			while (!radeon_sa_event(sa_manager, size, align)) {
				r = -cv_wait_sig(&sa_manager->wq,
						 &sa_manager->wq_lock);
				if (r != 0)
					break;
			}
		}

	} while (!r);

	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	cv_broadcast(&sa_manager->wq);
	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
	*sa_bo = NULL;
}
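
/*
 * Usage sketch (added illustration, not part of the original file):
 * a caller typically grabs a slice, lets the GPU consume it, then
 * hands the slice back together with the fence protecting it.  The
 * manager field "ring_tmp_bo" and the "fence" variable below are
 * assumptions made for this example only.
 *
 *	struct radeon_sa_bo *sa_bo;
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &sa_bo, 4096, 256);
 *	if (r)
 *		return r;
 *	... fill radeon_sa_bo_cpu_addr(sa_bo), submit work that reads
 *	    radeon_sa_bo_gpu_addr(sa_bo) and produces "fence" ...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 *
 * Passing a still-active fence parks the allocation on that ring's
 * flist; passing NULL (or a signaled fence) recycles it immediately.
 */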
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
}
#endif