/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

#if 0
	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);
#endif

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
#if 0
	trace_amdgpu_pasid_freed(pasid);
#endif
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
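
/*
 * Illustrative usage sketch (not part of this file): a caller asks for
 * a PASID of at most 16 bits and releases it once nothing references
 * it anymore. The "vm" variable and the error path are assumptions
 * made for illustration only.
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *	if (pasid < 0)
 *		return pasid;	// -EINVAL, -ENOSPC or -ENOMEM
 *	vm->pasid = pasid;
 *	...
 *	amdgpu_pasid_free(vm->pasid);
 */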

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
			       unsigned int pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

	cb = kmalloc(sizeof(*cb), M_DRM, GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block for all the fences to complete.
	 */
	reservation_object_wait_timeout_rcu(resv, true, false,
					    MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

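/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * when a VM is torn down, its PASID may only be recycled once the GPU
 * has stopped using the root page directory it was bound to, e.g.:
 *
 *	amdgpu_pasid_free_delayed(root->tbo.resv, vm->pasid);
 *
 * where "root" is assumed to be the VM's root page-directory BO.
 */
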
/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait on to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		/* signal_on_any=true: the array signals as soon as any VMID
		 * becomes idle, not only when all of them do.
		 */
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(adev, sync, &array->base, false);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	bool needs_flush = vm->use_cpu_for_update;
	int r = 0;

	*id = vm->reserved_vmid[vmhub];
	if (updates && (*id)->flushed_updates &&
	    updates->context == (*id)->flushed_updates->context &&
	    !dma_fence_is_later(updates, (*id)->flushed_updates))
		updates = NULL;

	if ((*id)->owner != vm->entity.fence_context ||
	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
	    updates || !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* to prevent one context from being starved by another */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			r = amdgpu_sync_fence(adev, sync, tmp, false);
			return r;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
	if (r)
		return r;

	if (updates) {
		dma_fence_put((*id)->flushed_updates);
		(*id)->flushed_updates = dma_fence_get(updates);
	}
	job->vm_needs_flush = needs_flush;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;
		struct dma_fence *flushed;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->entity.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		flushed = (*id)->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
		if (r)
			return r;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put((*id)->flushed_updates);
			(*id)->flushed_updates = dma_fence_get(updates);
		}

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			struct dma_fence *updates = sync->last_vm_update;

			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(ring->adev, &id->active,
					      fence, false);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->entity.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

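/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * during job submission the scheduler grabs a VMID before the job runs
 * on the ring; the sync object collects any fences the job must wait
 * for first. "job->sync" and "fence" are assumptions for illustration.
 *
 *	r = amdgpu_vmid_grab(vm, ring, &job->sync, fence, job);
 *	if (r)
 *		return r;
 *	// job->vmid, job->pasid and job->vm_needs_flush are now set
 */
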
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limit of reserved vmids\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}

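/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * a VM that needs a dedicated VMID reserves one per VMHUB and returns
 * it when the VM no longer needs it; the hub constant is an assumed
 * example:
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
 */
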
/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		lockinit(&id_mgr->lock, "agdimgrl", 0, LK_CANRECURSE);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}
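
/*
 * Illustrative lifecycle sketch (assumed callers, not part of this
 * file): the VMID manager is brought up and torn down together with
 * the device's VM manager:
 *
 *	amdgpu_vmid_mgr_init(adev);	// e.g. from amdgpu_vm_manager_init()
 *	...
 *	amdgpu_vmid_mgr_fini(adev);	// e.g. from amdgpu_vm_manager_fini()
 */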