/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/**
 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for pdb0 and map it for CPU access.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
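	/*
	 * Each PDE0 entry (used here as a PTE) covers
	 * 2^vmid0_page_table_block_size * 2M of VRAM, hence the page shift
	 * below. One extra entry is reserved for the pointer to the GART
	 * page table (see amdgpu_gmc_init_pdb0()).
	 */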
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

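	/*
	 * Page tables live either in GTT (use the DMA address of the first
	 * page) or in VRAM (use the BO's GPU offset).
	 */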
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
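	/* Keep only the 4K-aligned physical page address (bits 47:12)
	 * and OR in the access flags.
	 */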
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

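	/*
	 * Only single-page BOs that are not CPU cached can be reached
	 * through the AGP aperture, and the page must fit inside the
	 * aperture window.
	 */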
	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
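	/* Honor the vram_limit module parameter (in MB), if set */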
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

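	/*
	 * Without an XGMI hive the FB aperture is just the local VRAM;
	 * for hives the FB range is set up elsewhere to span all nodes.
	 */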
	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function is only used when GART is used for FB translation. In
 * that case, we use the sysvm aperture (vmid0 page tables) for both vram
 * and gart (aka system memory) access.
 *
 * GPUVM (and our organization of vmid0 page tables) requires the sysvm
 * aperture to be placed at an address aligned to 8 times the native
 * page size. For example, if vm_context0_cntl.page_table_block_size
 * is 12, then the native page size is 8G (2M * 2^12), so sysvm should
 * start at a 64G-aligned address. For simplicity, we just put sysvm at
 * address 0. So vram starts at address 0 and gart is right after vram.
 */
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;

	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
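	/* GART (system memory) follows immediately after the hive's VRAM */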
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 * If GART size is bigger than the space left then we adjust the GART size.
 * Thus this function will never fail.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
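	/* size_bf is the space below the FB, size_af the space above it */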
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

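	/*
	 * Place GART below the FB if that region fits and is the smaller
	 * one, or if the region above the FB is too small; otherwise put
	 * GART at the top of the usable address space.
	 */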
	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are setup.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

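	/* The AGP aperture is not used under SR-IOV, leave it empty */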
	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

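	/*
	 * Measure the 16GB-aligned gaps below and above the FB, taking the
	 * GART placement into account, and use the larger one for AGP.
	 */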
	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

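	/* The faulting address and PASID packed into a single lookup key */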
	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}

int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_late_init) {
		r = adev->umc.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_late_init) {
		r = adev->mmhub.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

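	/* Use the driver's XGMI RAS handling only when the GPU is not
	 * connected to the CPU over XGMI.
	 */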
	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init) {
		r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return 0;
}

void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_fini)
		adev->umc.ras_funcs->ras_fini(adev);

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_fini)
		adev->mmhub.ras_funcs->ras_fini(adev);

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_fini)
		adev->gmc.xgmi.ras_funcs->ras_fini(adev);
}

/*
 * The latest engine allocation on gfx9/10 is:
 * Engine 2, 3: firmware
 * Engine 0, 1, 4~16: amdgpu ring,
 * subject to change when ring number changes
 * Engine 17: Gart flushes
 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

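		/* The MES ring is not assigned a VM invalidation engine here */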
		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

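		/* ffs() is 1-based; store a 0-based engine index and mark it used */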
		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

/**
 * amdgpu_gmc_tmz_set - check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
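	/* amdgpu_tmz module parameter: -1 = auto (per-ASIC default), 0 = off, 1 = on */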
	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
		/* Don't enable it by default yet. */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_warn(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}

/**
 * amdgpu_gmc_noretry_set - set per asic noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

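	/*
	 * amdgpu_noretry module parameter: -1 = auto (per-ASIC default),
	 * 0 = retry enabled, 1 = retry disabled.
	 */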
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		/*
		 * noretry = 0 will cause kfd page fault tests to fail
		 * for some ASICs, so set the default to 1 for these ASICs.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry;
		 * regardless of what we decide for other
		 * asics, we should leave Raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * Default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
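	/* Set or clear the VM fault control bits in all 16 VM contexts of this hub */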
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = RREG32(reg);
		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		WREG32(reg, tmp);
	}
}

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overwrites GART, which by default gets placed in the first
	 * 8M, and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

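	/*
	 * Split the reservation: the first AMDGPU_VBIOS_VGA_ALLOCATION bytes
	 * stay as stolen VGA memory, anything beyond that becomes the
	 * extended stolen reservation.
	 */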
	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}

/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * This function is only used when the GART page table is used
 * for FB address translation. In such a case, we construct
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization.
 * The first n entries of PDB0 are used as PTE by setting the
 * P bit to 1, pointing to VRAM. The n+1'th entry points
 * to a big PTB covering system memory.
 */
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags; /* TODO: it is UC. explore NC/RW? */
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
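	/* GPU physical address of the start of the whole hive's VRAM */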
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTE,
	 * pointing to vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB which has more than 512 entries, each
	 * pointing to a 4K system page
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}

/**
 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
 * address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address of buffer
 */
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
{
	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
}

/**
 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
 * GPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}

/**
 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
 * from CPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}