/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
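		/* write mode 1: the field was cleared above, so OR in the low
		 * bit (assumes BLACKOUT_MODE occupies the low bits)
		 */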
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);

	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
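	/* the FB_LOCATION base field is in units of 16 MB, hence the shift */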
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
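	/* these are raw register offsets; no symbolic names for them appear
	 * in the CIK headers used here
	 */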
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* VRAM size in MB, as reported by CONFIG_MEMSIZE */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) &&
	    adev->gmc.real_vram_size > adev->gmc.aper_size &&
	    !amdgpu_passthrough(adev)) {
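		/* MC_VM_FB_OFFSET is in units of 4 MB, hence the shift by 22 */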
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:     /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
		case CHIP_HAWAII:  /* UVD, VCE do not support GPUVM */
		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
#endif
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
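			/* read back, presumably to post the invalidate request
			 * before continuing
			 */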
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 covers the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

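	/* the page table base registers for VMIDs 0-7 and 8-15 live in two
	 * separate banks
	 */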
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
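	/* a CIK PDE is a 4K-aligned 40-bit address: bits 63-40 and 11-0
	 * must be clear
	 */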
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
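	/* CIK PTEs carry no execute permission, and PRT is handled through
	 * the VM_PRT aperture registers (see gmc_v7_0_set_prt) rather than
	 * a PTE flag
	 */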
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
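		/* cover the whole manageable VA range, minus the reserved
		 * pages at either end
		 */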
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 tmp, field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

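	/* raw register offsets 0x575-0x577, cleared here; no symbolic names
	 * for them appear in the public headers
	 */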
	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME: start with 4GB; once we switch to 2-level page tables,
	 * use the full VM address space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
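	/* the field encodes the block size as log2(pages) - 9, i.e. relative
	 * to the 512-page minimum
	 */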
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gmc_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);
	else
		return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
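		/* read back, presumably to post the reset write before the
		 * delay below
		 */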
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pde = gmc_v7_0_get_vm_pde,
	.get_vm_pte = gmc_v7_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};