/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

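/**
 * gmc_v7_0_init_golden_registers - program golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program ASIC-specific golden register settings; currently only
 * Topaz/Iceland needs fixups here.
 */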
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

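/**
 * gmc_v7_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then block CPU framebuffer access
 * and put the MC into blackout mode so it can be safely
 * reprogrammed (CIK).
 */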
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

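/**
 * gmc_v7_0_mc_resume - resume the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU access to
 * the framebuffer (CIK).
 */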
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);

	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

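/**
 * gmc_v7_0_vram_gtt_location - try to find VRAM & GTT location
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the config
 *
 * Place VRAM at the FB location programmed by the VBIOS and
 * pick a GART location within the remaining address space (CIK).
 */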
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU &&
	    adev->gmc.real_vram_size > adev->gmc.aper_size) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:     /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
		case CHIP_HAWAII:  /* UVD, VCE do not support GPUVM */
		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
#endif
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}


/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

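/**
 * gmc_v7_0_emit_flush_gpu_tlb - flush the TLB from within a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: address of the new page directory
 *
 * Update the page table base address for @vmid and trigger a
 * TLB flush using ring commands (CIK).
 * Returns the input pd_addr.
 */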
static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

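/**
 * gmc_v7_0_emit_pasid_mapping - update the VMID-PASID mapping from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance
 * @pasid: pasid to map @vmid to
 *
 * Update the interrupt handler's VMID-to-PASID lookup table
 * using ring commands (CIK).
 */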
static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

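/**
 * gmc_v7_0_get_vm_pde - get the PDE for a GART/VM page table
 *
 * @adev: amdgpu_device pointer
 * @level: page table level
 * @addr: page directory entry address
 * @flags: page directory entry flags
 *
 * CIK needs no extra PDE flags; just sanity check that the
 * address is properly aligned and within range.
 */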
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

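/**
 * gmc_v7_0_get_vm_pte - get the PTE flags for the given mapping
 *
 * @adev: amdgpu_device pointer
 * @mapping: buffer object mapping
 * @flags: page table entry flags
 *
 * CIK supports neither executable pages nor PRT in the page
 * tables, so mask those flags out.
 */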
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

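/**
 * gmc_v7_0_gart_init - initialize the gart driver structures
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structures and allocate the GART
 * page table in VRAM (CIK).
 * Returns 0 for success, error for failure.
 */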
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

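/**
 * gmc_v7_0_enable_mc_ls - toggle MC memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable light sleep
 *
 * Set the memory light sleep enable bit in each of the MC
 * clockgating registers, honoring the AMD_CG_SUPPORT_MC_LS
 * flag (CIK).
 */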
static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

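/**
 * gmc_v7_0_enable_mc_mgcg - toggle MC medium grain clockgating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable clockgating
 *
 * Set the clockgating enable bit in each of the MC clockgating
 * registers, honoring the AMD_CG_SUPPORT_MC_MGCG flag (CIK).
 */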
static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

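/**
 * gmc_v7_0_convert_vram_type - translate the MC_SEQ_MISC0 memory type
 *
 * @mc_seq_vram_type: memory type field from MC_SEQ_MISC0
 *
 * Convert the hardware memory type encoding into the matching
 * AMDGPU_VRAM_TYPE_* value (CIK).
 */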
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gmc_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

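/**
 * gmc_v7_0_get_vbios_fb_size - size of the framebuffer reserved by the VBIOS
 *
 * @adev: amdgpu_device pointer
 *
 * If VGA mode is enabled, return the fixed VGA allocation;
 * otherwise compute the size from the active scanout viewport
 * at 4 bytes per pixel (CIK).
 */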
static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pde = gmc_v7_0_get_vm_pde,
	.get_vm_pte = gmc_v7_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};