/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

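/*
 * Golden register tables: each entry is a {register, AND mask, OR value}
 * triple consumed by amdgpu_device_program_register_sequence(), which
 * read-modify-writes the register (an all-ones mask writes the value
 * directly).
 */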
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

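/*
 * gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() bracket sequences that reprogram
 * the MC: BLACKOUT_MODE=1 stalls MC client traffic (and CPU framebuffer
 * access is blocked via BIF_FB_EN) so aperture registers can be changed
 * safely.
 */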
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (((adev->pdev->device == 0x67ef) &&
		     ((adev->pdev->revision == 0xe0) ||
		      (adev->pdev->revision == 0xe5))) ||
		    ((adev->pdev->device == 0x67ff) &&
		     ((adev->pdev->revision == 0xcf) ||
		      (adev->pdev->revision == 0xef) ||
		      (adev->pdev->revision == 0xff))))
			chip_name = "polaris11_k";
		else if ((adev->pdev->device == 0x67ef) &&
			 (adev->pdev->revision == 0xe2))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if ((adev->pdev->device == 0x67df) &&
		    ((adev->pdev->revision == 0xe1) ||
		     (adev->pdev->revision == 0xf7)))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (((adev->pdev->device == 0x6987) &&
		     ((adev->pdev->revision == 0xc0) ||
		      (adev->pdev->revision == 0xc3))) ||
		    ((adev->pdev->device == 0x6981) &&
		     ((adev->pdev->revision == 0x00) ||
		      (adev->pdev->revision == 0x01) ||
		      (adev->pdev->revision == 0x10))))
			chip_name = "polaris12_k";
		else
			chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

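	/* request_firmware() resolves this name against the kernel's
	 * firmware search path, typically /lib/firmware
	 * (e.g. /lib/firmware/amdgpu/tonga_mc.bin).
	 */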
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards;
	 * the vbios does this for us in asic_init in that case.
	 * Likewise skip it on a VF, because the hypervisor will do it
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
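	/* io_debug entries are (index, data) dword pairs, hence the
	 * divide by (4 * 2) to get the number of register writes below.
	 */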
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards;
	 * the vbios does this for us in asic_init in that case.
	 * Likewise skip it on a VF, because the hypervisor will do it
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

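/*
 * MC_VM_FB_LOCATION stores the framebuffer base in its low 16 bits in
 * 16MB units (i.e. byte address >> 24), hence the mask and shift below.
 */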
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;
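		/* NOOFCHAN is an encoded channel count, not a raw number */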
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on vi */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */
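/*
 * Illustrative only (not used by the driver): packing a VI PTE by hand
 * from the layout above, for a readable/writeable 4K page at physical
 * address 'pa':
 *
 *	u64 pte = (pa & 0xFFFFFFF000ULL) |	// bits 39:12, page base
 *		  AMDGPU_PTE_READABLE |		// bit 5
 *		  AMDGPU_PTE_WRITEABLE |	// bit 6
 *		  AMDGPU_PTE_VALID;		// bit 0
 */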

static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

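	/* When enabling, the four PRT apertures together cover the whole VA
	 * range except the reserved areas at the bottom and top.
	 */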
	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
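	/* Context 0 is the kernel's GART context: it covers only the GART
	 * aperture and uses a flat, single-level page table
	 * (PAGE_TABLE_DEPTH = 0).
	 */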
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME: start with 4G; switch to the full VM size space once
	 * 2-level page tables are in use
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}
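	/* Contexts 1-15 all start out pointing at the GART table; the VM
	 * code rebinds them via gmc_v8_0_emit_flush_gpu_tlb() as VMIDs are
	 * handed out.
	 */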

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
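	/* one 8-byte (64-bit) PTE per GART page */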
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: pasid of the faulting process
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, pasid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_late_init(adev);

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
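		/* assume 4 bytes per pixel (32bpp) for the active scanout */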
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;
	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB ((1 << 24) 4k pages).
	 * Max GPUVM size for VI is 40 bits.
	 */
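	/* args: minimum VM size in GB, default fragment size (2^9 pages),
	 * max page-table level and max address width in bits; see
	 * amdgpu_vm_adjust_size() for the exact semantics.
	 */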
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("amdgpu: No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
			adev->asic_type == CHIP_POLARIS10 ||
			adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	}

	adev->gmc.srbm_soft_reset = 0;
	return false;
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

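		/* PROTECTIONS bits, as decoded below: bits 0-2 flag a
		 * translation/validity fault, bit 3 read, bit 4 write,
		 * bit 5 execute.
		 */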
		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

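/*
 * The two helpers below toggle MC medium-grain clock gating (MGCG) and
 * memory light sleep (LS) by setting or clearing the respective enable
 * bit in each MC hub/client clock-gating register.
 */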
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v8_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};