/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "soc21.h"

static const struct amd_ip_funcs soc21_common_ip_funcs;

/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
	.codec_array = vcn_4_0_0_video_codecs_encode_array,
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
	.codec_array = vcn_4_0_0_video_codecs_decode_array,
};

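/*
 * Report the encode/decode codec capability tables for the VCN 4.0.x
 * instances found on SOC21; unknown VCN IP versions are rejected.
 */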
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->ip_versions[UVD_HWIP][0]) {

	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 2):
		if (encode)
			*codecs = &vcn_4_0_0_video_codecs_encode;
		else
			*codecs = &vcn_4_0_0_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * Indirect registers accessor
 */
static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

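/*
 * DIDT indirect register accessors: program DIDT_IND_INDEX and access
 * DIDT_IND_DATA under didt_idx_lock so the index/data pair stays atomic.
 */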
static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

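/*
 * Program GRBM_GFX_CNTL so that subsequent GFX register accesses are
 * routed to the selected ME/pipe/queue and VMID.
 */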
void soc21_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};

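/*
 * Read a register with a specific SE/SH selected via the GRBM index;
 * 0xffffffff means broadcast. The selection is restored to broadcast
 * before returning.
 */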
static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) && adev->gfx.config.gb_addr_config)
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

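/*
 * Allow-listed register reads: only offsets present in
 * soc21_allowed_read_registers[] are honoured, everything else
 * returns -EINVAL.
 */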
static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
		en = &soc21_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc21_get_register_value(adev,
					       soc21_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
#endif

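/*
 * Pick the ASIC reset method: honour a valid amdgpu_reset_method module
 * parameter override, otherwise decide based on the MP1 (SMU) IP version,
 * falling back to BACO when supported and mode1 otherwise.
 */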
static enum amd_reset_method
soc21_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
				  amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
		return AMD_RESET_METHOD_MODE1;
	case IP_VERSION(13, 0, 4):
		return AMD_RESET_METHOD_MODE2;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

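/* Perform a full ASIC reset using the method selected above. */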
static int soc21_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (soc21_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = amdgpu_dpm_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}

static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

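/* Let the NBIO block program ASPM on dGPUs when ASPM is allowed. */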
static void soc21_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

const struct amdgpu_ip_block_version soc21_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc21_common_ip_funcs,
};

static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		return false;
	default:
		return true;
	}
}

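/*
 * Request a reset during init when the sOS sign-of-life register shows
 * that PSP firmware is already running on a dGPU.
 */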
static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys driver
	 * and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */

	return 0;
}

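/*
 * SOC21 reuses the Navi1x doorbell layout; assign the per-ring doorbell
 * indices and the SDMA doorbell range accordingly.
 */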
static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}

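/*
 * Entering the UMD stable pstate parks the RLC in safe mode and turns off
 * perfmon MGCG (when the GFX callback is present); leaving it reverses both.
 */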
static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
					  bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	return 0;
}

static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
	.read_disabled_bios = &soc21_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc21_read_register,
	.reset = &soc21_asic_reset,
	.reset_method = &soc21_asic_reset_method,
	.set_vga_state = &soc21_vga_set_state,
	.get_xclk = &soc21_get_xclk,
	.set_uvd_clocks = &soc21_set_uvd_clocks,
	.set_vce_clocks = &soc21_set_vce_clocks,
	.get_config_memsize = &soc21_get_config_memsize,
	.init_doorbell_index = &soc21_init_doorbell_index,
	.need_full_reset = &soc21_need_full_reset,
	.need_reset_on_init = &soc21_need_reset_on_init,
	.get_pcie_replay_count = &soc21_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &soc21_pre_asic_init,
	.query_video_codecs = &soc21_query_video_codecs,
	.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};

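/*
 * Early init: wire up the indirect register accessors and the ASIC
 * callbacks, then set the clock/power gating flags and external revision
 * id per GC IP version.
 */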
static int soc21_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc21_pcie_rreg;
	adev->pcie_wreg = &soc21_pcie_wreg;
	adev->pcie_rreg64 = &soc21_pcie_rreg64;
	adev->pcie_wreg64 = &soc21_pcie_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &soc21_didt_rreg;
	adev->didt_wreg = &soc21_didt_wreg;

	adev->asic_funcs = &soc21_asic_funcs;

	adev->rev_id = soc21_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
#if 0
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
		break;
	case IP_VERSION(11, 0, 2):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x10;
		break;
	case IP_VERSION(11, 0, 1):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags =
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case IP_VERSION(11, 0, 3):
		adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x20;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int soc21_common_late_init(void *handle)
{
	return 0;
}

static int soc21_common_sw_init(void *handle)
{
	return 0;
}

static int soc21_common_sw_fini(void *handle)
{
	return 0;
}

static int soc21_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc21_pcie_gen3_enable(adev);
	/* enable aspm */
	soc21_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc21_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, false);

	return 0;
}

static int soc21_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_fini(adev);
}

static int soc21_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_init(adev);
}

static bool soc21_common_is_idle(void *handle)
{
	return true;
}

static int soc21_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc21_common_soft_reset(void *handle)
{
	return 0;
}

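/*
 * Clockgating is forwarded to the NBIO and HDP blocks for the NBIO IP
 * versions listed below; other versions are left untouched.
 */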
static int soc21_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
	case IP_VERSION(7, 7, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

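/*
 * Powergating currently only toggles LSDMA memory power gating on the
 * LSDMA 6.0.x versions listed below.
 */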
static int soc21_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		adev->lsdma.funcs->update_memory_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	return;
}

static const struct amd_ip_funcs soc21_common_ip_funcs = {
	.name = "soc21_common",
	.early_init = soc21_common_early_init,
	.late_init = soc21_common_late_init,
	.sw_init = soc21_common_sw_init,
	.sw_fini = soc21_common_sw_fini,
	.hw_init = soc21_common_hw_init,
	.hw_fini = soc21_common_hw_fini,
	.suspend = soc21_common_suspend,
	.resume = soc21_common_resume,
	.is_idle = soc21_common_is_idle,
	.wait_for_idle = soc21_common_wait_for_idle,
	.soft_reset = soc21_common_soft_reset,
	.set_clockgating_state = soc21_common_set_clockgating_state,
	.set_powergating_state = soc21_common_set_powergating_state,
	.get_clockgating_state = soc21_common_get_clockgating_state,
};
807