xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c (revision 510d2225)
1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "imu_v11_0.h"
34 #include "soc21.h"
35 #include "nvd.h"
36 
37 #include "gc/gc_11_0_0_offset.h"
38 #include "gc/gc_11_0_0_sh_mask.h"
39 #include "smuio/smuio_13_0_6_offset.h"
40 #include "smuio/smuio_13_0_6_sh_mask.h"
41 #include "navi10_enum.h"
42 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
43 
44 #include "soc15.h"
45 #include "soc15d.h"
46 #include "clearstate_gfx11.h"
47 #include "v11_structs.h"
48 #include "gfx_v11_0.h"
49 #include "nbio_v4_3.h"
50 #include "mes_v11_0.h"
51 
52 #define GFX11_NUM_GFX_RINGS		1
53 #define GFX11_MEC_HPD_SIZE	2048
54 
55 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
56 #define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388
57 
58 #define regCGTT_WD_CLK_CTRL		0x5086
59 #define regCGTT_WD_CLK_CTRL_BASE_IDX	1
60 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
61 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
62 
63 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
64 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
65 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
66 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
67 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
68 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
69 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
70 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
71 MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
72 MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
73 MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
74 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
75 MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
76 MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
77 MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
78 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
79 MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
80 MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
81 MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
82 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
83 MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
84 
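/*
 * Golden register settings: per-ASIC (register, and_mask, or_mask) entries
 * applied by soc15_program_register_sequence() from
 * gfx_v11_0_init_golden_registers() below.
 */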
85 static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
86 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
87 };
88 
89 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
90 {
91 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
92 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
93 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
94 	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
95 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
96 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
97 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
98 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
99 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
100 };
101 
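/*
 * Default SH_MEM_CONFIG value: 64-bit address mode, unaligned alignment
 * mode, and an initial instruction prefetch depth of 3.
 */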
102 #define DEFAULT_SH_MEM_CONFIG \
103 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
104 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
105 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
106 
107 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
108 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
109 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
110 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
111 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
112 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
113 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
114 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
115                                  struct amdgpu_cu_info *cu_info);
116 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
117 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
118 				   u32 sh_num, u32 instance);
119 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
120 
121 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
122 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
123 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
124 				     uint32_t val);
125 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
126 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
127 					   uint16_t pasid, uint32_t flush_type,
128 					   bool all_hub, uint8_t dst_sel);
129 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
130 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
131 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
132 				      bool enable);
133 
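/*
 * KIQ helper: emit a PACKET3_SET_RESOURCES on the KIQ ring so the CP
 * scheduler knows which compute queues (queue_mask) the driver owns.
 * GWS, OAC and GDS resources are left at zero here.
 */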
134 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
135 {
136 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
137 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
138 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
139 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
140 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
141 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
142 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
143 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
144 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
145 }
146 
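/*
 * KIQ helper: emit a PACKET3_MAP_QUEUES that points the CP at the ring's
 * MQD and wptr.  The ME/engine selection depends on the ring type:
 * compute uses me=1/eng_sel=0, gfx uses me=0/eng_sel=4 and MES uses
 * me=2/eng_sel=5.
 */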
147 static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
148 				 struct amdgpu_ring *ring)
149 {
150 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
151 	uint64_t wptr_addr = ring->wptr_gpu_addr;
152 	uint32_t me = 0, eng_sel = 0;
153 
154 	switch (ring->funcs->type) {
155 	case AMDGPU_RING_TYPE_COMPUTE:
156 		me = 1;
157 		eng_sel = 0;
158 		break;
159 	case AMDGPU_RING_TYPE_GFX:
160 		me = 0;
161 		eng_sel = 4;
162 		break;
163 	case AMDGPU_RING_TYPE_MES:
164 		me = 2;
165 		eng_sel = 5;
166 		break;
167 	default:
168 		WARN_ON(1);
169 	}
170 
171 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
172 	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
173 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
174 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
175 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
176 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
177 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
178 			  PACKET3_MAP_QUEUES_ME((me)) |
179 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
180 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
181 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
182 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
183 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
184 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
185 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
186 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
187 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
188 }
189 
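/*
 * KIQ helper: emit a PACKET3_UNMAP_QUEUES for the given ring.  When MES is
 * enabled and the KIQ ring is not ready, the request is routed through
 * amdgpu_mes_unmap_legacy_queue() instead.  For PREEMPT_QUEUES_NO_UNMAP
 * the trailing dwords carry gpu_addr and seq (used to signal completion);
 * otherwise they are zero.
 */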
190 static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
191 				   struct amdgpu_ring *ring,
192 				   enum amdgpu_unmap_queues_action action,
193 				   u64 gpu_addr, u64 seq)
194 {
195 	struct amdgpu_device *adev = kiq_ring->adev;
196 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
197 
198 	if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
199 		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
200 		return;
201 	}
202 
203 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
204 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
205 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
206 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
207 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
208 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
209 	amdgpu_ring_write(kiq_ring,
210 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
211 
212 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
213 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
214 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
215 		amdgpu_ring_write(kiq_ring, seq);
216 	} else {
217 		amdgpu_ring_write(kiq_ring, 0);
218 		amdgpu_ring_write(kiq_ring, 0);
219 		amdgpu_ring_write(kiq_ring, 0);
220 	}
221 }
222 
223 static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
224 				   struct amdgpu_ring *ring,
225 				   u64 addr,
226 				   u64 seq)
227 {
228 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
229 
230 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
231 	amdgpu_ring_write(kiq_ring,
232 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
233 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
234 			  PACKET3_QUERY_STATUS_COMMAND(2));
235 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
236 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
237 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
238 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
239 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
240 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
241 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
242 }
243 
244 static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
245 				uint16_t pasid, uint32_t flush_type,
246 				bool all_hub)
247 {
248 	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
249 }
250 
251 static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
252 	.kiq_set_resources = gfx11_kiq_set_resources,
253 	.kiq_map_queues = gfx11_kiq_map_queues,
254 	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
255 	.kiq_query_status = gfx11_kiq_query_status,
256 	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
257 	.set_resources_size = 8,
258 	.map_queues_size = 7,
259 	.unmap_queues_size = 6,
260 	.query_status_size = 7,
261 	.invalidate_tlbs_size = 2,
262 };
263 
264 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
265 {
266 	adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
267 }
268 
269 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
270 {
271 	switch (adev->ip_versions[GC_HWIP][0]) {
272 	case IP_VERSION(11, 0, 1):
273 	case IP_VERSION(11, 0, 4):
274 		soc15_program_register_sequence(adev,
275 						golden_settings_gc_11_0_1,
276 						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
277 		break;
278 	default:
279 		break;
280 	}
281 	soc15_program_register_sequence(adev,
282 					golden_settings_gc_11_0,
283 					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
284 
285 }
286 
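/*
 * Emit a WRITE_DATA packet that writes @val to the register at offset
 * @reg through the selected engine; @wc requests write confirmation.
 */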
287 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
288 				       bool wc, uint32_t reg, uint32_t val)
289 {
290 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
291 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
292 			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
293 	amdgpu_ring_write(ring, reg);
294 	amdgpu_ring_write(ring, 0);
295 	amdgpu_ring_write(ring, val);
296 }
297 
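/*
 * Emit a WAIT_REG_MEM packet: poll a register (mem_space == 0) or a
 * memory location (mem_space == 1, dword-aligned addr0/addr1) until
 * (value & mask) == ref, with @inv as the poll interval.
 */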
298 static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
299 				  int mem_space, int opt, uint32_t addr0,
300 				  uint32_t addr1, uint32_t ref, uint32_t mask,
301 				  uint32_t inv)
302 {
303 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
304 	amdgpu_ring_write(ring,
305 			  /* memory (1) or register (0) */
306 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
307 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
308 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
309 			   WAIT_REG_MEM_ENGINE(eng_sel)));
310 
311 	if (mem_space)
312 		BUG_ON(addr0 & 0x3); /* Dword align */
313 	amdgpu_ring_write(ring, addr0);
314 	amdgpu_ring_write(ring, addr1);
315 	amdgpu_ring_write(ring, ref);
316 	amdgpu_ring_write(ring, mask);
317 	amdgpu_ring_write(ring, inv); /* poll interval */
318 }
319 
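/*
 * Basic ring test: seed SCRATCH_REG0 with 0xCAFEDEAD via MMIO, submit a
 * packet that writes 0xDEADBEEF to it (a wreg on the KIQ ring, a
 * SET_UCONFIG_REG elsewhere) and poll until the value shows up or
 * usec_timeout expires.
 */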
320 static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
321 {
322 	struct amdgpu_device *adev = ring->adev;
323 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
324 	uint32_t tmp = 0;
325 	unsigned i;
326 	int r;
327 
328 	WREG32(scratch, 0xCAFEDEAD);
329 	r = amdgpu_ring_alloc(ring, 5);
330 	if (r) {
331 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
332 			  ring->idx, r);
333 		return r;
334 	}
335 
336 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
337 		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
338 	} else {
339 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
340 		amdgpu_ring_write(ring, scratch -
341 				  PACKET3_SET_UCONFIG_REG_START);
342 		amdgpu_ring_write(ring, 0xDEADBEEF);
343 	}
344 	amdgpu_ring_commit(ring);
345 
346 	for (i = 0; i < adev->usec_timeout; i++) {
347 		tmp = RREG32(scratch);
348 		if (tmp == 0xDEADBEEF)
349 			break;
350 		if (amdgpu_emu_mode == 1)
351 			drm_msleep(1);
352 		else
353 			udelay(1);
354 	}
355 
356 	if (i >= adev->usec_timeout)
357 		r = -ETIMEDOUT;
358 	return r;
359 }
360 
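/*
 * Indirect buffer test: point a small IB at a writeback slot (or at the
 * MES context buffer for MES queues) seeded with 0xCAFEDEAD, let it
 * WRITE_DATA 0xDEADBEEF there, then wait for the fence and verify the
 * value landed.  Skipped on the KIQ ring when the MES KIQ is in use.
 */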
361 static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
362 {
363 	struct amdgpu_device *adev = ring->adev;
364 	struct amdgpu_ib ib;
365 	struct dma_fence *f = NULL;
366 	unsigned index;
367 	uint64_t gpu_addr;
368 	volatile uint32_t *cpu_ptr;
369 	long r;
370 
371 	/* MES KIQ fw doesn't have indirect buffer support for now */
372 	if (adev->enable_mes_kiq &&
373 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
374 		return 0;
375 
376 	memset(&ib, 0, sizeof(ib));
377 
378 	if (ring->is_mes_queue) {
379 		uint32_t padding, offset;
380 
381 		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
382 		padding = amdgpu_mes_ctx_get_offs(ring,
383 						  AMDGPU_MES_CTX_PADDING_OFFS);
384 
385 		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
386 		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
387 
388 		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
389 		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
390 		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
391 	} else {
392 		r = amdgpu_device_wb_get(adev, &index);
393 		if (r)
394 			return r;
395 
396 		gpu_addr = adev->wb.gpu_addr + (index * 4);
397 		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
398 		cpu_ptr = &adev->wb.wb[index];
399 
400 		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
401 		if (r) {
402 			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
403 			goto err1;
404 		}
405 	}
406 
407 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
408 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
409 	ib.ptr[2] = lower_32_bits(gpu_addr);
410 	ib.ptr[3] = upper_32_bits(gpu_addr);
411 	ib.ptr[4] = 0xDEADBEEF;
412 	ib.length_dw = 5;
413 
414 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
415 	if (r)
416 		goto err2;
417 
418 	r = dma_fence_wait_timeout(f, false, timeout);
419 	if (r == 0) {
420 		r = -ETIMEDOUT;
421 		goto err2;
422 	} else if (r < 0) {
423 		goto err2;
424 	}
425 
426 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
427 		r = 0;
428 	else
429 		r = -EINVAL;
430 err2:
431 	if (!ring->is_mes_queue)
432 		amdgpu_ib_free(adev, &ib, NULL);
433 	dma_fence_put(f);
434 err1:
435 	if (!ring->is_mes_queue)
436 		amdgpu_device_wb_free(adev, index);
437 	return r;
438 }
439 
440 static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
441 {
442 	release_firmware(adev->gfx.pfp_fw);
443 	adev->gfx.pfp_fw = NULL;
444 	release_firmware(adev->gfx.me_fw);
445 	adev->gfx.me_fw = NULL;
446 	release_firmware(adev->gfx.rlc_fw);
447 	adev->gfx.rlc_fw = NULL;
448 	release_firmware(adev->gfx.mec_fw);
449 	adev->gfx.mec_fw = NULL;
450 
451 	kfree(adev->gfx.rlc.register_list_format);
452 }
453 
454 static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
455 {
456 	char fw_name[40];
457 	char ucode_prefix[30];
458 	int err;
459 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
460 	uint16_t version_major;
461 	uint16_t version_minor;
462 
463 	DRM_DEBUG("\n");
464 
465 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
466 
467 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
468 	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
469 	if (err)
470 		goto out;
471 	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
472 	if (err)
473 		goto out;
474 	/* check pfp fw hdr version to decide whether to enable rs64 for gfx11. */
475 	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
476 				(union amdgpu_firmware_header *)
477 				adev->gfx.pfp_fw->data, 2, 0);
478 	if (adev->gfx.rs64_enable) {
479 		dev_info(adev->dev, "CP RS64 enable\n");
480 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
481 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
482 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
483 	} else {
484 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
485 	}
486 
487 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
488 	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
489 	if (err)
490 		goto out;
491 	err = amdgpu_ucode_validate(adev->gfx.me_fw);
492 	if (err)
493 		goto out;
494 	if (adev->gfx.rs64_enable) {
495 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
496 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
497 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
498 	} else {
499 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
500 	}
501 
502 	if (!amdgpu_sriov_vf(adev)) {
503 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
504 		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
505 		if (err)
506 			goto out;
507 		err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
508 		if (err)
509 			goto out;
510 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
511 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
512 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
513 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
514 		if (err)
515 			goto out;
516 	}
517 
518 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
519 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
520 	if (err)
521 		goto out;
522 	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
523 	if (err)
524 		goto out;
525 	if (adev->gfx.rs64_enable) {
526 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
527 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
528 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
529 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
530 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
531 	} else {
532 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
533 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
534 	}
535 
536 	/* only one MEC for gfx 11.0.0. */
537 	adev->gfx.mec2_fw = NULL;
538 
539 out:
540 	if (err) {
541 		dev_err(adev->dev,
542 			"gfx11: Failed to init firmware \"%s\"\n",
543 			fw_name);
544 		release_firmware(adev->gfx.pfp_fw);
545 		adev->gfx.pfp_fw = NULL;
546 		release_firmware(adev->gfx.me_fw);
547 		adev->gfx.me_fw = NULL;
548 		release_firmware(adev->gfx.rlc_fw);
549 		adev->gfx.rlc_fw = NULL;
550 		release_firmware(adev->gfx.mec_fw);
551 		adev->gfx.mec_fw = NULL;
552 	}
553 
554 	return err;
555 }
556 
557 static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev)
558 {
559 	const struct psp_firmware_header_v1_0 *toc_hdr;
560 	int err = 0;
561 	char fw_name[40];
562 	char ucode_prefix[30];
563 
564 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
565 
566 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
567 	err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
568 	if (err)
569 		goto out;
570 
571 	err = amdgpu_ucode_validate(adev->psp.toc_fw);
572 	if (err)
573 		goto out;
574 
575 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
576 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
577 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
578 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
579 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
580 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
581 	return 0;
582 out:
583 	dev_err(adev->dev, "Failed to load TOC microcode\n");
584 	release_firmware(adev->psp.toc_fw);
585 	adev->psp.toc_fw = NULL;
586 	return err;
587 }
588 
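/*
 * Clear-state buffer (CSB) helpers: gfx_v11_0_get_csb_size() counts the
 * dwords needed for the preamble, the SET_CONTEXT_REG extents from
 * gfx11_cs_data, the PA_SC_TILE_STEERING_OVERRIDE write and the trailing
 * CLEAR_STATE packet; gfx_v11_0_get_csb_buffer() emits that same sequence
 * into the CSB bo.
 */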
589 static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
590 {
591 	u32 count = 0;
592 	const struct cs_section_def *sect = NULL;
593 	const struct cs_extent_def *ext = NULL;
594 
595 	/* begin clear state */
596 	count += 2;
597 	/* context control state */
598 	count += 3;
599 
600 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
601 		for (ext = sect->section; ext->extent != NULL; ++ext) {
602 			if (sect->id == SECT_CONTEXT)
603 				count += 2 + ext->reg_count;
604 			else
605 				return 0;
606 		}
607 	}
608 
609 	/* set PA_SC_TILE_STEERING_OVERRIDE */
610 	count += 3;
611 	/* end clear state */
612 	count += 2;
613 	/* clear state */
614 	count += 2;
615 
616 	return count;
617 }
618 
619 static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
620 				    volatile u32 *buffer)
621 {
622 	u32 count = 0, i;
623 	const struct cs_section_def *sect = NULL;
624 	const struct cs_extent_def *ext = NULL;
625 	int ctx_reg_offset;
626 
627 	if (adev->gfx.rlc.cs_data == NULL)
628 		return;
629 	if (buffer == NULL)
630 		return;
631 
632 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
633 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
634 
635 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
636 	buffer[count++] = cpu_to_le32(0x80000000);
637 	buffer[count++] = cpu_to_le32(0x80000000);
638 
639 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
640 		for (ext = sect->section; ext->extent != NULL; ++ext) {
641 			if (sect->id == SECT_CONTEXT) {
642 				buffer[count++] =
643 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
644 				buffer[count++] = cpu_to_le32(ext->reg_index -
645 						PACKET3_SET_CONTEXT_REG_START);
646 				for (i = 0; i < ext->reg_count; i++)
647 					buffer[count++] = cpu_to_le32(ext->extent[i]);
648 			} else {
649 				return;
650 			}
651 		}
652 	}
653 
654 	ctx_reg_offset =
655 		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
656 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
657 	buffer[count++] = cpu_to_le32(ctx_reg_offset);
658 	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
659 
660 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
661 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
662 
663 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
664 	buffer[count++] = cpu_to_le32(0);
665 }
666 
667 static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
668 {
669 	/* clear state block */
670 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
671 			&adev->gfx.rlc.clear_state_gpu_addr,
672 			(void **)&adev->gfx.rlc.cs_ptr);
673 
674 	/* jump table block */
675 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
676 			&adev->gfx.rlc.cp_table_gpu_addr,
677 			(void **)&adev->gfx.rlc.cp_table_ptr);
678 }
679 
680 static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
681 {
682 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
683 
684 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
685 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
686 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
687 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
688 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
689 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
690 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
691 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
692 	adev->gfx.rlc.rlcg_reg_access_supported = true;
693 }
694 
695 static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
696 {
697 	const struct cs_section_def *cs_data;
698 	int r;
699 
700 	adev->gfx.rlc.cs_data = gfx11_cs_data;
701 
702 	cs_data = adev->gfx.rlc.cs_data;
703 
704 	if (cs_data) {
705 		/* init clear state block */
706 		r = amdgpu_gfx_rlc_init_csb(adev);
707 		if (r)
708 			return r;
709 	}
710 
711 	/* init spm vmid with 0xf */
712 	if (adev->gfx.rlc.funcs->update_spm_vmid)
713 		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
714 
715 	return 0;
716 }
717 
718 static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
719 {
720 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
721 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
722 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
723 }
724 
725 static int gfx_v11_0_me_init(struct amdgpu_device *adev)
726 {
727 	int r;
728 
729 	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
730 
731 	amdgpu_gfx_graphics_queue_acquire(adev);
732 
733 	r = gfx_v11_0_init_microcode(adev);
734 	if (r)
735 		DRM_ERROR("Failed to load gfx firmware!\n");
736 
737 	return r;
738 }
739 
740 static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
741 {
742 	int r;
743 	u32 *hpd;
744 	size_t mec_hpd_size;
745 
746 	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
747 
748 	/* take ownership of the relevant compute queues */
749 	amdgpu_gfx_compute_queue_acquire(adev);
750 	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;
751 
752 	if (mec_hpd_size) {
753 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
754 					      AMDGPU_GEM_DOMAIN_GTT,
755 					      &adev->gfx.mec.hpd_eop_obj,
756 					      &adev->gfx.mec.hpd_eop_gpu_addr,
757 					      (void **)&hpd);
758 		if (r) {
759 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
760 			gfx_v11_0_mec_fini(adev);
761 			return r;
762 		}
763 
764 		memset(hpd, 0, mec_hpd_size);
765 
766 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
767 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
768 	}
769 
770 	return 0;
771 }
772 
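/*
 * SQ indirect register access: program SQ_IND_INDEX with the wave id and
 * register index (plus the work-item id and AUTO_INCR for bulk GPR reads)
 * and read the values back through SQ_IND_DATA.
 */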
773 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
774 {
775 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
776 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
777 		(address << SQ_IND_INDEX__INDEX__SHIFT));
778 	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
779 }
780 
781 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
782 			   uint32_t thread, uint32_t regno,
783 			   uint32_t num, uint32_t *out)
784 {
785 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
786 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
787 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
788 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
789 		(SQ_IND_INDEX__AUTO_INCR_MASK));
790 	while (num--)
791 		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
792 }
793 
794 static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
795 {
796 	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
797 	 * field when performing a select_se_sh so it should be
798 	 * zero here */
799 	WARN_ON(simd != 0);
800 
801 	/* type 3 wave data */
802 	dst[(*no_fields)++] = 3;
803 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
804 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
805 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
806 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
807 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
808 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
809 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
810 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
811 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
812 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
813 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
814 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
815 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
816 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
817 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
818 }
819 
820 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
821 				     uint32_t wave, uint32_t start,
822 				     uint32_t size, uint32_t *dst)
823 {
824 	WARN_ON(simd != 0);
825 
826 	wave_read_regs(
827 		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
828 		dst);
829 }
830 
831 static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
832 				      uint32_t wave, uint32_t thread,
833 				      uint32_t start, uint32_t size,
834 				      uint32_t *dst)
835 {
836 	wave_read_regs(
837 		adev, wave, thread,
838 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
839 }
840 
841 static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
842 									  u32 me, u32 pipe, u32 q, u32 vm)
843 {
844 	soc21_grbm_select(adev, me, pipe, q, vm);
845 }
846 
847 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
848 	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
849 	.select_se_sh = &gfx_v11_0_select_se_sh,
850 	.read_wave_data = &gfx_v11_0_read_wave_data,
851 	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
852 	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
853 	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
854 	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
855 };
856 
857 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
858 {
859 	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
860 
861 	switch (adev->ip_versions[GC_HWIP][0]) {
862 	case IP_VERSION(11, 0, 0):
863 	case IP_VERSION(11, 0, 2):
864 	case IP_VERSION(11, 0, 3):
865 		adev->gfx.config.max_hw_contexts = 8;
866 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
867 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
868 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
869 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
870 		break;
871 	case IP_VERSION(11, 0, 1):
872 	case IP_VERSION(11, 0, 4):
873 		adev->gfx.config.max_hw_contexts = 8;
874 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
875 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
876 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
877 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
878 		break;
879 	default:
880 		BUG();
881 		break;
882 	}
883 
884 	return 0;
885 }
886 
887 static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
888 				   int me, int pipe, int queue)
889 {
890 	int r;
891 	struct amdgpu_ring *ring;
892 	unsigned int irq_type;
893 
894 	ring = &adev->gfx.gfx_ring[ring_id];
895 
896 	ring->me = me;
897 	ring->pipe = pipe;
898 	ring->queue = queue;
899 
900 	ring->ring_obj = NULL;
901 	ring->use_doorbell = true;
902 
903 	if (!ring_id)
904 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
905 	else
906 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
907 	snprintf(ring->name, sizeof(ring->name), "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
908 
909 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
910 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
911 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
912 	if (r)
913 		return r;
914 	return 0;
915 }
916 
917 static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
918 				       int mec, int pipe, int queue)
919 {
920 	int r;
921 	unsigned irq_type;
922 	struct amdgpu_ring *ring;
923 	unsigned int hw_prio;
924 
925 	ring = &adev->gfx.compute_ring[ring_id];
926 
927 	/* mec0 is me1 */
928 	ring->me = mec + 1;
929 	ring->pipe = pipe;
930 	ring->queue = queue;
931 
932 	ring->ring_obj = NULL;
933 	ring->use_doorbell = true;
934 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
935 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
936 				+ (ring_id * GFX11_MEC_HPD_SIZE);
937 	snprintf(ring->name, sizeof(ring->name), "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
938 
939 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
940 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
941 		+ ring->pipe;
942 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
943 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
944 	/* type-2 packets are deprecated on MEC, use type-3 instead */
945 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
946 			     hw_prio, NULL);
947 	if (r)
948 		return r;
949 
950 	return 0;
951 }
952 
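/*
 * Offset/size of each firmware image inside the RLC autoload buffer,
 * indexed by SOC21_FIRMWARE_ID and filled in from the PSP TOC by
 * gfx_v11_0_parse_rlc_toc().
 */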
953 static struct {
954 	SOC21_FIRMWARE_ID	id;
955 	unsigned int		offset;
956 	unsigned int		size;
957 } rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];
958 
959 static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
960 {
961 	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;
962 
963 	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
964 			(ucode->id < SOC21_FIRMWARE_ID_MAX)) {
965 		rlc_autoload_info[ucode->id].id = ucode->id;
966 		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
967 		rlc_autoload_info[ucode->id].size = ucode->size * 4;
968 
969 		ucode++;
970 	}
971 }
972 
973 static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
974 {
975 	uint32_t total_size = 0;
976 	SOC21_FIRMWARE_ID id;
977 
978 	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);
979 
980 	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
981 		total_size += rlc_autoload_info[id].size;
982 
983 	/* The TOC offsets may be aligned/padded, in which case the summed sizes
984 	 * underestimate the buffer, so cover the last entry's offset + size */
984 	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
985 		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
986 			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;
987 
988 	return total_size;
989 }
990 
991 static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
992 {
993 	int r;
994 	uint32_t total_size;
995 
996 	total_size = gfx_v11_0_calc_toc_total_size(adev);
997 
998 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
999 			AMDGPU_GEM_DOMAIN_VRAM,
1000 			&adev->gfx.rlc.rlc_autoload_bo,
1001 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1002 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1003 
1004 	if (r) {
1005 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
1006 		return r;
1007 	}
1008 
1009 	return 0;
1010 }
1011 
1012 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1013 					      SOC21_FIRMWARE_ID id,
1014 					      const void *fw_data,
1015 					      uint32_t fw_size,
1016 					      uint32_t *fw_autoload_mask)
1017 {
1018 	uint32_t toc_offset;
1019 	uint32_t toc_fw_size;
1020 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
1021 
1022 	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
1023 		return;
1024 
1025 	toc_offset = rlc_autoload_info[id].offset;
1026 	toc_fw_size = rlc_autoload_info[id].size;
1027 
1028 	if (fw_size == 0)
1029 		fw_size = toc_fw_size;
1030 
1031 	if (fw_size > toc_fw_size)
1032 		fw_size = toc_fw_size;
1033 
1034 	memcpy(ptr + toc_offset, fw_data, fw_size);
1035 
1036 	if (fw_size < toc_fw_size)
1037 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
1038 
1039 	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
1040 		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
1041 }
1042 
1043 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
1044 							uint32_t *fw_autoload_mask)
1045 {
1046 	void *data;
1047 	uint32_t size;
1048 	uint64_t *toc_ptr;
1049 
1050 	*(uint64_t *)fw_autoload_mask |= 0x1;
1051 
1052 	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);
1053 
1054 	data = adev->psp.toc.start_addr;
1055 	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;
1056 
1057 	toc_ptr = (uint64_t *)data + size / 8 - 1;
1058 	*toc_ptr = *(uint64_t *)fw_autoload_mask;
1059 
1060 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
1061 					data, size, fw_autoload_mask);
1062 }
1063 
1064 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
1065 							uint32_t *fw_autoload_mask)
1066 {
1067 	const __le32 *fw_data;
1068 	uint32_t fw_size;
1069 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1070 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
1071 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1072 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
1073 	uint16_t version_major, version_minor;
1074 
1075 	if (adev->gfx.rs64_enable) {
1076 		/* pfp ucode */
1077 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1078 			adev->gfx.pfp_fw->data;
1079 		/* instruction */
1080 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1081 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1082 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1083 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
1084 						fw_data, fw_size, fw_autoload_mask);
1085 		/* data */
1086 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1087 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1088 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1089 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
1090 						fw_data, fw_size, fw_autoload_mask);
1091 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
1092 						fw_data, fw_size, fw_autoload_mask);
1093 		/* me ucode */
1094 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1095 			adev->gfx.me_fw->data;
1096 		/* instruction */
1097 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1098 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1099 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1100 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
1101 						fw_data, fw_size, fw_autoload_mask);
1102 		/* data */
1103 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1104 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1105 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1106 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
1107 						fw_data, fw_size, fw_autoload_mask);
1108 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
1109 						fw_data, fw_size, fw_autoload_mask);
1110 		/* mec ucode */
1111 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1112 			adev->gfx.mec_fw->data;
1113 		/* instruction */
1114 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1115 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1116 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1117 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
1118 						fw_data, fw_size, fw_autoload_mask);
1119 		/* data */
1120 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1121 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1122 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1123 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
1124 						fw_data, fw_size, fw_autoload_mask);
1125 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
1126 						fw_data, fw_size, fw_autoload_mask);
1127 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
1128 						fw_data, fw_size, fw_autoload_mask);
1129 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
1130 						fw_data, fw_size, fw_autoload_mask);
1131 	} else {
1132 		/* pfp ucode */
1133 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1134 			adev->gfx.pfp_fw->data;
1135 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1136 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1137 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1138 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
1139 						fw_data, fw_size, fw_autoload_mask);
1140 
1141 		/* me ucode */
1142 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1143 			adev->gfx.me_fw->data;
1144 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1145 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1146 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1147 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
1148 						fw_data, fw_size, fw_autoload_mask);
1149 
1150 		/* mec ucode */
1151 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1152 			adev->gfx.mec_fw->data;
1153 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1154 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1155 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1156 			cp_hdr->jt_size * 4;
1157 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
1158 						fw_data, fw_size, fw_autoload_mask);
1159 	}
1160 
1161 	/* rlc ucode */
1162 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
1163 		adev->gfx.rlc_fw->data;
1164 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1165 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
1166 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
1167 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
1168 					fw_data, fw_size, fw_autoload_mask);
1169 
1170 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1171 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1172 	if (version_major == 2) {
1173 		if (version_minor >= 2) {
1174 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1175 
1176 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1177 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1178 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1179 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
1180 					fw_data, fw_size, fw_autoload_mask);
1181 
1182 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1183 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1184 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1185 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
1186 					fw_data, fw_size, fw_autoload_mask);
1187 		}
1188 	}
1189 }
1190 
1191 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
1192 							uint32_t *fw_autoload_mask)
1193 {
1194 	const __le32 *fw_data;
1195 	uint32_t fw_size;
1196 	const struct sdma_firmware_header_v2_0 *sdma_hdr;
1197 
1198 	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
1199 		adev->sdma.instance[0].fw->data;
1200 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1201 			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
1202 	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
1203 
1204 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1205 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);
1206 
1207 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1208 			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
1209 	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
1210 
1211 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1212 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
1213 }
1214 
1215 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
1216 							uint32_t *fw_autoload_mask)
1217 {
1218 	const __le32 *fw_data;
1219 	unsigned fw_size;
1220 	const struct mes_firmware_header_v1_0 *mes_hdr;
1221 	int pipe, ucode_id, data_id;
1222 
1223 	for (pipe = 0; pipe < 2; pipe++) {
1224 		if (pipe == 0) {
1225 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
1226 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
1227 		} else {
1228 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
1229 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
1230 		}
1231 
1232 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1233 			adev->mes.fw[pipe]->data;
1234 
1235 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1236 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1237 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1238 
1239 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1240 				ucode_id, fw_data, fw_size, fw_autoload_mask);
1241 
1242 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1243 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1244 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1245 
1246 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1247 				data_id, fw_data, fw_size, fw_autoload_mask);
1248 	}
1249 }
1250 
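/*
 * RLC backdoor autoload: copy the SDMA, CP/GFX, MES and TOC ucode into
 * the autoload bo, point the IMU bootloader registers at the RLC_G image,
 * then load/set up/start the IMU and disable GPA mode.
 */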
1251 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1252 {
1253 	uint32_t rlc_g_offset, rlc_g_size;
1254 	uint64_t gpu_addr;
1255 	uint32_t autoload_fw_id[2];
1256 
1257 	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);
1258 
1259 	/* RLC autoload sequence 2: copy ucode */
1260 	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
1261 	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
1262 	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
1263 	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);
1264 
1265 	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
1266 	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
1267 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
1268 
1269 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1270 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1271 
1272 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1273 
1274 	/* RLC autoload sequence 3: load IMU fw */
1275 	if (adev->gfx.imu.funcs->load_microcode)
1276 		adev->gfx.imu.funcs->load_microcode(adev);
1277 	/* RLC autoload sequence 4: init IMU fw */
1278 	if (adev->gfx.imu.funcs->setup_imu)
1279 		adev->gfx.imu.funcs->setup_imu(adev);
1280 	if (adev->gfx.imu.funcs->start_imu)
1281 		adev->gfx.imu.funcs->start_imu(adev);
1282 
1283 	/* RLC autoload sequence 5: disable gpa mode */
1284 	gfx_v11_0_disable_gpa_mode(adev);
1285 
1286 	return 0;
1287 }
1288 
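/*
 * sw_init: pick the ME/MEC queue topology for the detected GC IP version,
 * hook up the EOP and privileged fault interrupts, load microcode, create
 * the gfx/compute rings and MQDs (plus the KIQ when MES KIQ is not used)
 * and, for backdoor RLC autoload, the TOC firmware and autoload buffer.
 */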
1289 static int gfx_v11_0_sw_init(void *handle)
1290 {
1291 	int i, j, k, r, ring_id = 0;
1292 	struct amdgpu_kiq *kiq;
1293 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1294 
1295 	adev->gfxhub.funcs->init(adev);
1296 
1297 	switch (adev->ip_versions[GC_HWIP][0]) {
1298 	case IP_VERSION(11, 0, 0):
1299 	case IP_VERSION(11, 0, 2):
1300 	case IP_VERSION(11, 0, 3):
1301 		adev->gfx.me.num_me = 1;
1302 		adev->gfx.me.num_pipe_per_me = 1;
1303 		adev->gfx.me.num_queue_per_pipe = 1;
1304 		adev->gfx.mec.num_mec = 2;
1305 		adev->gfx.mec.num_pipe_per_mec = 4;
1306 		adev->gfx.mec.num_queue_per_pipe = 4;
1307 		break;
1308 	case IP_VERSION(11, 0, 1):
1309 	case IP_VERSION(11, 0, 4):
1310 		adev->gfx.me.num_me = 1;
1311 		adev->gfx.me.num_pipe_per_me = 1;
1312 		adev->gfx.me.num_queue_per_pipe = 1;
1313 		adev->gfx.mec.num_mec = 1;
1314 		adev->gfx.mec.num_pipe_per_mec = 4;
1315 		adev->gfx.mec.num_queue_per_pipe = 4;
1316 		break;
1317 	default:
1318 		adev->gfx.me.num_me = 1;
1319 		adev->gfx.me.num_pipe_per_me = 1;
1320 		adev->gfx.me.num_queue_per_pipe = 1;
1321 		adev->gfx.mec.num_mec = 1;
1322 		adev->gfx.mec.num_pipe_per_mec = 4;
1323 		adev->gfx.mec.num_queue_per_pipe = 8;
1324 		break;
1325 	}
1326 
1327 	/* Enable the CG flag in one-VF mode to allow RLC safe mode enter/exit */
1328 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
1329 		amdgpu_sriov_is_pp_one_vf(adev))
1330 		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
1331 
1332 	/* EOP Event */
1333 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1334 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
1335 			      &adev->gfx.eop_irq);
1336 	if (r)
1337 		return r;
1338 
1339 	/* Privileged reg */
1340 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1341 			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
1342 			      &adev->gfx.priv_reg_irq);
1343 	if (r)
1344 		return r;
1345 
1346 	/* Privileged inst */
1347 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1348 			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1349 			      &adev->gfx.priv_inst_irq);
1350 	if (r)
1351 		return r;
1352 
1353 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1354 
1355 	if (adev->gfx.imu.funcs) {
1356 		if (adev->gfx.imu.funcs->init_microcode) {
1357 			r = adev->gfx.imu.funcs->init_microcode(adev);
1358 			if (r)
1359 				DRM_ERROR("Failed to load imu firmware!\n");
1360 		}
1361 	}
1362 
1363 	r = gfx_v11_0_me_init(adev);
1364 	if (r)
1365 		return r;
1366 
1367 	r = gfx_v11_0_rlc_init(adev);
1368 	if (r) {
1369 		DRM_ERROR("Failed to init rlc BOs!\n");
1370 		return r;
1371 	}
1372 
1373 	r = gfx_v11_0_mec_init(adev);
1374 	if (r) {
1375 		DRM_ERROR("Failed to init MEC BOs!\n");
1376 		return r;
1377 	}
1378 
1379 	/* set up the gfx ring */
1380 	for (i = 0; i < adev->gfx.me.num_me; i++) {
1381 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
1382 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1383 				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1384 					continue;
1385 
1386 				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
1387 							    i, k, j);
1388 				if (r)
1389 					return r;
1390 				ring_id++;
1391 			}
1392 		}
1393 	}
1394 
1395 	ring_id = 0;
1396 	/* set up the compute queues - allocate horizontally across pipes */
1397 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1398 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1399 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1400 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
1401 								     j))
1402 					continue;
1403 
1404 				r = gfx_v11_0_compute_ring_init(adev, ring_id,
1405 								i, k, j);
1406 				if (r)
1407 					return r;
1408 
1409 				ring_id++;
1410 			}
1411 		}
1412 	}
1413 
1414 	if (!adev->enable_mes_kiq) {
1415 		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE);
1416 		if (r) {
1417 			DRM_ERROR("Failed to init KIQ BOs!\n");
1418 			return r;
1419 		}
1420 
1421 		kiq = &adev->gfx.kiq;
1422 		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1423 		if (r)
1424 			return r;
1425 	}
1426 
1427 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd));
1428 	if (r)
1429 		return r;
1430 
1431 	/* allocate visible FB for rlc auto-loading fw */
1432 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1433 		r = gfx_v11_0_init_toc_microcode(adev);
1434 		if (r)
1435 			dev_err(adev->dev, "Failed to load toc firmware!\n");
1436 		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
1437 		if (r)
1438 			return r;
1439 	}
1440 
1441 	r = gfx_v11_0_gpu_early_init(adev);
1442 	if (r)
1443 		return r;
1444 
1445 	return 0;
1446 }
1447 
1448 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
1449 {
1450 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1451 			      &adev->gfx.pfp.pfp_fw_gpu_addr,
1452 			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
1453 
1454 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1455 			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1456 			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1457 }
1458 
1459 static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
1460 {
1461 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1462 			      &adev->gfx.me.me_fw_gpu_addr,
1463 			      (void **)&adev->gfx.me.me_fw_ptr);
1464 
1465 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1466 			       &adev->gfx.me.me_fw_data_gpu_addr,
1467 			       (void **)&adev->gfx.me.me_fw_data_ptr);
1468 }
1469 
1470 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1471 {
1472 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1473 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1474 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1475 }
1476 
1477 static int gfx_v11_0_sw_fini(void *handle)
1478 {
1479 	int i;
1480 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1481 
1482 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1483 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1484 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1485 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1486 
1487 	amdgpu_gfx_mqd_sw_fini(adev);
1488 
1489 	if (!adev->enable_mes_kiq) {
1490 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
1491 		amdgpu_gfx_kiq_fini(adev);
1492 	}
1493 
1494 	gfx_v11_0_pfp_fini(adev);
1495 	gfx_v11_0_me_fini(adev);
1496 	gfx_v11_0_rlc_fini(adev);
1497 	gfx_v11_0_mec_fini(adev);
1498 
1499 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1500 		gfx_v11_0_rlc_autoload_buffer_fini(adev);
1501 
1502 	gfx_v11_0_free_microcode(adev);
1503 
1504 	return 0;
1505 }
1506 
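/*
 * Program GRBM_GFX_INDEX to steer register accesses at a specific shader
 * engine / shader array / instance; 0xffffffff selects broadcast.  Callers
 * such as gfx_v11_0_setup_rb() serialize this with grbm_idx_mutex.
 */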
1507 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1508 				   u32 sh_num, u32 instance)
1509 {
1510 	u32 data;
1511 
1512 	if (instance == 0xffffffff)
1513 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1514 				     INSTANCE_BROADCAST_WRITES, 1);
1515 	else
1516 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1517 				     instance);
1518 
1519 	if (se_num == 0xffffffff)
1520 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1521 				     1);
1522 	else
1523 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1524 
1525 	if (sh_num == 0xffffffff)
1526 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1527 				     1);
1528 	else
1529 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1530 
1531 	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1532 }
1533 
1534 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1535 {
1536 	u32 data, mask;
1537 
1538 	data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
1539 	data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
1540 
1541 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1542 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1543 
1544 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1545 					 adev->gfx.config.max_sh_per_se);
1546 
1547 	return (~data) & mask;
1548 }
1549 
1550 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
1551 {
1552 	int i, j;
1553 	u32 data;
1554 	u32 active_rbs = 0;
1555 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1556 					adev->gfx.config.max_sh_per_se;
1557 
1558 	mutex_lock(&adev->grbm_idx_mutex);
1559 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1560 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1561 			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
1562 			data = gfx_v11_0_get_rb_active_bitmap(adev);
1563 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1564 					       rb_bitmap_width_per_sh);
1565 		}
1566 	}
1567 	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1568 	mutex_unlock(&adev->grbm_idx_mutex);
1569 
1570 	adev->gfx.config.backend_enable_mask = active_rbs;
1571 	adev->gfx.config.num_rbs = hweight32(active_rbs);
1572 }
1573 
1574 #define DEFAULT_SH_MEM_BASES	(0x6000)
1575 #define LDS_APP_BASE           0x1
1576 #define SCRATCH_APP_BASE       0x2
1577 
1578 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
1579 {
1580 	int i;
1581 	uint32_t sh_mem_bases;
1582 	uint32_t data;
1583 
1584 	/*
1585 	 * Configure apertures:
1586 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1587 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1588 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1589 	 */
1590 	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
1591 			SCRATCH_APP_BASE;
1592 
1593 	mutex_lock(&adev->srbm_mutex);
1594 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1595 		soc21_grbm_select(adev, 0, 0, 0, i);
1596 		/* CP and shaders */
1597 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1598 		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
1599 
1600 		/* Enable trap for each kfd vmid. */
1601 		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
1602 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
1603 	}
1604 	soc21_grbm_select(adev, 0, 0, 0, 0);
1605 	mutex_unlock(&adev->srbm_mutex);
1606 
1607 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1608 	   access. These should be enabled by FW for target VMIDs. */
1609 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1610 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
1611 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
1612 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
1613 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
1614 	}
1615 }
1616 
1617 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
1618 {
1619 	int vmid;
1620 
1621 	/*
1622 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1623 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1624 	 * the driver can enable them for graphics. VMID0 should maintain
1625 	 * access so that HWS firmware can save/restore entries.
1626 	 */
1627 	for (vmid = 1; vmid < 16; vmid++) {
1628 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
1629 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
1630 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
1631 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
1632 	}
1633 }
1634 
1635 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
1636 {
1637 	/* TODO: harvest feature to be added later. */
1638 }
1639 
1640 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
1641 {
1642 	/* TCCs are global (not instanced). */
1643 	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
1644 			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);
1645 
1646 	adev->gfx.config.tcc_disabled_mask =
1647 		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1648 		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1649 }
1650 
1651 static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
1652 {
1653 	u32 tmp;
1654 	int i;
1655 
1656 	WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1657 
1658 	gfx_v11_0_setup_rb(adev);
1659 	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
1660 	gfx_v11_0_get_tcc_info(adev);
1661 	adev->gfx.config.pa_sc_tile_steering_override = 0;
1662 
1663 	/* XXX SH_MEM regs */
1664 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1665 	mutex_lock(&adev->srbm_mutex);
1666 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
1667 		soc21_grbm_select(adev, 0, 0, 0, i);
1668 		/* CP and shaders */
1669 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1670 		if (i != 0) {
1671 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1672 				(adev->gmc.private_aperture_start >> 48));
1673 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1674 				(adev->gmc.shared_aperture_start >> 48));
1675 			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
1676 		}
1677 	}
1678 	soc21_grbm_select(adev, 0, 0, 0, 0);
1679 
1680 	mutex_unlock(&adev->srbm_mutex);
1681 
1682 	gfx_v11_0_init_compute_vmid(adev);
1683 	gfx_v11_0_init_gds_vmid(adev);
1684 }
1685 
1686 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1687 					       bool enable)
1688 {
1689 	u32 tmp;
1690 
1691 	if (amdgpu_sriov_vf(adev))
1692 		return;
1693 
1694 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
1695 
1696 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1697 			    enable ? 1 : 0);
1698 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1699 			    enable ? 1 : 0);
1700 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1701 			    enable ? 1 : 0);
1702 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1703 			    enable ? 1 : 0);
1704 
1705 	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
1706 }
1707 
1708 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
1709 {
1710 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1711 
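	/* Point the RLC clear-state indirect buffer (CSIB) registers at the
	 * clear-state buffer in GPU memory. */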
1712 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
1713 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1714 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
1715 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1716 	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1717 
1718 	return 0;
1719 }
1720 
1721 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
1722 {
1723 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
1724 
1725 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1726 	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
1727 }
1728 
1729 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
1730 {
1731 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1732 	udelay(50);
1733 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1734 	udelay(50);
1735 }
1736 
1737 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1738 					     bool enable)
1739 {
1740 	uint32_t rlc_pg_cntl;
1741 
1742 	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
1743 
1744 	if (!enable) {
1745 		/* RLC_PG_CNTL[23] = 0 (default)
1746 		 * RLC will wait for handshake acks with SMU
1747 		 * GFXOFF will be enabled
1748 		 * RLC_PG_CNTL[23] = 1
1749 		 * RLC will not issue any message to SMU
1750 		 * hence no handshake between SMU & RLC
1751 		 * GFXOFF will be disabled
1752 		 */
1753 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1754 	} else
1755 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1756 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
1757 }
1758 
1759 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
1760 {
1761 	/* TODO: keep the RLC-SMU handshake disabled until SMU
1762 	 * and the GFXOFF feature work as expected */
1763 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1764 		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
1765 
1766 	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1767 	udelay(50);
1768 }
1769 
1770 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
1771 {
1772 	uint32_t tmp;
1773 
1774 	/* enable Save Restore Machine */
1775 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
1776 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1777 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1778 	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
1779 }
1780 
1781 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
1782 {
1783 	const struct rlc_firmware_header_v2_0 *hdr;
1784 	const __le32 *fw_data;
1785 	unsigned i, fw_size;
1786 
1787 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1788 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1789 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1790 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1791 
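	/* Stream the RLCG ucode words through the GPM UCODE ADDR/DATA
	 * register pair, then record the firmware version in the ADDR
	 * register. */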
1792 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
1793 		     RLCG_UCODE_LOADING_START_ADDRESS);
1794 
1795 	for (i = 0; i < fw_size; i++)
1796 		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
1797 			     le32_to_cpup(fw_data++));
1798 
1799 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1800 }
1801 
1802 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
1803 {
1804 	const struct rlc_firmware_header_v2_2 *hdr;
1805 	const __le32 *fw_data;
1806 	unsigned i, fw_size;
1807 	u32 tmp;
1808 
1809 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1810 
1811 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1812 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1813 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1814 
1815 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
1816 
1817 	for (i = 0; i < fw_size; i++) {
1818 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1819 			drm_msleep(1);
1820 		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
1821 				le32_to_cpup(fw_data++));
1822 	}
1823 
1824 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1825 
1826 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1827 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1828 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1829 
1830 	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
1831 	for (i = 0; i < fw_size; i++) {
1832 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1833 			drm_msleep(1);
1834 		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
1835 				le32_to_cpup(fw_data++));
1836 	}
1837 
1838 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1839 
1840 	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
1841 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1842 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1843 	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
1844 }
1845 
1846 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
1847 {
1848 	const struct rlc_firmware_header_v2_3 *hdr;
1849 	const __le32 *fw_data;
1850 	unsigned i, fw_size;
1851 	u32 tmp;
1852 
1853 	hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
1854 
1855 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1856 			le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
1857 	fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
1858 
1859 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
1860 
1861 	for (i = 0; i < fw_size; i++) {
1862 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1863 			drm_msleep(1);
1864 		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
1865 				le32_to_cpup(fw_data++));
1866 	}
1867 
1868 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
1869 
1870 	tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
1871 	tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1872 	WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
1873 
1874 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1875 			le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
1876 	fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
1877 
1878 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
1879 
1880 	for (i = 0; i < fw_size; i++) {
1881 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
1882 			drm_msleep(1);
1883 		WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
1884 				le32_to_cpup(fw_data++));
1885 	}
1886 
1887 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
1888 
1889 	tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
1890 	tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
1891 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
1892 }
1893 
1894 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
1895 {
1896 	const struct rlc_firmware_header_v2_0 *hdr;
1897 	uint16_t version_major;
1898 	uint16_t version_minor;
1899 
1900 	if (!adev->gfx.rlc_fw)
1901 		return -EINVAL;
1902 
1903 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1904 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1905 
1906 	version_major = le16_to_cpu(hdr->header.header_version_major);
1907 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
1908 
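	/* v2.0 headers carry the core RLCG image; minor version >= 2 adds
	 * the LX6 IRAM/DRAM images and minor version 3 adds RLCP/RLCV. */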
1909 	if (version_major == 2) {
1910 		gfx_v11_0_load_rlcg_microcode(adev);
1911 		if (amdgpu_dpm == 1) {
1912 			if (version_minor >= 2)
1913 				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
1914 			if (version_minor == 3)
1915 				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
1916 		}
1917 
1918 		return 0;
1919 	}
1920 
1921 	return -EINVAL;
1922 }
1923 
1924 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
1925 {
1926 	int r;
1927 
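	/* When the PSP front-loads the RLC ucode, only the CSB and SRM need
	 * to be programmed here; otherwise stop the RLC, optionally load the
	 * ucode via the legacy direct path, and restart it. */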
1928 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1929 		gfx_v11_0_init_csb(adev);
1930 
1931 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1932 			gfx_v11_0_rlc_enable_srm(adev);
1933 	} else {
1934 		if (amdgpu_sriov_vf(adev)) {
1935 			gfx_v11_0_init_csb(adev);
1936 			return 0;
1937 		}
1938 
1939 		adev->gfx.rlc.funcs->stop(adev);
1940 
1941 		/* disable CG */
1942 		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
1943 
1944 		/* disable PG */
1945 		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
1946 
1947 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1948 			/* legacy rlc firmware loading */
1949 			r = gfx_v11_0_rlc_load_microcode(adev);
1950 			if (r)
1951 				return r;
1952 		}
1953 
1954 		gfx_v11_0_init_csb(adev);
1955 
1956 		adev->gfx.rlc.funcs->start(adev);
1957 	}
1958 	return 0;
1959 }
1960 
1961 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
1962 {
1963 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
1964 	uint32_t tmp;
1965 	int i;
1966 
1967 	/* Trigger an invalidation of the L1 instruction caches */
1968 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
1969 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
1970 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
1971 
1972 	/* Wait for invalidation complete */
1973 	for (i = 0; i < usec_timeout; i++) {
1974 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
1975 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
1976 					INVALIDATE_CACHE_COMPLETE))
1977 			break;
1978 		udelay(1);
1979 	}
1980 
1981 	if (i >= usec_timeout) {
1982 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
1983 		return -EINVAL;
1984 	}
1985 
1986 	if (amdgpu_emu_mode == 1)
1987 		adev->hdp.funcs->flush_hdp(adev, NULL);
1988 
1989 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
1990 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
1991 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
1992 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
1993 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
1994 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
1995 
1996 	/* Program ME ucode address into the instruction cache address register */
1997 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
1998 			lower_32_bits(addr) & 0xFFFFF000);
1999 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2000 			upper_32_bits(addr));
2001 
2002 	return 0;
2003 }
2004 
2005 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2006 {
2007 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2008 	uint32_t tmp;
2009 	int i;
2010 
2011 	/* Trigger an invalidation of the L1 instruction caches */
2012 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2013 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2014 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2015 
2016 	/* Wait for invalidation complete */
2017 	for (i = 0; i < usec_timeout; i++) {
2018 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2019 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2020 					INVALIDATE_CACHE_COMPLETE))
2021 			break;
2022 		udelay(1);
2023 	}
2024 
2025 	if (i >= usec_timeout) {
2026 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2027 		return -EINVAL;
2028 	}
2029 
2030 	if (amdgpu_emu_mode == 1)
2031 		adev->hdp.funcs->flush_hdp(adev, NULL);
2032 
2033 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2034 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2035 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2036 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2037 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2038 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2039 
2040 	/* Program PFP ucode address into the instruction cache address register */
2041 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2042 			lower_32_bits(addr) & 0xFFFFF000);
2043 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2044 			upper_32_bits(addr));
2045 
2046 	return 0;
2047 }
2048 
2049 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2050 {
2051 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2052 	uint32_t tmp;
2053 	int i;
2054 
2055 	/* Trigger an invalidation of the L1 instruction caches */
2056 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2057 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2058 
2059 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2060 
2061 	/* Wait for invalidation complete */
2062 	for (i = 0; i < usec_timeout; i++) {
2063 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2064 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2065 					INVALIDATE_CACHE_COMPLETE))
2066 			break;
2067 		udelay(1);
2068 	}
2069 
2070 	if (i >= usec_timeout) {
2071 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2072 		return -EINVAL;
2073 	}
2074 
2075 	if (amdgpu_emu_mode == 1)
2076 		adev->hdp.funcs->flush_hdp(adev, NULL);
2077 
2078 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2079 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2080 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2081 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2082 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2083 
2084 	/* Program MEC1 ucode address into the instruction cache address register */
2085 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2086 			lower_32_bits(addr) & 0xFFFFF000);
2087 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2088 			upper_32_bits(addr));
2089 
2090 	return 0;
2091 }
2092 
2093 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2094 {
2095 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2096 	uint32_t tmp;
2097 	unsigned i, pipe_id;
2098 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2099 
2100 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2101 		adev->gfx.pfp_fw->data;
2102 
2103 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2104 		lower_32_bits(addr));
2105 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2106 		upper_32_bits(addr));
2107 
2108 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2109 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2110 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2111 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2112 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2113 
2114 	/*
2115 	 * Programming any of the CP_PFP_IC_BASE registers
2116 	 * forces invalidation of the PFP L1 I$. Wait for the
2117 	 * invalidation to complete.
2118 	 */
2119 	for (i = 0; i < usec_timeout; i++) {
2120 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2121 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2122 			INVALIDATE_CACHE_COMPLETE))
2123 			break;
2124 		udelay(1);
2125 	}
2126 
2127 	if (i >= usec_timeout) {
2128 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2129 		return -EINVAL;
2130 	}
2131 
2132 	/* Prime the L1 instruction caches */
2133 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2134 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2135 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2136 	/* Wait for the cache to be primed */
2137 	for (i = 0; i < usec_timeout; i++) {
2138 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2139 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2140 			ICACHE_PRIMED))
2141 			break;
2142 		udelay(1);
2143 	}
2144 
2145 	if (i >= usec_timeout) {
2146 		dev_err(adev->dev, "failed to prime instruction cache\n");
2147 		return -EINVAL;
2148 	}
2149 
2150 	mutex_lock(&adev->srbm_mutex);
2151 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2152 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2153 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2154 			(pfp_hdr->ucode_start_addr_hi << 30) |
2155 			(pfp_hdr->ucode_start_addr_lo >> 2));
2156 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2157 			pfp_hdr->ucode_start_addr_hi >> 2);
2158 
2159 		/*
2160 		 * Program CP_ME_CNTL to reset the given pipe so that
2161 		 * CP_PFP_PRGRM_CNTR_START takes effect.
2162 		 */
2163 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2164 		if (pipe_id == 0)
2165 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2166 					PFP_PIPE0_RESET, 1);
2167 		else
2168 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2169 					PFP_PIPE1_RESET, 1);
2170 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2171 
2172 		/* Clear pfp pipe reset bit. */
2173 		if (pipe_id == 0)
2174 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2175 					PFP_PIPE0_RESET, 0);
2176 		else
2177 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2178 					PFP_PIPE1_RESET, 0);
2179 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2180 
2181 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2182 			lower_32_bits(addr2));
2183 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2184 			upper_32_bits(addr2));
2185 	}
2186 	soc21_grbm_select(adev, 0, 0, 0, 0);
2187 	mutex_unlock(&adev->srbm_mutex);
2188 
2189 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2190 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2191 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2192 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2193 
2194 	/* Invalidate the data caches */
2195 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2196 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2197 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2198 
2199 	for (i = 0; i < usec_timeout; i++) {
2200 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2201 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2202 			INVALIDATE_DCACHE_COMPLETE))
2203 			break;
2204 		udelay(1);
2205 	}
2206 
2207 	if (i >= usec_timeout) {
2208 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2209 		return -EINVAL;
2210 	}
2211 
2212 	return 0;
2213 }
2214 
2215 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2216 {
2217 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2218 	uint32_t tmp;
2219 	unsigned i, pipe_id;
2220 	const struct gfx_firmware_header_v2_0 *me_hdr;
2221 
2222 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2223 		adev->gfx.me_fw->data;
2224 
2225 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2226 		lower_32_bits(addr));
2227 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2228 		upper_32_bits(addr));
2229 
2230 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2231 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2232 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2233 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2234 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2235 
2236 	/*
2237 	 * Programming any of the CP_ME_IC_BASE registers
2238 	 * forces invalidation of the ME L1 I$. Wait for the
2239 	 * invalidation complete
2240 	 */
2241 	for (i = 0; i < usec_timeout; i++) {
2242 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2243 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2244 			INVALIDATE_CACHE_COMPLETE))
2245 			break;
2246 		udelay(1);
2247 	}
2248 
2249 	if (i >= usec_timeout) {
2250 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2251 		return -EINVAL;
2252 	}
2253 
2254 	/* Prime the instruction caches */
2255 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2256 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2257 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2258 
2259 	/* Wait for the instruction cache to be primed */
2260 	for (i = 0; i < usec_timeout; i++) {
2261 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2262 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2263 			ICACHE_PRIMED))
2264 			break;
2265 		udelay(1);
2266 	}
2267 
2268 	if (i >= usec_timeout) {
2269 		dev_err(adev->dev, "failed to prime instruction cache\n");
2270 		return -EINVAL;
2271 	}
2272 
2273 	mutex_lock(&adev->srbm_mutex);
2274 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2275 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2276 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2277 			(me_hdr->ucode_start_addr_hi << 30) |
2278 			(me_hdr->ucode_start_addr_lo >> 2) );
2279 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2280 			me_hdr->ucode_start_addr_hi>>2);
2281 
2282 		/*
2283 		 * Program CP_ME_CNTL to reset the given pipe so that
2284 		 * CP_ME_PRGRM_CNTR_START takes effect.
2285 		 */
2286 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2287 		if (pipe_id == 0)
2288 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2289 					ME_PIPE0_RESET, 1);
2290 		else
2291 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2292 					ME_PIPE1_RESET, 1);
2293 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2294 
2295 		/* Clear me pipe reset bit. */
2296 		if (pipe_id == 0)
2297 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2298 					ME_PIPE0_RESET, 0);
2299 		else
2300 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2301 					ME_PIPE1_RESET, 0);
2302 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2303 
2304 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2305 			lower_32_bits(addr2));
2306 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2307 			upper_32_bits(addr2));
2308 	}
2309 	soc21_grbm_select(adev, 0, 0, 0, 0);
2310 	mutex_unlock(&adev->srbm_mutex);
2311 
2312 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2313 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2314 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2315 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2316 
2317 	/* Invalidate the data caches */
2318 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2319 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2320 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2321 
2322 	for (i = 0; i < usec_timeout; i++) {
2323 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2324 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2325 			INVALIDATE_DCACHE_COMPLETE))
2326 			break;
2327 		udelay(1);
2328 	}
2329 
2330 	if (i >= usec_timeout) {
2331 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2332 		return -EINVAL;
2333 	}
2334 
2335 	return 0;
2336 }
2337 
2338 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2339 {
2340 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2341 	uint32_t tmp;
2342 	unsigned i;
2343 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2344 
2345 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2346 		adev->gfx.mec_fw->data;
2347 
2348 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2349 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2350 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2351 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2352 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2353 
2354 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2355 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2356 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2357 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2358 
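	/* Program the RS64 MEC instruction/data base addresses and the ucode
	 * start PC for every compute pipe. */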
2359 	mutex_lock(&adev->srbm_mutex);
2360 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2361 		soc21_grbm_select(adev, 1, i, 0, 0);
2362 
2363 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2364 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2365 		     upper_32_bits(addr2));
2366 
2367 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2368 					mec_hdr->ucode_start_addr_lo >> 2 |
2369 					mec_hdr->ucode_start_addr_hi << 30);
2370 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2371 					mec_hdr->ucode_start_addr_hi >> 2);
2372 
2373 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2374 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2375 		     upper_32_bits(addr));
2376 	}
2377 	mutex_unlock(&adev->srbm_mutex);
2378 	soc21_grbm_select(adev, 0, 0, 0, 0);
2379 
2380 	/* Trigger an invalidation of the MEC data cache */
2381 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2382 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2383 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2384 
2385 	/* Wait for invalidation complete */
2386 	for (i = 0; i < usec_timeout; i++) {
2387 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2388 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2389 				       INVALIDATE_DCACHE_COMPLETE))
2390 			break;
2391 		udelay(1);
2392 	}
2393 
2394 	if (i >= usec_timeout) {
2395 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2396 		return -EINVAL;
2397 	}
2398 
2399 	/* Trigger an invalidation of the L1 instruction caches */
2400 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2401 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2402 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2403 
2404 	/* Wait for invalidation complete */
2405 	for (i = 0; i < usec_timeout; i++) {
2406 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2407 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2408 				       INVALIDATE_CACHE_COMPLETE))
2409 			break;
2410 		udelay(1);
2411 	}
2412 
2413 	if (i >= usec_timeout) {
2414 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2415 		return -EINVAL;
2416 	}
2417 
2418 	return 0;
2419 }
2420 
2421 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2422 {
2423 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2424 	const struct gfx_firmware_header_v2_0 *me_hdr;
2425 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2426 	uint32_t pipe_id, tmp;
2427 
2428 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2429 		adev->gfx.mec_fw->data;
2430 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2431 		adev->gfx.me_fw->data;
2432 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2433 		adev->gfx.pfp_fw->data;
2434 
2435 	/* config pfp program start addr */
2436 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2437 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2438 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2439 			(pfp_hdr->ucode_start_addr_hi << 30) |
2440 			(pfp_hdr->ucode_start_addr_lo >> 2));
2441 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2442 			pfp_hdr->ucode_start_addr_hi >> 2);
2443 	}
2444 	soc21_grbm_select(adev, 0, 0, 0, 0);
2445 
2446 	/* reset pfp pipe */
2447 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2448 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2449 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2450 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2451 
2452 	/* clear pfp pipe reset */
2453 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2454 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2455 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2456 
2457 	/* config me program start addr */
2458 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2459 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2460 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2461 			(me_hdr->ucode_start_addr_hi << 30) |
2462 			(me_hdr->ucode_start_addr_lo >> 2) );
2463 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2464 			me_hdr->ucode_start_addr_hi>>2);
2465 	}
2466 	soc21_grbm_select(adev, 0, 0, 0, 0);
2467 
2468 	/* reset me pipe */
2469 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2470 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2471 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2472 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2473 
2474 	/* clear me pipe reset */
2475 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2476 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2477 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2478 
2479 	/* config mec program start addr */
2480 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2481 		soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2482 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2483 					mec_hdr->ucode_start_addr_lo >> 2 |
2484 					mec_hdr->ucode_start_addr_hi << 30);
2485 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2486 					mec_hdr->ucode_start_addr_hi >> 2);
2487 	}
2488 	soc21_grbm_select(adev, 0, 0, 0, 0);
2489 
2490 	/* reset mec pipe */
2491 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2492 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2493 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2494 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2495 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2496 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2497 
2498 	/* clear mec pipe reset */
2499 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2500 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2501 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2502 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2503 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2504 }
2505 
2506 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2507 {
2508 	uint32_t cp_status;
2509 	uint32_t bootload_status;
2510 	int i, r;
2511 	uint64_t addr, addr2;
2512 
2513 	for (i = 0; i < adev->usec_timeout; i++) {
2514 		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2515 
2516 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||
2517 				adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4))
2518 			bootload_status = RREG32_SOC15(GC, 0,
2519 					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
2520 		else
2521 			bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2522 
2523 		if ((cp_status == 0) &&
2524 		    (REG_GET_FIELD(bootload_status,
2525 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2526 			break;
2527 		}
2528 		udelay(1);
2529 	}
2530 
2531 	if (i >= adev->usec_timeout) {
2532 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2533 		return -ETIMEDOUT;
2534 	}
2535 
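	/* With RLC backdoor autoload the CP ucode images already sit in the
	 * autoload buffer; point the ME/PFP/MEC caches at their offsets. */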
2536 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2537 		if (adev->gfx.rs64_enable) {
2538 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2539 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2540 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2541 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2542 			r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2543 			if (r)
2544 				return r;
2545 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2546 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2547 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2548 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2549 			r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2550 			if (r)
2551 				return r;
2552 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2553 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2554 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2555 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2556 			r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
2557 			if (r)
2558 				return r;
2559 		} else {
2560 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2561 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2562 			r = gfx_v11_0_config_me_cache(adev, addr);
2563 			if (r)
2564 				return r;
2565 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2566 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2567 			r = gfx_v11_0_config_pfp_cache(adev, addr);
2568 			if (r)
2569 				return r;
2570 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2571 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2572 			r = gfx_v11_0_config_mec_cache(adev, addr);
2573 			if (r)
2574 				return r;
2575 		}
2576 	}
2577 
2578 	return 0;
2579 }
2580 
2581 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2582 {
2583 	int i;
2584 	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2585 
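	/* Halt or unhalt the ME and PFP, then wait for CP_STAT to read back
	 * zero (CP idle) before returning. */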
2586 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2587 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2588 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2589 
2590 	for (i = 0; i < adev->usec_timeout; i++) {
2591 		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2592 			break;
2593 		udelay(1);
2594 	}
2595 
2596 	if (i >= adev->usec_timeout)
2597 		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2598 
2599 	return 0;
2600 }
2601 
2602 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2603 {
2604 	int r;
2605 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2606 	const __le32 *fw_data;
2607 	unsigned i, fw_size;
2608 
2609 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2610 		adev->gfx.pfp_fw->data;
2611 
2612 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2613 
2614 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2615 		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2616 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2617 
2618 	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2619 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2620 				      &adev->gfx.pfp.pfp_fw_obj,
2621 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
2622 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
2623 	if (r) {
2624 		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2625 		gfx_v11_0_pfp_fini(adev);
2626 		return r;
2627 	}
2628 
2629 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2630 
2631 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2632 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2633 
2634 	gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
2635 
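	/* Write the PFP jump table through the HYP ADDR/DATA register pair,
	 * then record the firmware version. */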
2636 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
2637 
2638 	for (i = 0; i < pfp_hdr->jt_size; i++)
2639 		WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
2640 			     le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
2641 
2642 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2643 
2644 	return 0;
2645 }
2646 
2647 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
2648 {
2649 	int r;
2650 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2651 	const __le32 *fw_ucode, *fw_data;
2652 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2653 	uint32_t tmp;
2654 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2655 
2656 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2657 		adev->gfx.pfp_fw->data;
2658 
2659 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2660 
2661 	/* instruction */
2662 	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
2663 		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
2664 	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
2665 	/* data */
2666 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2667 		le32_to_cpu(pfp_hdr->data_offset_bytes));
2668 	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
2669 
2670 	/* 64kb align */
2671 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2672 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2673 				      &adev->gfx.pfp.pfp_fw_obj,
2674 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
2675 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
2676 	if (r) {
2677 		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
2678 		gfx_v11_0_pfp_fini(adev);
2679 		return r;
2680 	}
2681 
2682 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
2683 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2684 				      &adev->gfx.pfp.pfp_fw_data_obj,
2685 				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
2686 				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
2687 	if (r) {
2688 		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
2689 		gfx_v11_0_pfp_fini(adev);
2690 		return r;
2691 	}
2692 
2693 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
2694 	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
2695 
2696 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2697 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
2698 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2699 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
2700 
2701 	if (amdgpu_emu_mode == 1)
2702 		adev->hdp.funcs->flush_hdp(adev, NULL);
2703 
2704 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2705 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2706 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2707 		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2708 
2709 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2710 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2711 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2712 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2713 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2714 
2715 	/*
2716 	 * Programming any of the CP_PFP_IC_BASE registers
2717 	 * forces invalidation of the ME L1 I$. Wait for the
2718 	 * invalidation complete
2719 	 */
2720 	for (i = 0; i < usec_timeout; i++) {
2721 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2722 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2723 			INVALIDATE_CACHE_COMPLETE))
2724 			break;
2725 		udelay(1);
2726 	}
2727 
2728 	if (i >= usec_timeout) {
2729 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2730 		return -EINVAL;
2731 	}
2732 
2733 	/* Prime the L1 instruction caches */
2734 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2735 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2736 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2737 	/* Wait for the cache to be primed */
2738 	for (i = 0; i < usec_timeout; i++) {
2739 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2740 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2741 			ICACHE_PRIMED))
2742 			break;
2743 		udelay(1);
2744 	}
2745 
2746 	if (i >= usec_timeout) {
2747 		dev_err(adev->dev, "failed to prime instruction cache\n");
2748 		return -EINVAL;
2749 	}
2750 
2751 	mutex_lock(&adev->srbm_mutex);
2752 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2753 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2754 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2755 			(pfp_hdr->ucode_start_addr_hi << 30) |
2756 			(pfp_hdr->ucode_start_addr_lo >> 2) );
2757 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2758 			pfp_hdr->ucode_start_addr_hi>>2);
2759 
2760 		/*
2761 		 * Program CP_ME_CNTL to reset the given pipe so that
2762 		 * CP_PFP_PRGRM_CNTR_START takes effect.
2763 		 */
2764 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2765 		if (pipe_id == 0)
2766 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2767 					PFP_PIPE0_RESET, 1);
2768 		else
2769 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2770 					PFP_PIPE1_RESET, 1);
2771 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2772 
2773 		/* Clear pfp pipe reset bit. */
2774 		if (pipe_id == 0)
2775 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2776 					PFP_PIPE0_RESET, 0);
2777 		else
2778 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2779 					PFP_PIPE1_RESET, 0);
2780 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2781 
2782 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2783 			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2784 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2785 			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2786 	}
2787 	soc21_grbm_select(adev, 0, 0, 0, 0);
2788 	mutex_unlock(&adev->srbm_mutex);
2789 
2790 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2791 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2792 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2793 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2794 
2795 	/* Invalidate the data caches */
2796 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2797 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2798 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2799 
2800 	for (i = 0; i < usec_timeout; i++) {
2801 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2802 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2803 			INVALIDATE_DCACHE_COMPLETE))
2804 			break;
2805 		udelay(1);
2806 	}
2807 
2808 	if (i >= usec_timeout) {
2809 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2810 		return -EINVAL;
2811 	}
2812 
2813 	return 0;
2814 }
2815 
2816 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2817 {
2818 	int r;
2819 	const struct gfx_firmware_header_v1_0 *me_hdr;
2820 	const __le32 *fw_data;
2821 	unsigned i, fw_size;
2822 
2823 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
2824 		adev->gfx.me_fw->data;
2825 
2826 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2827 
2828 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2829 		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2830 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2831 
2832 	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2833 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2834 				      &adev->gfx.me.me_fw_obj,
2835 				      &adev->gfx.me.me_fw_gpu_addr,
2836 				      (void **)&adev->gfx.me.me_fw_ptr);
2837 	if (r) {
2838 		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2839 		gfx_v11_0_me_fini(adev);
2840 		return r;
2841 	}
2842 
2843 	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2844 
2845 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2846 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2847 
2848 	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
2849 
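	/* Write the ME jump table through the HYP ADDR/DATA register pair,
	 * then record the firmware version. */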
2850 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
2851 
2852 	for (i = 0; i < me_hdr->jt_size; i++)
2853 		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
2854 			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));
2855 
2856 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
2857 
2858 	return 0;
2859 }
2860 
2861 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
2862 {
2863 	int r;
2864 	const struct gfx_firmware_header_v2_0 *me_hdr;
2865 	const __le32 *fw_ucode, *fw_data;
2866 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2867 	uint32_t tmp;
2868 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2869 
2870 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2871 		adev->gfx.me_fw->data;
2872 
2873 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2874 
2875 	/* instruction */
2876 	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
2877 		le32_to_cpu(me_hdr->ucode_offset_bytes));
2878 	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
2879 	/* data */
2880 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2881 		le32_to_cpu(me_hdr->data_offset_bytes));
2882 	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
2883 
2884 	/* 64kb align */
2885 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2886 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2887 				      &adev->gfx.me.me_fw_obj,
2888 				      &adev->gfx.me.me_fw_gpu_addr,
2889 				      (void **)&adev->gfx.me.me_fw_ptr);
2890 	if (r) {
2891 		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
2892 		gfx_v11_0_me_fini(adev);
2893 		return r;
2894 	}
2895 
2896 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
2897 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2898 				      &adev->gfx.me.me_fw_data_obj,
2899 				      &adev->gfx.me.me_fw_data_gpu_addr,
2900 				      (void **)&adev->gfx.me.me_fw_data_ptr);
2901 	if (r) {
2902 		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
2903 		gfx_v11_0_me_fini(adev);
2904 		return r;
2905 	}
2906 
2907 	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
2908 	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
2909 
2910 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2911 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
2912 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2913 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
2914 
2915 	if (amdgpu_emu_mode == 1)
2916 		adev->hdp.funcs->flush_hdp(adev, NULL);
2917 
2918 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2919 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
2920 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2921 		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2922 
2923 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2924 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2925 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2926 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2927 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2928 
2929 	/*
2930 	 * Programming any of the CP_ME_IC_BASE registers
2931 	 * forces invalidation of the ME L1 I$. Wait for the
2932 	 * invalidation complete
2933 	 */
2934 	for (i = 0; i < usec_timeout; i++) {
2935 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2936 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2937 			INVALIDATE_CACHE_COMPLETE))
2938 			break;
2939 		udelay(1);
2940 	}
2941 
2942 	if (i >= usec_timeout) {
2943 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2944 		return -EINVAL;
2945 	}
2946 
2947 	/* Prime the instruction caches */
2948 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2949 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2950 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2951 
2952 	/* Wait for the instruction cache to be primed */
2953 	for (i = 0; i < usec_timeout; i++) {
2954 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2955 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2956 			ICACHE_PRIMED))
2957 			break;
2958 		udelay(1);
2959 	}
2960 
2961 	if (i >= usec_timeout) {
2962 		dev_err(adev->dev, "failed to prime instruction cache\n");
2963 		return -EINVAL;
2964 	}
2965 
2966 	mutex_lock(&adev->srbm_mutex);
2967 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2968 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2969 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2970 			(me_hdr->ucode_start_addr_hi << 30) |
2971 			(me_hdr->ucode_start_addr_lo >> 2) );
2972 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2973 			me_hdr->ucode_start_addr_hi>>2);
2974 
2975 		/*
2976 		 * Program CP_ME_CNTL to reset the given pipe so that
2977 		 * CP_ME_PRGRM_CNTR_START takes effect.
2978 		 */
2979 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2980 		if (pipe_id == 0)
2981 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2982 					ME_PIPE0_RESET, 1);
2983 		else
2984 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2985 					ME_PIPE1_RESET, 1);
2986 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2987 
2988 		/* Clear me pipe reset bit. */
2989 		if (pipe_id == 0)
2990 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2991 					ME_PIPE0_RESET, 0);
2992 		else
2993 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2994 					ME_PIPE1_RESET, 0);
2995 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2996 
2997 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2998 			lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
2999 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3000 			upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3001 	}
3002 	soc21_grbm_select(adev, 0, 0, 0, 0);
3003 	mutex_unlock(&adev->srbm_mutex);
3004 
3005 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3006 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3007 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3008 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3009 
3010 	/* Invalidate the data caches */
3011 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3012 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3013 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3014 
3015 	for (i = 0; i < usec_timeout; i++) {
3016 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3017 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3018 			INVALIDATE_DCACHE_COMPLETE))
3019 			break;
3020 		udelay(1);
3021 	}
3022 
3023 	if (i >= usec_timeout) {
3024 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3025 		return -EINVAL;
3026 	}
3027 
3028 	return 0;
3029 }
3030 
3031 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3032 {
3033 	int r;
3034 
3035 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3036 		return -EINVAL;
3037 
3038 	gfx_v11_0_cp_gfx_enable(adev, false);
3039 
3040 	if (adev->gfx.rs64_enable)
3041 		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3042 	else
3043 		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3044 	if (r) {
3045 		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3046 		return r;
3047 	}
3048 
3049 	if (adev->gfx.rs64_enable)
3050 		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3051 	else
3052 		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3053 	if (r) {
3054 		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3055 		return r;
3056 	}
3057 
3058 	return 0;
3059 }
3060 
3061 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3062 {
3063 	struct amdgpu_ring *ring;
3064 	const struct cs_section_def *sect = NULL;
3065 	const struct cs_extent_def *ext = NULL;
3066 	int r, i;
3067 	int ctx_reg_offset;
3068 
3069 	/* init the CP */
3070 	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3071 		     adev->gfx.config.max_hw_contexts - 1);
3072 	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3073 
3074 	if (!amdgpu_async_gfx_ring)
3075 		gfx_v11_0_cp_gfx_enable(adev, true);
3076 
3077 	ring = &adev->gfx.gfx_ring[0];
3078 	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3079 	if (r) {
3080 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3081 		return r;
3082 	}
3083 
3084 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3085 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3086 
3087 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3088 	amdgpu_ring_write(ring, 0x80000000);
3089 	amdgpu_ring_write(ring, 0x80000000);
3090 
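	/* Emit the golden context-register state from the clear-state tables
	 * between the preamble begin/end clear-state markers. */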
3091 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3092 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3093 			if (sect->id == SECT_CONTEXT) {
3094 				amdgpu_ring_write(ring,
3095 						  PACKET3(PACKET3_SET_CONTEXT_REG,
3096 							  ext->reg_count));
3097 				amdgpu_ring_write(ring, ext->reg_index -
3098 						  PACKET3_SET_CONTEXT_REG_START);
3099 				for (i = 0; i < ext->reg_count; i++)
3100 					amdgpu_ring_write(ring, ext->extent[i]);
3101 			}
3102 		}
3103 	}
3104 
3105 	ctx_reg_offset =
3106 		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3107 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3108 	amdgpu_ring_write(ring, ctx_reg_offset);
3109 	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3110 
3111 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3112 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3113 
3114 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3115 	amdgpu_ring_write(ring, 0);
3116 
3117 	amdgpu_ring_commit(ring);
3118 
3119 	/* submit cs packet to copy state 0 to next available state */
3120 	if (adev->gfx.num_gfx_rings > 1) {
3121 		/* maximum supported gfx ring is 2 */
3122 		ring = &adev->gfx.gfx_ring[1];
3123 		r = amdgpu_ring_alloc(ring, 2);
3124 		if (r) {
3125 			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3126 			return r;
3127 		}
3128 
3129 		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3130 		amdgpu_ring_write(ring, 0);
3131 
3132 		amdgpu_ring_commit(ring);
3133 	}
3134 	return 0;
3135 }
3136 
3137 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3138 					 CP_PIPE_ID pipe)
3139 {
3140 	u32 tmp;
3141 
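	/* Select which gfx pipe subsequent CP ring-buffer register writes
	 * will target. */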
3142 	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3143 	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3144 
3145 	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3146 }
3147 
3148 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3149 					  struct amdgpu_ring *ring)
3150 {
3151 	u32 tmp;
3152 
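	/* Route the ring's doorbell: program its offset and enable bit, and
	 * set the doorbell range the CP will accept for this ring. */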
3153 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3154 	if (ring->use_doorbell) {
3155 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3156 				    DOORBELL_OFFSET, ring->doorbell_index);
3157 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3158 				    DOORBELL_EN, 1);
3159 	} else {
3160 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3161 				    DOORBELL_EN, 0);
3162 	}
3163 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3164 
3165 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3166 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
3167 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3168 
3169 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3170 		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3171 }
3172 
3173 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3174 {
3175 	struct amdgpu_ring *ring;
3176 	u32 tmp;
3177 	u32 rb_bufsz;
3178 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3179 	u32 i;
3180 
3181 	/* Set the write pointer delay */
3182 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3183 
3184 	/* set the RB to use vmid 0 */
3185 	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3186 
3187 	/* Init gfx ring 0 for pipe 0 */
3188 	mutex_lock(&adev->srbm_mutex);
3189 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3190 
3191 	/* Set ring buffer size */
3192 	ring = &adev->gfx.gfx_ring[0];
3193 	rb_bufsz = order_base_2(ring->ring_size / 8);
3194 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3195 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3196 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3197 
3198 	/* Initialize the ring buffer's write pointers */
3199 	ring->wptr = 0;
3200 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3201 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3202 
3203 	/* set the wb address whether it's enabled or not */
3204 	rptr_addr = ring->rptr_gpu_addr;
3205 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3206 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3207 		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3208 
3209 	wptr_gpu_addr = ring->wptr_gpu_addr;
3210 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3211 		     lower_32_bits(wptr_gpu_addr));
3212 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3213 		     upper_32_bits(wptr_gpu_addr));
3214 
3215 	mdelay(1);
3216 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3217 
3218 	rb_addr = ring->gpu_addr >> 8;
3219 	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3220 	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3221 
3222 	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3223 
3224 	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3225 	mutex_unlock(&adev->srbm_mutex);
3226 
3227 	/* Init gfx ring 1 for pipe 1 */
3228 	if (adev->gfx.num_gfx_rings > 1) {
3229 		mutex_lock(&adev->srbm_mutex);
3230 		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3231 		/* maximum supported gfx ring is 2 */
3232 		ring = &adev->gfx.gfx_ring[1];
3233 		rb_bufsz = order_base_2(ring->ring_size / 8);
3234 		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3235 		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3236 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3237 		/* Initialize the ring buffer's write pointers */
3238 		ring->wptr = 0;
3239 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3240 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3241 		/* Set the wb address whether it's enabled or not */
3242 		rptr_addr = ring->rptr_gpu_addr;
3243 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3244 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3245 			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3246 		wptr_gpu_addr = ring->wptr_gpu_addr;
3247 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3248 			     lower_32_bits(wptr_gpu_addr));
3249 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3250 			     upper_32_bits(wptr_gpu_addr));
3251 
3252 		mdelay(1);
3253 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3254 
3255 		rb_addr = ring->gpu_addr >> 8;
3256 		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3257 		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3258 		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3259 
3260 		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3261 		mutex_unlock(&adev->srbm_mutex);
3262 	}
3263 	/* Switch to pipe 0 */
3264 	mutex_lock(&adev->srbm_mutex);
3265 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3266 	mutex_unlock(&adev->srbm_mutex);
3267 
3268 	/* start the ring */
3269 	gfx_v11_0_cp_gfx_start(adev);
3270 
3271 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3272 		ring = &adev->gfx.gfx_ring[i];
3273 		ring->sched.ready = true;
3274 	}
3275 
3276 	return 0;
3277 }
3278 
3279 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3280 {
3281 	u32 data;
3282 
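	/* RS64-based MEC firmware is controlled via CP_MEC_RS64_CNTL with
	 * per-pipe reset/active bits; the legacy MEC uses the halt bits in
	 * CP_MEC_CNTL.
	 */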
3283 	if (adev->gfx.rs64_enable) {
3284 		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3285 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3286 							 enable ? 0 : 1);
3287 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3288 							 enable ? 0 : 1);
3289 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3290 							 enable ? 0 : 1);
3291 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3292 							 enable ? 0 : 1);
3293 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3294 							 enable ? 0 : 1);
3295 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3296 							 enable ? 1 : 0);
3297 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3298 							 enable ? 1 : 0);
3299 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3300 							 enable ? 1 : 0);
3301 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3302 							 enable ? 1 : 0);
3303 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3304 							 enable ? 0 : 1);
3305 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3306 	} else {
3307 		data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3308 
3309 		if (enable) {
3310 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3311 			if (!adev->enable_mes_kiq)
3312 				data = REG_SET_FIELD(data, CP_MEC_CNTL,
3313 						     MEC_ME2_HALT, 0);
3314 		} else {
3315 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3316 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3317 		}
3318 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3319 	}
3320 
3321 	adev->gfx.kiq.ring.sched.ready = enable;
3322 
3323 	udelay(50);
3324 }
3325 
3326 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3327 {
3328 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3329 	const __le32 *fw_data;
3330 	unsigned i, fw_size;
3331 	u32 *fw = NULL;
3332 	int r;
3333 
3334 	if (!adev->gfx.mec_fw)
3335 		return -EINVAL;
3336 
3337 	gfx_v11_0_cp_compute_enable(adev, false);
3338 
3339 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3340 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3341 
3342 	fw_data = (const __le32 *)
3343 		(adev->gfx.mec_fw->data +
3344 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3345 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3346 
3347 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3348 					  PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3349 					  &adev->gfx.mec.mec_fw_obj,
3350 					  &adev->gfx.mec.mec_fw_gpu_addr,
3351 					  (void **)&fw);
3352 	if (r) {
3353 		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3354 		gfx_v11_0_mec_fini(adev);
3355 		return r;
3356 	}
3357 
3358 	memcpy(fw, fw_data, fw_size);
3359 
3360 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3361 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3362 
3363 	gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3364 
3365 	/* MEC1 */
3366 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3367 
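	/* write the MEC jump table from the firmware image */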
3368 	for (i = 0; i < mec_hdr->jt_size; i++)
3369 		WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3370 			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3371 
3372 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3373 
3374 	return 0;
3375 }
3376 
3377 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3378 {
3379 	const struct gfx_firmware_header_v2_0 *mec_hdr;
3380 	const __le32 *fw_ucode, *fw_data;
3381 	u32 tmp, fw_ucode_size, fw_data_size;
3382 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3383 	u32 *fw_ucode_ptr, *fw_data_ptr;
3384 	int r;
3385 
3386 	if (!adev->gfx.mec_fw)
3387 		return -EINVAL;
3388 
3389 	gfx_v11_0_cp_compute_enable(adev, false);
3390 
3391 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3392 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3393 
3394 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3395 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
3396 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3397 
3398 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3399 				le32_to_cpu(mec_hdr->data_offset_bytes));
3400 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3401 
3402 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3403 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
3404 				      &adev->gfx.mec.mec_fw_obj,
3405 				      &adev->gfx.mec.mec_fw_gpu_addr,
3406 				      (void **)&fw_ucode_ptr);
3407 	if (r) {
3408 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3409 		gfx_v11_0_mec_fini(adev);
3410 		return r;
3411 	}
3412 
3413 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3414 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
3415 				      &adev->gfx.mec.mec_fw_data_obj,
3416 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
3417 				      (void **)&fw_data_ptr);
3418 	if (r) {
3419 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
3420 		gfx_v11_0_mec_fini(adev);
3421 		return r;
3422 	}
3423 
3424 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3425 	memcpy(fw_data_ptr, fw_data, fw_data_size);
3426 
3427 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3428 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3429 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3430 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3431 
3432 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3433 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3434 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3435 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3436 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3437 
3438 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3439 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3440 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3441 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3442 
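	/* Program the ucode/data base addresses and the start program counter
	 * for each MEC pipe.
	 */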
3443 	mutex_lock(&adev->srbm_mutex);
3444 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3445 		soc21_grbm_select(adev, 1, i, 0, 0);
3446 
3447 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3448 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3449 		     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3450 
3451 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3452 					mec_hdr->ucode_start_addr_lo >> 2 |
3453 					mec_hdr->ucode_start_addr_hi << 30);
3454 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3455 					mec_hdr->ucode_start_addr_hi >> 2);
3456 
3457 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
3458 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3459 		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3460 	}
3461 	soc21_grbm_select(adev, 0, 0, 0, 0);
3462 	mutex_unlock(&adev->srbm_mutex);
3463 
3464 	/* Trigger an invalidation of the MEC data cache */
3465 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3466 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3467 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3468 
3469 	/* Wait for invalidation complete */
3470 	for (i = 0; i < usec_timeout; i++) {
3471 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3472 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3473 				       INVALIDATE_DCACHE_COMPLETE))
3474 			break;
3475 		udelay(1);
3476 	}
3477 
3478 	if (i >= usec_timeout) {
3479 		dev_err(adev->dev, "failed to invalidate the MEC data cache\n");
3480 		return -EINVAL;
3481 	}
3482 
3483 	/* Trigger an invalidation of the L1 instruction caches */
3484 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3485 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
3486 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
3487 
3488 	/* Wait for invalidation complete */
3489 	for (i = 0; i < usec_timeout; i++) {
3490 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3491 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
3492 				       INVALIDATE_CACHE_COMPLETE))
3493 			break;
3494 		udelay(1);
3495 	}
3496 
3497 	if (i >= usec_timeout) {
3498 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3499 		return -EINVAL;
3500 	}
3501 
3502 	return 0;
3503 }
3504 
3505 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
3506 {
3507 	uint32_t tmp;
3508 	struct amdgpu_device *adev = ring->adev;
3509 
3510 	/* tell RLC which is KIQ queue */
3511 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
3512 	tmp &= 0xffffff00;
3513 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3514 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
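	/* a second write with bit 0x80 set appears to mark the KIQ entry
	 * valid; the bit is not named in the register headers */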
3515 	tmp |= 0x80;
3516 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3517 }
3518 
3519 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
3520 {
3521 	/* set graphics engine doorbell range */
3522 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3523 		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
3524 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3525 		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3526 
3527 	/* set compute engine doorbell range */
3528 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3529 		     (adev->doorbell_index.kiq * 2) << 2);
3530 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3531 		     (adev->doorbell_index.userqueue_end * 2) << 2);
3532 }
3533 
3534 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
3535 				  struct amdgpu_mqd_prop *prop)
3536 {
3537 	struct v11_gfx_mqd *mqd = m;
3538 	uint64_t hqd_gpu_addr, wb_gpu_addr;
3539 	uint32_t tmp;
3540 	uint32_t rb_bufsz;
3541 
3542 	/* set up gfx hqd wptr */
3543 	mqd->cp_gfx_hqd_wptr = 0;
3544 	mqd->cp_gfx_hqd_wptr_hi = 0;
3545 
3546 	/* set the pointer to the MQD */
3547 	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3548 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3549 
3550 	/* set up mqd control */
3551 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3552 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3553 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3554 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3555 	mqd->cp_gfx_mqd_control = tmp;
3556 
3557 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3558 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
3559 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3560 	mqd->cp_gfx_hqd_vmid = 0;
3561 
3562 	/* set up default queue priority level
3563 	 * 0x0 = low priority, 0x1 = high priority */
3564 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3565 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3566 	mqd->cp_gfx_hqd_queue_priority = tmp;
3567 
3568 	/* set up time quantum */
3569 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
3570 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3571 	mqd->cp_gfx_hqd_quantum = tmp;
3572 
3573 	/* set up gfx hqd base. this is similar to CP_RB_BASE */
3574 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3575 	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3576 	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3577 
3578 	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3579 	wb_gpu_addr = prop->rptr_gpu_addr;
3580 	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3581 	mqd->cp_gfx_hqd_rptr_addr_hi =
3582 		upper_32_bits(wb_gpu_addr) & 0xffff;
3583 
3584 	/* set up rb_wptr_poll addr */
3585 	wb_gpu_addr = prop->wptr_gpu_addr;
3586 	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3587 	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3588 
3589 	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
3590 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
3591 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
3592 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3593 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3594 #ifdef __BIG_ENDIAN
3595 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3596 #endif
3597 	mqd->cp_gfx_hqd_cntl = tmp;
3598 
3599 	/* set up cp_doorbell_control */
3600 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3601 	if (prop->use_doorbell) {
3602 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3603 				    DOORBELL_OFFSET, prop->doorbell_index);
3604 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3605 				    DOORBELL_EN, 1);
3606 	} else
3607 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3608 				    DOORBELL_EN, 0);
3609 	mqd->cp_rb_doorbell_control = tmp;
3610 
3611 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3612 	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
3613 
3614 	/* activate the queue */
3615 	mqd->cp_gfx_hqd_active = 1;
3616 
3617 	return 0;
3618 }
3619 
3620 #ifdef BRING_UP_DEBUG
3621 static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring)
3622 {
3623 	struct amdgpu_device *adev = ring->adev;
3624 	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3625 
3626 	/* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3627 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3628 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3629 
3630 	/* set GFX_MQD_BASE */
3631 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3632 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3633 
3634 	/* set GFX_MQD_CONTROL */
3635 	WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3636 
3637 	/* set GFX_HQD_VMID to 0 */
3638 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3639 
3640 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY,
3641 			mqd->cp_gfx_hqd_queue_priority);
3642 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3643 
3644 	/* set GFX_HQD_BASE, similar to CP_RB_BASE */
3645 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3646 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3647 
3648 	/* set GFX_HQD_RPTR_ADDR, similar to CP_RB_RPTR */
3649 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3650 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3651 
3652 	/* set GFX_HQD_CNTL, similar to CP_RB_CNTL */
3653 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3654 
3655 	/* set RB_WPTR_POLL_ADDR */
3656 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3657 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3658 
3659 	/* set RB_DOORBELL_CONTROL */
3660 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
3661 
3662 	/* activate the queue */
3663 	WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
3664 
3665 	return 0;
3666 }
3667 #endif
3668 
3669 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
3670 {
3671 	struct amdgpu_device *adev = ring->adev;
3672 	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3673 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3674 
3675 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3676 		memset((void *)mqd, 0, sizeof(*mqd));
3677 		mutex_lock(&adev->srbm_mutex);
3678 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3679 		amdgpu_ring_init_mqd(ring);
3680 #ifdef BRING_UP_DEBUG
3681 		gfx_v11_0_gfx_queue_init_register(ring);
3682 #endif
3683 		soc21_grbm_select(adev, 0, 0, 0, 0);
3684 		mutex_unlock(&adev->srbm_mutex);
3685 		if (adev->gfx.me.mqd_backup[mqd_idx])
3686 			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3687 	} else if (amdgpu_in_reset(adev)) {
3688 		/* reset mqd with the backup copy */
3689 		if (adev->gfx.me.mqd_backup[mqd_idx])
3690 			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3691 		/* reset the ring */
3692 		ring->wptr = 0;
3693 		*ring->wptr_cpu_addr = 0;
3694 		amdgpu_ring_clear_ring(ring);
3695 #ifdef BRING_UP_DEBUG
3696 		mutex_lock(&adev->srbm_mutex);
3697 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3698 		gfx_v11_0_gfx_queue_init_register(ring);
3699 		soc21_grbm_select(adev, 0, 0, 0, 0);
3700 		mutex_unlock(&adev->srbm_mutex);
3701 #endif
3702 	} else {
3703 		amdgpu_ring_clear_ring(ring);
3704 	}
3705 
3706 	return 0;
3707 }
3708 
3709 #ifndef BRING_UP_DEBUG
3710 static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev)
3711 {
3712 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3713 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3714 	int r, i;
3715 
3716 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
3717 		return -EINVAL;
3718 
3719 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
3720 					adev->gfx.num_gfx_rings);
3721 	if (r) {
3722 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3723 		return r;
3724 	}
3725 
3726 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3727 		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
3728 
3729 	return amdgpu_ring_test_helper(kiq_ring);
3730 }
3731 #endif
3732 
3733 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3734 {
3735 	int r, i;
3736 	struct amdgpu_ring *ring;
3737 
3738 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3739 		ring = &adev->gfx.gfx_ring[i];
3740 
3741 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3742 		if (unlikely(r != 0))
3743 			goto done;
3744 
3745 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3746 		if (!r) {
3747 			r = gfx_v11_0_gfx_init_queue(ring);
3748 			amdgpu_bo_kunmap(ring->mqd_obj);
3749 			ring->mqd_ptr = NULL;
3750 		}
3751 		amdgpu_bo_unreserve(ring->mqd_obj);
3752 		if (r)
3753 			goto done;
3754 	}
3755 #ifndef BRING_UP_DEBUG
3756 	r = gfx_v11_0_kiq_enable_kgq(adev);
3757 	if (r)
3758 		goto done;
3759 #endif
3760 	r = gfx_v11_0_cp_gfx_start(adev);
3761 	if (r)
3762 		goto done;
3763 
3764 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3765 		ring = &adev->gfx.gfx_ring[i];
3766 		ring->sched.ready = true;
3767 	}
3768 done:
3769 	return r;
3770 }
3771 
3772 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
3773 				      struct amdgpu_mqd_prop *prop)
3774 {
3775 	struct v11_compute_mqd *mqd = m;
3776 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3777 	uint32_t tmp;
3778 
3779 	mqd->header = 0xC0310800;
3780 	mqd->compute_pipelinestat_enable = 0x00000001;
3781 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3782 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3783 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3784 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3785 	mqd->compute_misc_reserved = 0x00000007;
3786 
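	/* EOP buffer base address is programmed in 256-byte units (hence >> 8) */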
3787 	eop_base_addr = prop->eop_gpu_addr >> 8;
3788 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3789 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3790 
3791 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3792 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
3793 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3794 			(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
3795 
3796 	mqd->cp_hqd_eop_control = tmp;
3797 
3798 	/* enable doorbell? */
3799 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3800 
3801 	if (prop->use_doorbell) {
3802 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3803 				    DOORBELL_OFFSET, prop->doorbell_index);
3804 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3805 				    DOORBELL_EN, 1);
3806 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3807 				    DOORBELL_SOURCE, 0);
3808 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3809 				    DOORBELL_HIT, 0);
3810 	} else {
3811 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3812 				    DOORBELL_EN, 0);
3813 	}
3814 
3815 	mqd->cp_hqd_pq_doorbell_control = tmp;
3816 
3817 	/* disable the queue if it's active */
3818 	mqd->cp_hqd_dequeue_request = 0;
3819 	mqd->cp_hqd_pq_rptr = 0;
3820 	mqd->cp_hqd_pq_wptr_lo = 0;
3821 	mqd->cp_hqd_pq_wptr_hi = 0;
3822 
3823 	/* set the pointer to the MQD */
3824 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
3825 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3826 
3827 	/* set MQD vmid to 0 */
3828 	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
3829 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3830 	mqd->cp_mqd_control = tmp;
3831 
3832 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3833 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3834 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3835 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3836 
3837 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3838 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
3839 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3840 			    (order_base_2(prop->queue_size / 4) - 1));
3841 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3842 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3843 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3844 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3845 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3846 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3847 	mqd->cp_hqd_pq_control = tmp;
3848 
3849 	/* set the wb address whether it's enabled or not */
3850 	wb_gpu_addr = prop->rptr_gpu_addr;
3851 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3852 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3853 		upper_32_bits(wb_gpu_addr) & 0xffff;
3854 
3855 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3856 	wb_gpu_addr = prop->wptr_gpu_addr;
3857 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3858 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3859 
3860 	tmp = 0;
3861 	/* enable the doorbell if requested */
3862 	if (prop->use_doorbell) {
3863 		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3864 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3865 				DOORBELL_OFFSET, prop->doorbell_index);
3866 
3867 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3868 				    DOORBELL_EN, 1);
3869 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3870 				    DOORBELL_SOURCE, 0);
3871 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3872 				    DOORBELL_HIT, 0);
3873 	}
3874 
3875 	mqd->cp_hqd_pq_doorbell_control = tmp;
3876 
3877 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3878 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
3879 
3880 	/* set the vmid for the queue */
3881 	mqd->cp_hqd_vmid = 0;
3882 
3883 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
3884 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
3885 	mqd->cp_hqd_persistent_state = tmp;
3886 
3887 	/* set MIN_IB_AVAIL_SIZE */
3888 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
3889 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3890 	mqd->cp_hqd_ib_control = tmp;
3891 
3892 	/* set static priority for a compute queue/ring */
3893 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
3894 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
3895 
3896 	mqd->cp_hqd_active = prop->hqd_active;
3897 
3898 	return 0;
3899 }
3900 
3901 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
3902 {
3903 	struct amdgpu_device *adev = ring->adev;
3904 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
3905 	int j;
3906 
3907 	/* deactivate the queue */
3908 	if (amdgpu_sriov_vf(adev))
3909 		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
3910 
3911 	/* disable wptr polling */
3912 	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3913 
3914 	/* write the EOP addr */
3915 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
3916 	       mqd->cp_hqd_eop_base_addr_lo);
3917 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
3918 	       mqd->cp_hqd_eop_base_addr_hi);
3919 
3920 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3921 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
3922 	       mqd->cp_hqd_eop_control);
3923 
3924 	/* enable doorbell? */
3925 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3926 	       mqd->cp_hqd_pq_doorbell_control);
3927 
3928 	/* disable the queue if it's active */
3929 	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
3930 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
3931 		for (j = 0; j < adev->usec_timeout; j++) {
3932 			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
3933 				break;
3934 			udelay(1);
3935 		}
3936 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
3937 		       mqd->cp_hqd_dequeue_request);
3938 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
3939 		       mqd->cp_hqd_pq_rptr);
3940 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3941 		       mqd->cp_hqd_pq_wptr_lo);
3942 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3943 		       mqd->cp_hqd_pq_wptr_hi);
3944 	}
3945 
3946 	/* set the pointer to the MQD */
3947 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
3948 	       mqd->cp_mqd_base_addr_lo);
3949 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
3950 	       mqd->cp_mqd_base_addr_hi);
3951 
3952 	/* set MQD vmid to 0 */
3953 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
3954 	       mqd->cp_mqd_control);
3955 
3956 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3957 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
3958 	       mqd->cp_hqd_pq_base_lo);
3959 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
3960 	       mqd->cp_hqd_pq_base_hi);
3961 
3962 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3963 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
3964 	       mqd->cp_hqd_pq_control);
3965 
3966 	/* set the wb address whether it's enabled or not */
3967 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
3968 		mqd->cp_hqd_pq_rptr_report_addr_lo);
3969 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3970 		mqd->cp_hqd_pq_rptr_report_addr_hi);
3971 
3972 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3973 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
3974 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3975 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3976 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3977 
3978 	/* enable the doorbell if requested */
3979 	if (ring->use_doorbell) {
3980 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3981 			(adev->doorbell_index.kiq * 2) << 2);
3982 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3983 			(adev->doorbell_index.userqueue_end * 2) << 2);
3984 	}
3985 
3986 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3987 	       mqd->cp_hqd_pq_doorbell_control);
3988 
3989 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3990 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3991 	       mqd->cp_hqd_pq_wptr_lo);
3992 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3993 	       mqd->cp_hqd_pq_wptr_hi);
3994 
3995 	/* set the vmid for the queue */
3996 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
3997 
3998 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
3999 	       mqd->cp_hqd_persistent_state);
4000 
4001 	/* activate the queue */
4002 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
4003 	       mqd->cp_hqd_active);
4004 
4005 	if (ring->use_doorbell)
4006 		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4007 
4008 	return 0;
4009 }
4010 
4011 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
4012 {
4013 	struct amdgpu_device *adev = ring->adev;
4014 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4015 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
4016 
4017 	gfx_v11_0_kiq_setting(ring);
4018 
4019 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4020 		/* reset MQD to a clean status */
4021 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4022 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4023 
4024 		/* reset ring buffer */
4025 		ring->wptr = 0;
4026 		amdgpu_ring_clear_ring(ring);
4027 
4028 		mutex_lock(&adev->srbm_mutex);
4029 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4030 		gfx_v11_0_kiq_init_register(ring);
4031 		soc21_grbm_select(adev, 0, 0, 0, 0);
4032 		mutex_unlock(&adev->srbm_mutex);
4033 	} else {
4034 		memset((void *)mqd, 0, sizeof(*mqd));
4035 		mutex_lock(&adev->srbm_mutex);
4036 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4037 		amdgpu_ring_init_mqd(ring);
4038 		gfx_v11_0_kiq_init_register(ring);
4039 		soc21_grbm_select(adev, 0, 0, 0, 0);
4040 		mutex_unlock(&adev->srbm_mutex);
4041 
4042 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4043 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4044 	}
4045 
4046 	return 0;
4047 }
4048 
4049 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
4050 {
4051 	struct amdgpu_device *adev = ring->adev;
4052 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4053 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4054 
4055 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4056 		memset((void *)mqd, 0, sizeof(*mqd));
4057 		mutex_lock(&adev->srbm_mutex);
4058 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4059 		amdgpu_ring_init_mqd(ring);
4060 		soc21_grbm_select(adev, 0, 0, 0, 0);
4061 		mutex_unlock(&adev->srbm_mutex);
4062 
4063 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4064 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4065 	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4066 		/* reset MQD to a clean status */
4067 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4068 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4069 
4070 		/* reset ring buffer */
4071 		ring->wptr = 0;
4072 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4073 		amdgpu_ring_clear_ring(ring);
4074 	} else {
4075 		amdgpu_ring_clear_ring(ring);
4076 	}
4077 
4078 	return 0;
4079 }
4080 
4081 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4082 {
4083 	struct amdgpu_ring *ring;
4084 	int r;
4085 
4086 	ring = &adev->gfx.kiq.ring;
4087 
4088 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
4089 	if (unlikely(r != 0))
4090 		return r;
4091 
4092 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4093 	if (unlikely(r != 0)) {
4094 		amdgpu_bo_unreserve(ring->mqd_obj);
4095 		return r;
4096 	}
4097 
4098 	gfx_v11_0_kiq_init_queue(ring);
4099 	amdgpu_bo_kunmap(ring->mqd_obj);
4100 	ring->mqd_ptr = NULL;
4101 	amdgpu_bo_unreserve(ring->mqd_obj);
4102 	ring->sched.ready = true;
4103 	return 0;
4104 }
4105 
4106 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4107 {
4108 	struct amdgpu_ring *ring = NULL;
4109 	int r = 0, i;
4110 
4111 	if (!amdgpu_async_gfx_ring)
4112 		gfx_v11_0_cp_compute_enable(adev, true);
4113 
4114 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4115 		ring = &adev->gfx.compute_ring[i];
4116 
4117 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4118 		if (unlikely(r != 0))
4119 			goto done;
4120 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4121 		if (!r) {
4122 			r = gfx_v11_0_kcq_init_queue(ring);
4123 			amdgpu_bo_kunmap(ring->mqd_obj);
4124 			ring->mqd_ptr = NULL;
4125 		}
4126 		amdgpu_bo_unreserve(ring->mqd_obj);
4127 		if (r)
4128 			goto done;
4129 	}
4130 
4131 	r = amdgpu_gfx_enable_kcq(adev);
4132 done:
4133 	return r;
4134 }
4135 
4136 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4137 {
4138 	int r, i;
4139 	struct amdgpu_ring *ring;
4140 
4141 	if (!(adev->flags & AMD_IS_APU))
4142 		gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4143 
4144 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4145 		/* legacy firmware loading */
4146 		r = gfx_v11_0_cp_gfx_load_microcode(adev);
4147 		if (r)
4148 			return r;
4149 
4150 		if (adev->gfx.rs64_enable)
4151 			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4152 		else
4153 			r = gfx_v11_0_cp_compute_load_microcode(adev);
4154 		if (r)
4155 			return r;
4156 	}
4157 
4158 	gfx_v11_0_cp_set_doorbell_range(adev);
4159 
4160 	if (amdgpu_async_gfx_ring) {
4161 		gfx_v11_0_cp_compute_enable(adev, true);
4162 		gfx_v11_0_cp_gfx_enable(adev, true);
4163 	}
4164 
4165 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4166 		r = amdgpu_mes_kiq_hw_init(adev);
4167 	else
4168 		r = gfx_v11_0_kiq_resume(adev);
4169 	if (r)
4170 		return r;
4171 
4172 	r = gfx_v11_0_kcq_resume(adev);
4173 	if (r)
4174 		return r;
4175 
4176 	if (!amdgpu_async_gfx_ring) {
4177 		r = gfx_v11_0_cp_gfx_resume(adev);
4178 		if (r)
4179 			return r;
4180 	} else {
4181 		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4182 		if (r)
4183 			return r;
4184 	}
4185 
4186 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4187 		ring = &adev->gfx.gfx_ring[i];
4188 		r = amdgpu_ring_test_helper(ring);
4189 		if (r)
4190 			return r;
4191 	}
4192 
4193 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4194 		ring = &adev->gfx.compute_ring[i];
4195 		r = amdgpu_ring_test_helper(ring);
4196 		if (r)
4197 			return r;
4198 	}
4199 
4200 	return 0;
4201 }
4202 
4203 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4204 {
4205 	gfx_v11_0_cp_gfx_enable(adev, enable);
4206 	gfx_v11_0_cp_compute_enable(adev, enable);
4207 }
4208 
4209 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4210 {
4211 	int r;
4212 	bool value;
4213 
4214 	r = adev->gfxhub.funcs->gart_enable(adev);
4215 	if (r)
4216 		return r;
4217 
4218 	adev->hdp.funcs->flush_hdp(adev, NULL);
4219 
4220 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
4221 		false : true;
4222 
4223 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4224 	amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
4225 
4226 	return 0;
4227 }
4228 
4229 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4230 {
4231 	u32 tmp;
4232 
4233 	/* select RS64 */
4234 	if (adev->gfx.rs64_enable) {
4235 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4236 		tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4237 		WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4238 
4239 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4240 		tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4241 		WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4242 	}
4243 
4244 	if (amdgpu_emu_mode == 1)
4245 		drm_msleep(100);
4246 }
4247 
4248 static int get_gb_addr_config(struct amdgpu_device *adev)
4249 {
4250 	u32 gb_addr_config;
4251 
4252 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4253 	if (gb_addr_config == 0)
4254 		return -EINVAL;
4255 
4256 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
4257 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4258 
4259 	adev->gfx.config.gb_addr_config = gb_addr_config;
4260 
4261 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4262 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4263 				      GB_ADDR_CONFIG, NUM_PIPES);
4264 
4265 	adev->gfx.config.max_tile_pipes =
4266 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4267 
4268 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4269 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4270 				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4271 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4272 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4273 				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
4274 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4275 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4276 				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4277 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4278 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4279 				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4280 
4281 	return 0;
4282 }
4283 
4284 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4285 {
4286 	uint32_t data;
4287 
4288 	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4289 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4290 	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4291 
4292 	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4293 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4294 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4295 }
4296 
4297 static int gfx_v11_0_hw_init(void *handle)
4298 {
4299 	int r;
4300 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4301 
4302 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4303 		if (adev->gfx.imu.funcs) {
4304 			/* RLC autoload sequence 1: Program rlc ram */
4305 			if (adev->gfx.imu.funcs->program_rlc_ram)
4306 				adev->gfx.imu.funcs->program_rlc_ram(adev);
4307 		}
4308 		/* rlc autoload firmware */
4309 		r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4310 		if (r)
4311 			return r;
4312 	} else {
4313 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4314 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4315 				if (adev->gfx.imu.funcs->load_microcode)
4316 					adev->gfx.imu.funcs->load_microcode(adev);
4317 				if (adev->gfx.imu.funcs->setup_imu)
4318 					adev->gfx.imu.funcs->setup_imu(adev);
4319 				if (adev->gfx.imu.funcs->start_imu)
4320 					adev->gfx.imu.funcs->start_imu(adev);
4321 			}
4322 
4323 			/* disable gpa mode in backdoor loading */
4324 			gfx_v11_0_disable_gpa_mode(adev);
4325 		}
4326 	}
4327 
4328 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4329 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4330 		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4331 		if (r) {
4332 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
4333 			return r;
4334 		}
4335 	}
4336 
4337 	adev->gfx.is_poweron = true;
4338 
4339 	if (get_gb_addr_config(adev))
4340 		DRM_WARN("Invalid gb_addr_config!\n");
4341 
4342 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4343 	    adev->gfx.rs64_enable)
4344 		gfx_v11_0_config_gfx_rs64(adev);
4345 
4346 	r = gfx_v11_0_gfxhub_enable(adev);
4347 	if (r)
4348 		return r;
4349 
4350 	if (!amdgpu_emu_mode)
4351 		gfx_v11_0_init_golden_registers(adev);
4352 
4353 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4354 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4355 		/**
4356 		 * For gfx 11, RLC firmware loading relies on the SMU firmware
4357 		 * being loaded first, so for direct loading the SMC ucode has
4358 		 * to be loaded here before the RLC.
4359 		 */
4360 		if (!(adev->flags & AMD_IS_APU)) {
4361 			r = amdgpu_pm_load_smu_firmware(adev, NULL);
4362 			if (r)
4363 				return r;
4364 		}
4365 	}
4366 
4367 	gfx_v11_0_constants_init(adev);
4368 
4369 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4370 		gfx_v11_0_select_cp_fw_arch(adev);
4371 
4372 	if (adev->nbio.funcs->gc_doorbell_init)
4373 		adev->nbio.funcs->gc_doorbell_init(adev);
4374 
4375 	r = gfx_v11_0_rlc_resume(adev);
4376 	if (r)
4377 		return r;
4378 
4379 	/*
4380 	 * init golden registers and rlc resume may override some registers,
4381 	 * reconfig them here
4382 	 */
4383 	gfx_v11_0_tcp_harvest(adev);
4384 
4385 	r = gfx_v11_0_cp_resume(adev);
4386 	if (r)
4387 		return r;
4388 
4389 	return r;
4390 }
4391 
4392 #ifndef BRING_UP_DEBUG
4393 static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
4394 {
4395 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4396 	struct amdgpu_ring *kiq_ring = &kiq->ring;
4397 	int i, r = 0;
4398 
4399 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4400 		return -EINVAL;
4401 
4402 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
4403 					adev->gfx.num_gfx_rings))
4404 		return -ENOMEM;
4405 
4406 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4407 		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
4408 					   PREEMPT_QUEUES, 0, 0);
4409 
4410 	if (adev->gfx.kiq.ring.sched.ready)
4411 		r = amdgpu_ring_test_helper(kiq_ring);
4412 
4413 	return r;
4414 }
4415 #endif
4416 
4417 static int gfx_v11_0_hw_fini(void *handle)
4418 {
4419 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4420 	int r;
4421 	uint32_t tmp;
4422 
4423 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4424 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4425 
4426 	if (!adev->no_hw_access) {
4427 #ifndef BRING_UP_DEBUG
4428 		if (amdgpu_async_gfx_ring) {
4429 			r = gfx_v11_0_kiq_disable_kgq(adev);
4430 			if (r)
4431 				DRM_ERROR("KGQ disable failed\n");
4432 		}
4433 #endif
4434 		if (amdgpu_gfx_disable_kcq(adev))
4435 			DRM_ERROR("KCQ disable failed\n");
4436 
4437 		amdgpu_mes_kiq_hw_fini(adev);
4438 	}
4439 
4440 	if (amdgpu_sriov_vf(adev)) {
4441 		gfx_v11_0_cp_gfx_enable(adev, false);
4442 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
4443 		tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
4444 		tmp &= 0xffffff00;
4445 		WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
4446 
4447 		return 0;
4448 	}
4449 	gfx_v11_0_cp_enable(adev, false);
4450 	gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4451 
4452 	adev->gfxhub.funcs->gart_disable(adev);
4453 
4454 	adev->gfx.is_poweron = false;
4455 
4456 	return 0;
4457 }
4458 
4459 static int gfx_v11_0_suspend(void *handle)
4460 {
4461 	return gfx_v11_0_hw_fini(handle);
4462 }
4463 
4464 static int gfx_v11_0_resume(void *handle)
4465 {
4466 	return gfx_v11_0_hw_init(handle);
4467 }
4468 
4469 static bool gfx_v11_0_is_idle(void *handle)
4470 {
4471 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4472 
4473 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4474 				GRBM_STATUS, GUI_ACTIVE))
4475 		return false;
4476 	else
4477 		return true;
4478 }
4479 
4480 static int gfx_v11_0_wait_for_idle(void *handle)
4481 {
4482 	unsigned i;
4483 	u32 tmp;
4484 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4485 
4486 	for (i = 0; i < adev->usec_timeout; i++) {
4487 		/* read GRBM_STATUS */
4488 		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4489 			GRBM_STATUS__GUI_ACTIVE_MASK;
4490 
4491 		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4492 			return 0;
4493 		udelay(1);
4494 	}
4495 	return -ETIMEDOUT;
4496 }
4497 
4498 static int gfx_v11_0_soft_reset(void *handle)
4499 {
4500 	u32 grbm_soft_reset = 0;
4501 	u32 tmp;
4502 	int i, j, k;
4503 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4504 
4505 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4506 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4507 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4508 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4509 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4510 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4511 
4512 	gfx_v11_0_set_safe_mode(adev);
4513 
4514 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4515 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4516 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4517 				tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4518 				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4519 				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4520 				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4521 				WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4522 
4523 				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
4524 				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
4525 			}
4526 		}
4527 	}
4528 	for (i = 0; i < adev->gfx.me.num_me; ++i) {
4529 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4530 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4531 				tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4532 				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4533 				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4534 				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4535 				WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4536 
4537 				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
4538 			}
4539 		}
4540 	}
4541 
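	/* request a CP reset for VMIDs 1-31; bit 0 (VMID 0) is left clear */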
4542 	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
4543 
4544 	/* Read the CP_VMID_RESET register three times to give
4545 	 * GFX_HQD_ACTIVE sufficient time to reach 0. */
4546 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4547 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4548 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4549 
4550 	for (i = 0; i < adev->usec_timeout; i++) {
4551 		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
4552 		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
4553 			break;
4554 		udelay(1);
4555 	}
4556 	if (i >= adev->usec_timeout) {
4557 		printk("Failed to wait for all pipes to become idle\n");
4558 		return -EINVAL;
4559 	}
4560 
4561 	/**********  trigger soft reset  ***********/
4562 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4563 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4564 					SOFT_RESET_CP, 1);
4565 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4566 					SOFT_RESET_GFX, 1);
4567 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4568 					SOFT_RESET_CPF, 1);
4569 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4570 					SOFT_RESET_CPC, 1);
4571 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4572 					SOFT_RESET_CPG, 1);
4573 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4574 	/**********  exit soft reset  ***********/
4575 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4576 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4577 					SOFT_RESET_CP, 0);
4578 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4579 					SOFT_RESET_GFX, 0);
4580 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4581 					SOFT_RESET_CPF, 0);
4582 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4583 					SOFT_RESET_CPC, 0);
4584 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4585 					SOFT_RESET_CPG, 0);
4586 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4587 
4588 	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4589 	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4590 	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4591 
4592 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4593 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4594 
4595 	for (i = 0; i < adev->usec_timeout; i++) {
4596 		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
4597 			break;
4598 		udelay(1);
4599 	}
4600 	if (i >= adev->usec_timeout) {
4601 		printk("Failed to wait for CP_VMID_RESET to clear\n");
4602 		return -EINVAL;
4603 	}
4604 
4605 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4606 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4607 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4608 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4609 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4610 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4611 
4612 	gfx_v11_0_unset_safe_mode(adev);
4613 
4614 	return gfx_v11_0_cp_resume(adev);
4615 }
4616 
4617 static bool gfx_v11_0_check_soft_reset(void *handle)
4618 {
4619 	int i, r;
4620 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4621 	struct amdgpu_ring *ring;
4622 	long tmo = msecs_to_jiffies(1000);
4623 
4624 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4625 		ring = &adev->gfx.gfx_ring[i];
4626 		r = amdgpu_ring_test_ib(ring, tmo);
4627 		if (r)
4628 			return true;
4629 	}
4630 
4631 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4632 		ring = &adev->gfx.compute_ring[i];
4633 		r = amdgpu_ring_test_ib(ring, tmo);
4634 		if (r)
4635 			return true;
4636 	}
4637 
4638 	return false;
4639 }
4640 
4641 static int gfx_v11_0_post_soft_reset(void *handle)
4642 {
4643 	/**
4644 	 * GFX soft reset will impact MES, so MES needs to be resumed after a GFX soft reset
4645 	 */
4646 	return amdgpu_mes_resume((struct amdgpu_device *)handle);
4647 }
4648 
4649 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4650 {
4651 	uint64_t clock;
4652 	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
4653 
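	/* Read the counter as hi/lo/hi; if the high word changed, re-read the
	 * low word so the 64-bit value is consistent across a rollover.
	 */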
4654 	if (amdgpu_sriov_vf(adev)) {
4655 		amdgpu_gfx_off_ctrl(adev, false);
4656 		mutex_lock(&adev->gfx.gpu_clock_mutex);
4657 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4658 		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4659 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4660 		if (clock_counter_hi_pre != clock_counter_hi_after)
4661 			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4662 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
4663 		amdgpu_gfx_off_ctrl(adev, true);
4664 	} else {
4665 		preempt_disable();
4666 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4667 		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4668 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4669 		if (clock_counter_hi_pre != clock_counter_hi_after)
4670 			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4671 		preempt_enable();
4672 	}
4673 	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
4674 
4675 	return clock;
4676 }
4677 
4678 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4679 					   uint32_t vmid,
4680 					   uint32_t gds_base, uint32_t gds_size,
4681 					   uint32_t gws_base, uint32_t gws_size,
4682 					   uint32_t oa_base, uint32_t oa_size)
4683 {
4684 	struct amdgpu_device *adev = ring->adev;
4685 
4686 	/* GDS Base */
4687 	gfx_v11_0_write_data_to_reg(ring, 0, false,
4688 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
4689 				    gds_base);
4690 
4691 	/* GDS Size */
4692 	gfx_v11_0_write_data_to_reg(ring, 0, false,
4693 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
4694 				    gds_size);
4695 
4696 	/* GWS */
4697 	gfx_v11_0_write_data_to_reg(ring, 0, false,
4698 				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
4699 				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4700 
4701 	/* OA */
4702 	gfx_v11_0_write_data_to_reg(ring, 0, false,
4703 				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
4704 				    (1 << (oa_size + oa_base)) - (1 << oa_base));
4705 }
4706 
4707 static int gfx_v11_0_early_init(void *handle)
4708 {
4709 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4710 
4711 	adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
4712 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4713 					  AMDGPU_MAX_COMPUTE_RINGS);
4714 
4715 	gfx_v11_0_set_kiq_pm4_funcs(adev);
4716 	gfx_v11_0_set_ring_funcs(adev);
4717 	gfx_v11_0_set_irq_funcs(adev);
4718 	gfx_v11_0_set_gds_init(adev);
4719 	gfx_v11_0_set_rlc_funcs(adev);
4720 	gfx_v11_0_set_mqd_funcs(adev);
4721 	gfx_v11_0_set_imu_funcs(adev);
4722 
4723 	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
4724 
4725 	return 0;
4726 }
4727 
4728 static int gfx_v11_0_late_init(void *handle)
4729 {
4730 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4731 	int r;
4732 
4733 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4734 	if (r)
4735 		return r;
4736 
4737 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4738 	if (r)
4739 		return r;
4740 
4741 	return 0;
4742 }
4743 
4744 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
4745 {
4746 	uint32_t rlc_cntl;
4747 
4748 	/* check whether the RLC is enabled */
4749 	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
4750 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4751 }
4752 
4753 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev)
4754 {
4755 	uint32_t data;
4756 	unsigned i;
4757 
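	/* request safe-mode entry and poll until the RLC acknowledges by
	 * clearing the CMD field */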
4758 	data = RLC_SAFE_MODE__CMD_MASK;
4759 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4760 
4761 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
4762 
4763 	/* wait for RLC_SAFE_MODE */
4764 	for (i = 0; i < adev->usec_timeout; i++) {
4765 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
4766 				   RLC_SAFE_MODE, CMD))
4767 			break;
4768 		udelay(1);
4769 	}
4770 }
4771 
4772 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev)
4773 {
4774 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
4775 }
4776 
4777 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
4778 				      bool enable)
4779 {
4780 	uint32_t def, data;
4781 
4782 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
4783 		return;
4784 
4785 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4786 
4787 	if (enable)
4788 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4789 	else
4790 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
4791 
4792 	if (def != data)
4793 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4794 }
4795 
4796 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
4797 				       bool enable)
4798 {
4799 	uint32_t def, data;
4800 
4801 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
4802 		return;
4803 
4804 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4805 
4806 	if (enable)
4807 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4808 	else
4809 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4810 
4811 	if (def != data)
4812 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4813 }
4814 
4815 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
4816 					   bool enable)
4817 {
4818 	uint32_t def, data;
4819 
4820 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
4821 		return;
4822 
4823 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4824 
4825 	if (enable)
4826 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4827 	else
4828 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
4829 
4830 	if (def != data)
4831 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4832 }
4833 
4834 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4835 						       bool enable)
4836 {
4837 	uint32_t data, def;
4838 
4839 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
4840 		return;
4841 
4842 	/* MGCG is disabled by HW by default */
4843 	if (enable) {
4844 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4845 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4846 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4847 
4848 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4849 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4850 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4851 
4852 			if (def != data)
4853 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4854 		}
4855 	} else {
4856 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4857 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4858 
4859 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4860 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4861 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4862 
4863 			if (def != data)
4864 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4865 		}
4866 	}
4867 }
4868 
4869 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4870 						       bool enable)
4871 {
4872 	uint32_t def, data;
4873 
4874 	if (!(adev->cg_flags &
4875 	      (AMD_CG_SUPPORT_GFX_CGCG |
4876 	      AMD_CG_SUPPORT_GFX_CGLS |
4877 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
4878 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
4879 		return;
4880 
4881 	if (enable) {
4882 		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4883 
4884 		/* unset CGCG override */
4885 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4886 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4887 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4888 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4889 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
4890 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4891 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4892 
4893 		/* update CGCG override bits */
4894 		if (def != data)
4895 			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4896 
4897 		/* enable cgcg FSM(0x0000363F) */
4898 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4899 
4900 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
4901 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
4902 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4903 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4904 		}
4905 
4906 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
4907 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
4908 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4909 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4910 		}
4911 
4912 		if (def != data)
4913 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4914 
4915 		/* Program RLC_CGCG_CGLS_CTRL_3D */
4916 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4917 
4918 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
4919 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
4920 			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4921 				 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4922 		}
4923 
4924 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
4925 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
4926 			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4927 				 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4928 		}
4929 
4930 		if (def != data)
4931 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4932 
4933 		/* set IDLE_POLL_COUNT(0x00900100) */
4934 		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
4935 
4936 		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
4937 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4938 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4939 
4940 		if (def != data)
4941 			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
4942 
4943 		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4944 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4945 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4946 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4947 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4948 		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
4949 
4950 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4951 		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4952 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4953 
4954 		/* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
4955 		if (adev->sdma.num_instances > 1) {
4956 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4957 			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4958 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4959 		}
4960 	} else {
4961 		/* Program RLC_CGCG_CGLS_CTRL */
4962 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4963 
4964 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4965 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4966 
4967 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4968 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4969 
4970 		if (def != data)
4971 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4972 
4973 		/* Program RLC_CGCG_CGLS_CTRL_3D */
4974 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4975 
4976 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4977 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4978 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4979 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4980 
4981 		if (def != data)
4982 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4983 
4984 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4985 		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4986 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4987 
4988 		/* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
4989 		if (adev->sdma.num_instances > 1) {
4990 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4991 			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4992 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4993 		}
4994 	}
4995 }
4996 
4997 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4998 					    bool enable)
4999 {
5000 	amdgpu_gfx_rlc_enter_safe_mode(adev);
5001 
5002 	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
5003 
5004 	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
5005 
5006 	gfx_v11_0_update_repeater_fgcg(adev, enable);
5007 
5008 	gfx_v11_0_update_sram_fgcg(adev, enable);
5009 
5010 	gfx_v11_0_update_perf_clk(adev, enable);
5011 
5012 	if (adev->cg_flags &
5013 	    (AMD_CG_SUPPORT_GFX_MGCG |
5014 	     AMD_CG_SUPPORT_GFX_CGLS |
5015 	     AMD_CG_SUPPORT_GFX_CGCG |
5016 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
5017 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
5018 		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
5019 
5020 	amdgpu_gfx_rlc_exit_safe_mode(adev);
5021 
5022 	return 0;
5023 }
5024 
5025 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5026 {
5027 	u32 reg, data;
5028 
5029 	amdgpu_gfx_off_ctrl(adev, false);
5030 
5031 	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
5032 	if (amdgpu_sriov_is_pp_one_vf(adev))
5033 		data = RREG32_NO_KIQ(reg);
5034 	else
5035 		data = RREG32(reg);
5036 
5037 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5038 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5039 
5040 	if (amdgpu_sriov_is_pp_one_vf(adev))
5041 		WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
5042 	else
5043 		WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
5044 
5045 	amdgpu_gfx_off_ctrl(adev, true);
5046 }
5047 
5048 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5049 	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5050 	.set_safe_mode = gfx_v11_0_set_safe_mode,
5051 	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
5052 	.init = gfx_v11_0_rlc_init,
5053 	.get_csb_size = gfx_v11_0_get_csb_size,
5054 	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
5055 	.resume = gfx_v11_0_rlc_resume,
5056 	.stop = gfx_v11_0_rlc_stop,
5057 	.reset = gfx_v11_0_rlc_reset,
5058 	.start = gfx_v11_0_rlc_start,
5059 	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
5060 };
5061 
5062 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5063 {
5064 	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5065 
5066 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5067 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5068 	else
5069 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5070 
5071 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5072 
5073 	/* Program RLC_PG_DELAY_3 for CGPG hysteresis */
5074 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5075 		switch (adev->ip_versions[GC_HWIP][0]) {
5076 		case IP_VERSION(11, 0, 1):
5077 		case IP_VERSION(11, 0, 4):
5078 			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5079 			break;
5080 		default:
5081 			break;
5082 		}
5083 	}
5084 }
5085 
5086 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5087 {
5088 	amdgpu_gfx_rlc_enter_safe_mode(adev);
5089 
5090 	gfx_v11_cntl_power_gating(adev, enable);
5091 
5092 	amdgpu_gfx_rlc_exit_safe_mode(adev);
5093 }
5094 
5095 static int gfx_v11_0_set_powergating_state(void *handle,
5096 					   enum amd_powergating_state state)
5097 {
5098 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5099 	bool enable = (state == AMD_PG_STATE_GATE);
5100 
5101 	if (amdgpu_sriov_vf(adev))
5102 		return 0;
5103 
5104 	switch (adev->ip_versions[GC_HWIP][0]) {
5105 	case IP_VERSION(11, 0, 0):
5106 	case IP_VERSION(11, 0, 2):
5107 	case IP_VERSION(11, 0, 3):
5108 		amdgpu_gfx_off_ctrl(adev, enable);
5109 		break;
5110 	case IP_VERSION(11, 0, 1):
5111 	case IP_VERSION(11, 0, 4):
5112 		if (!enable)
5113 			amdgpu_gfx_off_ctrl(adev, false);
5114 
5115 		gfx_v11_cntl_pg(adev, enable);
5116 
5117 		if (enable)
5118 			amdgpu_gfx_off_ctrl(adev, true);
5119 
5120 		break;
5121 	default:
5122 		break;
5123 	}
5124 
5125 	return 0;
5126 }
5127 
5128 static int gfx_v11_0_set_clockgating_state(void *handle,
5129 					  enum amd_clockgating_state state)
5130 {
5131 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5132 
5133 	if (amdgpu_sriov_vf(adev))
5134 		return 0;
5135 
5136 	switch (adev->ip_versions[GC_HWIP][0]) {
5137 	case IP_VERSION(11, 0, 0):
5138 	case IP_VERSION(11, 0, 1):
5139 	case IP_VERSION(11, 0, 2):
5140 	case IP_VERSION(11, 0, 3):
5141 	case IP_VERSION(11, 0, 4):
5142 		gfx_v11_0_update_gfx_clock_gating(adev,
5143 						  state == AMD_CG_STATE_GATE);
5144 		break;
5145 	default:
5146 		break;
5147 	}
5148 
5149 	return 0;
5150 }
5151 
5152 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
5153 {
5154 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5155 	int data;
5156 
5157 	/* AMD_CG_SUPPORT_GFX_MGCG */
5158 	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5159 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5160 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5161 
5162 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
5163 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5164 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5165 
5166 	/* AMD_CG_SUPPORT_GFX_FGCG */
5167 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5168 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
5169 
5170 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
5171 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5172 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5173 
5174 	/* AMD_CG_SUPPORT_GFX_CGCG */
5175 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5176 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5177 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5178 
5179 	/* AMD_CG_SUPPORT_GFX_CGLS */
5180 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5181 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5182 
5183 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5184 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5185 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5186 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5187 
5188 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5189 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5190 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5191 }
5192 
5193 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5194 {
5195 	/* gfx11 is 32bit rptr */
5196 	return *(uint32_t *)ring->rptr_cpu_addr;
5197 }
5198 
5199 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5200 {
5201 	struct amdgpu_device *adev = ring->adev;
5202 	u64 wptr;
5203 
5204 	/* XXX check if swapping is necessary on BE */
5205 	if (ring->use_doorbell) {
5206 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5207 	} else {
5208 		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5209 		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5210 	}
5211 
5212 	return wptr;
5213 }
5214 
5215 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5216 {
5217 	struct amdgpu_device *adev = ring->adev;
5218 	uint32_t *wptr_saved;
5219 	uint32_t *is_queue_unmap;
5220 	uint64_t aggregated_db_index;
5221 	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
5222 	uint64_t wptr_tmp;
5223 
5224 	if (ring->is_mes_queue) {
5225 		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
5226 		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
5227 					      sizeof(uint32_t));
5228 		aggregated_db_index =
5229 			amdgpu_mes_get_aggregated_doorbell_index(adev,
5230 								 ring->hw_prio);
5231 
5232 		wptr_tmp = ring->wptr & ring->buf_mask;
5233 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5234 		*wptr_saved = wptr_tmp;
5235 		/* assume the doorbell is always used by an MES-mapped queue */
5236 		if (*is_queue_unmap) {
5237 			WDOORBELL64(aggregated_db_index, wptr_tmp);
5238 			WDOORBELL64(ring->doorbell_index, wptr_tmp);
5239 		} else {
5240 			WDOORBELL64(ring->doorbell_index, wptr_tmp);
5241 
5242 			if (*is_queue_unmap)
5243 				WDOORBELL64(aggregated_db_index, wptr_tmp);
5244 		}
5245 	} else {
5246 		if (ring->use_doorbell) {
5247 			/* XXX check if swapping is necessary on BE */
5248 			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5249 				     ring->wptr);
5250 			WDOORBELL64(ring->doorbell_index, ring->wptr);
5251 		} else {
5252 			WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5253 				     lower_32_bits(ring->wptr));
5254 			WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5255 				     upper_32_bits(ring->wptr));
5256 		}
5257 	}
5258 }
5259 
5260 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5261 {
5262 	/* gfx11 hardware is 32bit rptr */
5263 	return *(uint32_t *)ring->rptr_cpu_addr;
5264 }
5265 
5266 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5267 {
5268 	u64 wptr;
5269 
5270 	/* XXX check if swapping is necessary on BE */
5271 	if (ring->use_doorbell)
5272 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5273 	else
5274 		BUG();
5275 	return wptr;
5276 }
5277 
5278 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5279 {
5280 	struct amdgpu_device *adev = ring->adev;
5281 	uint32_t *wptr_saved;
5282 	uint32_t *is_queue_unmap;
5283 	uint64_t aggregated_db_index;
5284 	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
5285 	uint64_t wptr_tmp;
5286 
5287 	if (ring->is_mes_queue) {
5288 		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
5289 		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
5290 					      sizeof(uint32_t));
5291 		aggregated_db_index =
5292 			amdgpu_mes_get_aggregated_doorbell_index(adev,
5293 								 ring->hw_prio);
5294 
5295 		wptr_tmp = ring->wptr & ring->buf_mask;
5296 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5297 		*wptr_saved = wptr_tmp;
5298 		/* assume the doorbell is always used by an MES-mapped queue */
5299 		if (*is_queue_unmap) {
5300 			WDOORBELL64(aggregated_db_index, wptr_tmp);
5301 			WDOORBELL64(ring->doorbell_index, wptr_tmp);
5302 		} else {
5303 			WDOORBELL64(ring->doorbell_index, wptr_tmp);
5304 
5305 			if (*is_queue_unmap)
5306 				WDOORBELL64(aggregated_db_index, wptr_tmp);
5307 		}
5308 	} else {
5309 		/* XXX check if swapping is necessary on BE */
5310 		if (ring->use_doorbell) {
5311 			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5312 				     ring->wptr);
5313 			WDOORBELL64(ring->doorbell_index, ring->wptr);
5314 		} else {
5315 			BUG(); /* only DOORBELL method supported on gfx11 now */
5316 		}
5317 	}
5318 }
5319 
5320 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5321 {
5322 	struct amdgpu_device *adev = ring->adev;
5323 	u32 ref_and_mask, reg_mem_engine;
5324 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5325 
5326 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5327 		switch (ring->me) {
5328 		case 1:
5329 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5330 			break;
5331 		case 2:
5332 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5333 			break;
5334 		default:
5335 			return;
5336 		}
5337 		reg_mem_engine = 0;
5338 	} else {
5339 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5340 		reg_mem_engine = 1; /* pfp */
5341 	}
5342 
5343 	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
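	/* Ask NBIO for an HDP flush on this client's request bits and wait until
	 * the matching bits show up in the flush-done register.
	 */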
5344 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5345 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5346 			       ref_and_mask, ref_and_mask, 0x20);
5347 }
5348 
5349 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5350 				       struct amdgpu_job *job,
5351 				       struct amdgpu_ib *ib,
5352 				       uint32_t flags)
5353 {
5354 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5355 	u32 header, control = 0;
5356 
5357 	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5358 
5359 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5360 
5361 	control |= ib->length_dw | (vmid << 24);
5362 
5363 	if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5364 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5365 
5366 		if (flags & AMDGPU_IB_PREEMPTED)
5367 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5368 
5369 		if (vmid)
5370 			gfx_v11_0_ring_emit_de_meta(ring,
5371 				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
5372 	}
5373 
5374 	if (ring->is_mes_queue)
5375 		/* inherit vmid from mqd */
5376 		control |= 0x400000;
5377 
5378 	amdgpu_ring_write(ring, header);
5379 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5380 	amdgpu_ring_write(ring,
5381 #ifdef __BIG_ENDIAN
5382 		(2 << 0) |
5383 #endif
5384 		lower_32_bits(ib->gpu_addr));
5385 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5386 	amdgpu_ring_write(ring, control);
5387 }
5388 
5389 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5390 					   struct amdgpu_job *job,
5391 					   struct amdgpu_ib *ib,
5392 					   uint32_t flags)
5393 {
5394 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5395 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5396 
5397 	if (ring->is_mes_queue)
5398 		/* inherit vmid from mqd */
5399 		control |= 0x40000000;
5400 
5401 	/* Currently, there is a high probability of a wave ID mismatch
5402 	 * between ME and GDS, leading to a hw deadlock, because ME generates
5403 	 * different wave IDs than the GDS expects. This situation happens
5404 	 * randomly when at least 5 compute pipes use GDS ordered append.
5405 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5406 	 * Those are probably bugs somewhere else in the kernel driver.
5407 	 *
5408 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5409 	 * GDS to 0 for this ring (me/pipe).
5410 	 */
5411 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5412 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5413 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5414 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5415 	}
5416 
5417 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5418 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5419 	amdgpu_ring_write(ring,
5420 #ifdef __BIG_ENDIAN
5421 				(2 << 0) |
5422 #endif
5423 				lower_32_bits(ib->gpu_addr));
5424 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5425 	amdgpu_ring_write(ring, control);
5426 }
5427 
5428 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5429 				     u64 seq, unsigned flags)
5430 {
5431 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5432 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5433 
5434 	/* RELEASE_MEM - flush caches, send int */
5435 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5436 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5437 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5438 				 PACKET3_RELEASE_MEM_GCR_GL2_INV |
5439 				 PACKET3_RELEASE_MEM_GCR_GL2_US |
5440 				 PACKET3_RELEASE_MEM_GCR_GL1_INV |
5441 				 PACKET3_RELEASE_MEM_GCR_GLV_INV |
5442 				 PACKET3_RELEASE_MEM_GCR_GLM_INV |
5443 				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5444 				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5445 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5446 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5447 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5448 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5449 
5450 	/*
5451 	 * The address must be Qword-aligned for a 64-bit write, and
5452 	 * Dword-aligned when only the 32-bit data low is sent (data high discarded).
5453 	 */
5454 	if (write64bit)
5455 		BUG_ON(addr & 0x7);
5456 	else
5457 		BUG_ON(addr & 0x3);
5458 	amdgpu_ring_write(ring, lower_32_bits(addr));
5459 	amdgpu_ring_write(ring, upper_32_bits(addr));
5460 	amdgpu_ring_write(ring, lower_32_bits(seq));
5461 	amdgpu_ring_write(ring, upper_32_bits(seq));
5462 	amdgpu_ring_write(ring, ring->is_mes_queue ?
5463 			 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
5464 }
5465 
5466 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5467 {
5468 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5469 	uint32_t seq = ring->fence_drv.sync_seq;
5470 	uint64_t addr = ring->fence_drv.gpu_addr;
5471 
5472 	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5473 			       upper_32_bits(addr), seq, 0xffffffff, 4);
5474 }
5475 
5476 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5477 				   uint16_t pasid, uint32_t flush_type,
5478 				   bool all_hub, uint8_t dst_sel)
5479 {
5480 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5481 	amdgpu_ring_write(ring,
5482 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5483 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5484 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5485 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5486 }
5487 
5488 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5489 					 unsigned vmid, uint64_t pd_addr)
5490 {
5491 	if (ring->is_mes_queue)
5492 		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
5493 	else
5494 		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5495 
5496 	/* compute doesn't have PFP */
5497 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5498 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5499 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5500 		amdgpu_ring_write(ring, 0x0);
5501 	}
5502 }
5503 
5504 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5505 					  u64 seq, unsigned int flags)
5506 {
5507 	struct amdgpu_device *adev = ring->adev;
5508 
5509 	/* we only allocate 32bit for each seq wb address */
5510 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5511 
5512 	/* write fence seq to the "addr" */
5513 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5514 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5515 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5516 	amdgpu_ring_write(ring, lower_32_bits(addr));
5517 	amdgpu_ring_write(ring, upper_32_bits(addr));
5518 	amdgpu_ring_write(ring, lower_32_bits(seq));
5519 
5520 	if (flags & AMDGPU_FENCE_FLAG_INT) {
5521 		/* set register to trigger INT */
5522 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5523 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5524 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5525 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
5526 		amdgpu_ring_write(ring, 0);
5527 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5528 	}
5529 }
5530 
5531 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
5532 					 uint32_t flags)
5533 {
5534 	uint32_t dw2 = 0;
5535 
5536 	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5537 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5538 		/* set load_global_config & load_global_uconfig */
5539 		dw2 |= 0x8001;
5540 		/* set load_cs_sh_regs */
5541 		dw2 |= 0x01000000;
5542 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
5543 		dw2 |= 0x10002;
5544 	}
5545 
5546 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5547 	amdgpu_ring_write(ring, dw2);
5548 	amdgpu_ring_write(ring, 0);
5549 }
5550 
5551 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5552 {
5553 	unsigned ret;
5554 
5555 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5556 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5557 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5558 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
5559 	ret = ring->wptr & ring->buf_mask;
5560 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5561 
5562 	return ret;
5563 }
5564 
5565 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5566 {
5567 	unsigned cur;
5568 	BUG_ON(offset > ring->buf_mask);
5569 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
5570 
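	/* Replace the 0x55aa55aa placeholder written by init_cond_exec with the
	 * DW distance from it to the last written ring entry, accounting for
	 * ring buffer wrap-around.
	 */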
5571 	cur = (ring->wptr - 1) & ring->buf_mask;
5572 	if (likely(cur > offset))
5573 		ring->ring[offset] = cur - offset;
5574 	else
5575 		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
5576 }
5577 
5578 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
5579 {
5580 	int i, r = 0;
5581 	struct amdgpu_device *adev = ring->adev;
5582 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
5583 	struct amdgpu_ring *kiq_ring = &kiq->ring;
5584 	unsigned long flags;
5585 
5586 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5587 		return -EINVAL;
5588 
5589 	spin_lock_irqsave(&kiq->ring_lock, flags);
5590 
5591 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5592 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
5593 		return -ENOMEM;
5594 	}
5595 
5596 	/* assert preemption condition */
5597 	amdgpu_ring_set_preempt_cond_exec(ring, false);
5598 
5599 	/* assert IB preemption, emit the trailing fence */
5600 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5601 				   ring->trail_fence_gpu_addr,
5602 				   ++ring->trail_seq);
5603 	amdgpu_ring_commit(kiq_ring);
5604 
5605 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
5606 
5607 	/* poll the trailing fence */
5608 	for (i = 0; i < adev->usec_timeout; i++) {
5609 		if (ring->trail_seq ==
5610 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
5611 			break;
5612 		udelay(1);
5613 	}
5614 
5615 	if (i >= adev->usec_timeout) {
5616 		r = -EINVAL;
5617 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
5618 	}
5619 
5620 	/* deassert preemption condition */
5621 	amdgpu_ring_set_preempt_cond_exec(ring, true);
5622 	return r;
5623 }
5624 
5625 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
5626 {
5627 	struct amdgpu_device *adev = ring->adev;
5628 	struct v10_de_ib_state de_payload = {0};
5629 	uint64_t offset, gds_addr, de_payload_gpu_addr;
5630 	void *de_payload_cpu_addr;
5631 	int cnt;
5632 
5633 	if (ring->is_mes_queue) {
5634 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5635 				  gfx[0].gfx_meta_data) +
5636 			offsetof(struct v10_gfx_meta_data, de_payload);
5637 		de_payload_gpu_addr =
5638 			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5639 		de_payload_cpu_addr =
5640 			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5641 
5642 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5643 				  gfx[0].gds_backup) +
5644 			offsetof(struct v10_gfx_meta_data, de_payload);
5645 		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5646 	} else {
5647 		offset = offsetof(struct v10_gfx_meta_data, de_payload);
5648 		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5649 		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5650 
5651 		gds_addr = roundup2(amdgpu_csa_vaddr(ring->adev) +
5652 				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5653 				 PAGE_SIZE);
5654 	}
5655 
5656 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5657 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5658 
5659 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5660 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5661 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5662 				 WRITE_DATA_DST_SEL(8) |
5663 				 WR_CONFIRM) |
5664 				 WRITE_DATA_CACHE_POLICY(0));
5665 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5666 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5667 
5668 	if (resume)
5669 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5670 					   sizeof(de_payload) >> 2);
5671 	else
5672 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5673 					   sizeof(de_payload) >> 2);
5674 }
5675 
5676 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5677 				    bool secure)
5678 {
5679 	uint32_t v = secure ? FRAME_TMZ : 0;
5680 
5681 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5682 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5683 }
5684 
5685 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5686 				     uint32_t reg_val_offs)
5687 {
5688 	struct amdgpu_device *adev = ring->adev;
5689 
5690 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5691 	amdgpu_ring_write(ring, 0 |	/* src: register*/
5692 				(5 << 8) |	/* dst: memory */
5693 				(1 << 20));	/* write confirm */
5694 	amdgpu_ring_write(ring, reg);
5695 	amdgpu_ring_write(ring, 0);
5696 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5697 				reg_val_offs * 4));
5698 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5699 				reg_val_offs * 4));
5700 }
5701 
5702 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5703 				   uint32_t val)
5704 {
5705 	uint32_t cmd = 0;
5706 
5707 	switch (ring->funcs->type) {
5708 	case AMDGPU_RING_TYPE_GFX:
5709 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5710 		break;
5711 	case AMDGPU_RING_TYPE_KIQ:
5712 		cmd = (1 << 16); /* no inc addr */
5713 		break;
5714 	default:
5715 		cmd = WR_CONFIRM;
5716 		break;
5717 	}
5718 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5719 	amdgpu_ring_write(ring, cmd);
5720 	amdgpu_ring_write(ring, reg);
5721 	amdgpu_ring_write(ring, 0);
5722 	amdgpu_ring_write(ring, val);
5723 }
5724 
5725 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5726 					uint32_t val, uint32_t mask)
5727 {
5728 	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5729 }
5730 
5731 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5732 						   uint32_t reg0, uint32_t reg1,
5733 						   uint32_t ref, uint32_t mask)
5734 {
5735 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5736 
5737 	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5738 			       ref, mask, 0x20);
5739 }
5740 
5741 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
5742 					 unsigned vmid)
5743 {
5744 	struct amdgpu_device *adev = ring->adev;
5745 	uint32_t value = 0;
5746 
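	/* Soft recovery: issue an SQ command restricted (CHECK_VMID) to waves
	 * belonging to the hung VMID.
	 */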
5747 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5748 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5749 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5750 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5751 	WREG32_SOC15(GC, 0, regSQ_CMD, value);
5752 }
5753 
5754 static void
5755 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5756 				      uint32_t me, uint32_t pipe,
5757 				      enum amdgpu_interrupt_state state)
5758 {
5759 	uint32_t cp_int_cntl, cp_int_cntl_reg;
5760 
5761 	if (!me) {
5762 		switch (pipe) {
5763 		case 0:
5764 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
5765 			break;
5766 		case 1:
5767 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
5768 			break;
5769 		default:
5770 			DRM_DEBUG("invalid pipe %d\n", pipe);
5771 			return;
5772 		}
5773 	} else {
5774 		DRM_DEBUG("invalid me %d\n", me);
5775 		return;
5776 	}
5777 
5778 	switch (state) {
5779 	case AMDGPU_IRQ_STATE_DISABLE:
5780 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
5781 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5782 					    TIME_STAMP_INT_ENABLE, 0);
5783 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5784 					    GENERIC0_INT_ENABLE, 0);
5785 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
5786 		break;
5787 	case AMDGPU_IRQ_STATE_ENABLE:
5788 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
5789 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5790 					    TIME_STAMP_INT_ENABLE, 1);
5791 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5792 					    GENERIC0_INT_ENABLE, 1);
5793 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
5794 		break;
5795 	default:
5796 		break;
5797 	}
5798 }
5799 
5800 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5801 						     int me, int pipe,
5802 						     enum amdgpu_interrupt_state state)
5803 {
5804 	u32 mec_int_cntl, mec_int_cntl_reg;
5805 
5806 	/*
5807 	 * amdgpu controls only the first MEC. That's why this function only
5808 	 * handles the setting of interrupts for this specific MEC. All other
5809 	 * pipes' interrupts are set by amdkfd.
5810 	 */
5811 
5812 	if (me == 1) {
5813 		switch (pipe) {
5814 		case 0:
5815 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
5816 			break;
5817 		case 1:
5818 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
5819 			break;
5820 		case 2:
5821 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
5822 			break;
5823 		case 3:
5824 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
5825 			break;
5826 		default:
5827 			DRM_DEBUG("invalid pipe %d\n", pipe);
5828 			return;
5829 		}
5830 	} else {
5831 		DRM_DEBUG("invalid me %d\n", me);
5832 		return;
5833 	}
5834 
5835 	switch (state) {
5836 	case AMDGPU_IRQ_STATE_DISABLE:
5837 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5838 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5839 					     TIME_STAMP_INT_ENABLE, 0);
5840 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5841 					     GENERIC0_INT_ENABLE, 0);
5842 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5843 		break;
5844 	case AMDGPU_IRQ_STATE_ENABLE:
5845 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5846 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5847 					     TIME_STAMP_INT_ENABLE, 1);
5848 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5849 					     GENERIC0_INT_ENABLE, 1);
5850 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5851 		break;
5852 	default:
5853 		break;
5854 	}
5855 }
5856 
5857 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5858 					    struct amdgpu_irq_src *src,
5859 					    unsigned type,
5860 					    enum amdgpu_interrupt_state state)
5861 {
5862 	switch (type) {
5863 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5864 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
5865 		break;
5866 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
5867 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
5868 		break;
5869 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5870 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5871 		break;
5872 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5873 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5874 		break;
5875 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5876 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5877 		break;
5878 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5879 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5880 		break;
5881 	default:
5882 		break;
5883 	}
5884 	return 0;
5885 }
5886 
5887 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
5888 			     struct amdgpu_irq_src *source,
5889 			     struct amdgpu_iv_entry *entry)
5890 {
5891 	int i;
5892 	u8 me_id, pipe_id, queue_id;
5893 	struct amdgpu_ring *ring;
5894 	uint32_t mes_queue_id = entry->src_data[0];
5895 
5896 	DRM_DEBUG("IH: CP EOP\n");
5897 
5898 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
5899 		struct amdgpu_mes_queue *queue;
5900 
5901 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
5902 
5903 		spin_lock(&adev->mes.queue_id_lock);
5904 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
5905 		if (queue) {
5906 			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
5907 			amdgpu_fence_process(queue->ring);
5908 		}
5909 		spin_unlock(&adev->mes.queue_id_lock);
5910 	} else {
5911 		me_id = (entry->ring_id & 0x0c) >> 2;
5912 		pipe_id = (entry->ring_id & 0x03) >> 0;
5913 		queue_id = (entry->ring_id & 0x70) >> 4;
5914 
5915 		switch (me_id) {
5916 		case 0:
5917 			if (pipe_id == 0)
5918 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5919 			else
5920 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
5921 			break;
5922 		case 1:
5923 		case 2:
5924 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5925 				ring = &adev->gfx.compute_ring[i];
5926 				/* Per-queue interrupt is supported for MEC starting from VI.
5927 				 * The interrupt can only be enabled/disabled per pipe instead
5928 				 * of per queue.
5929 				 */
5930 				if ((ring->me == me_id) &&
5931 				    (ring->pipe == pipe_id) &&
5932 				    (ring->queue == queue_id))
5933 					amdgpu_fence_process(ring);
5934 			}
5935 			break;
5936 		}
5937 	}
5938 
5939 	return 0;
5940 }
5941 
5942 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5943 					      struct amdgpu_irq_src *source,
5944 					      unsigned type,
5945 					      enum amdgpu_interrupt_state state)
5946 {
5947 	switch (state) {
5948 	case AMDGPU_IRQ_STATE_DISABLE:
5949 	case AMDGPU_IRQ_STATE_ENABLE:
5950 		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
5951 			       PRIV_REG_INT_ENABLE,
5952 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5953 		break;
5954 	default:
5955 		break;
5956 	}
5957 
5958 	return 0;
5959 }
5960 
5961 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5962 					       struct amdgpu_irq_src *source,
5963 					       unsigned type,
5964 					       enum amdgpu_interrupt_state state)
5965 {
5966 	switch (state) {
5967 	case AMDGPU_IRQ_STATE_DISABLE:
5968 	case AMDGPU_IRQ_STATE_ENABLE:
5969 		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
5970 			       PRIV_INSTR_INT_ENABLE,
5971 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5972 		break;
5973 	default:
5974 		break;
5975 	}
5976 
5977 	return 0;
5978 }
5979 
5980 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
5981 					struct amdgpu_iv_entry *entry)
5982 {
5983 	u8 me_id, pipe_id, queue_id;
5984 	struct amdgpu_ring *ring;
5985 	int i;
5986 
5987 	me_id = (entry->ring_id & 0x0c) >> 2;
5988 	pipe_id = (entry->ring_id & 0x03) >> 0;
5989 	queue_id = (entry->ring_id & 0x70) >> 4;
5990 
5991 	switch (me_id) {
5992 	case 0:
5993 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5994 			ring = &adev->gfx.gfx_ring[i];
5995 			/* we only enable 1 gfx queue per pipe for now */
5996 			if (ring->me == me_id && ring->pipe == pipe_id)
5997 				drm_sched_fault(&ring->sched);
5998 		}
5999 		break;
6000 	case 1:
6001 	case 2:
6002 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6003 			ring = &adev->gfx.compute_ring[i];
6004 			if (ring->me == me_id && ring->pipe == pipe_id &&
6005 			    ring->queue == queue_id)
6006 				drm_sched_fault(&ring->sched);
6007 		}
6008 		break;
6009 	default:
6010 		BUG();
6011 		break;
6012 	}
6013 }
6014 
6015 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
6016 				  struct amdgpu_irq_src *source,
6017 				  struct amdgpu_iv_entry *entry)
6018 {
6019 	DRM_ERROR("Illegal register access in command stream\n");
6020 	gfx_v11_0_handle_priv_fault(adev, entry);
6021 	return 0;
6022 }
6023 
6024 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
6025 				   struct amdgpu_irq_src *source,
6026 				   struct amdgpu_iv_entry *entry)
6027 {
6028 	DRM_ERROR("Illegal instruction in command stream\n");
6029 	gfx_v11_0_handle_priv_fault(adev, entry);
6030 	return 0;
6031 }
6032 
6033 #if 0
6034 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
6035 					     struct amdgpu_irq_src *src,
6036 					     unsigned int type,
6037 					     enum amdgpu_interrupt_state state)
6038 {
6039 	uint32_t tmp, target;
6040 	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
6041 
6042 	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6043 	target += ring->pipe;
6044 
6045 	switch (type) {
6046 	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
6047 		if (state == AMDGPU_IRQ_STATE_DISABLE) {
6048 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6049 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6050 					    GENERIC2_INT_ENABLE, 0);
6051 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6052 
6053 			tmp = RREG32_SOC15_IP(GC, target);
6054 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6055 					    GENERIC2_INT_ENABLE, 0);
6056 			WREG32_SOC15_IP(GC, target, tmp);
6057 		} else {
6058 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6059 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6060 					    GENERIC2_INT_ENABLE, 1);
6061 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6062 
6063 			tmp = RREG32_SOC15_IP(GC, target);
6064 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6065 					    GENERIC2_INT_ENABLE, 1);
6066 			WREG32_SOC15_IP(GC, target, tmp);
6067 		}
6068 		break;
6069 	default:
6070 		BUG(); /* kiq only support GENERIC2_INT now */
6071 		break;
6072 	}
6073 	return 0;
6074 }
6075 #endif
6076 
6077 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
6078 {
6079 	const unsigned int gcr_cntl =
6080 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
6081 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
6082 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
6083 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
6084 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
6085 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
6086 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
6087 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
6088 
6089 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
6090 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
6091 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
6092 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6093 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6094 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6095 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6096 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6097 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
6098 }
6099 
6100 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
6101 	.name = "gfx_v11_0",
6102 	.early_init = gfx_v11_0_early_init,
6103 	.late_init = gfx_v11_0_late_init,
6104 	.sw_init = gfx_v11_0_sw_init,
6105 	.sw_fini = gfx_v11_0_sw_fini,
6106 	.hw_init = gfx_v11_0_hw_init,
6107 	.hw_fini = gfx_v11_0_hw_fini,
6108 	.suspend = gfx_v11_0_suspend,
6109 	.resume = gfx_v11_0_resume,
6110 	.is_idle = gfx_v11_0_is_idle,
6111 	.wait_for_idle = gfx_v11_0_wait_for_idle,
6112 	.soft_reset = gfx_v11_0_soft_reset,
6113 	.check_soft_reset = gfx_v11_0_check_soft_reset,
6114 	.post_soft_reset = gfx_v11_0_post_soft_reset,
6115 	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
6116 	.set_powergating_state = gfx_v11_0_set_powergating_state,
6117 	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
6118 };
6119 
6120 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
6121 	.type = AMDGPU_RING_TYPE_GFX,
6122 	.align_mask = 0xff,
6123 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6124 	.support_64bit_ptrs = true,
6125 	.vmhub = AMDGPU_GFXHUB_0,
6126 	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
6127 	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
6128 	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
6129 	.emit_frame_size = /* 242 DWs maximum in total if 16 IBs */
6130 		5 + /* COND_EXEC */
6131 		7 + /* PIPELINE_SYNC */
6132 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6133 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6134 		2 + /* VM_FLUSH */
6135 		8 + /* FENCE for VM_FLUSH */
6136 		20 + /* GDS switch */
6137 		5 + /* COND_EXEC */
6138 		7 + /* HDP_flush */
6139 		4 + /* VGT_flush */
6140 		31 + /*	DE_META */
6141 		3 + /* CNTX_CTRL */
6142 		5 + /* HDP_INVL */
6143 		8 + 8 + /* FENCE x2 */
6144 		8, /* gfx_v11_0_emit_mem_sync */
6145 	.emit_ib_size =	4, /* gfx_v11_0_ring_emit_ib_gfx */
6146 	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
6147 	.emit_fence = gfx_v11_0_ring_emit_fence,
6148 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
6149 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
6150 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
6151 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
6152 	.test_ring = gfx_v11_0_ring_test_ring,
6153 	.test_ib = gfx_v11_0_ring_test_ib,
6154 	.insert_nop = amdgpu_ring_insert_nop,
6155 	.pad_ib = amdgpu_ring_generic_pad_ib,
6156 	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
6157 	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
6158 	.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
6159 	.preempt_ib = gfx_v11_0_ring_preempt_ib,
6160 	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
6161 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
6162 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6163 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6164 	.soft_recovery = gfx_v11_0_ring_soft_recovery,
6165 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
6166 };
6167 
6168 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
6169 	.type = AMDGPU_RING_TYPE_COMPUTE,
6170 	.align_mask = 0xff,
6171 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6172 	.support_64bit_ptrs = true,
6173 	.vmhub = AMDGPU_GFXHUB_0,
6174 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
6175 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
6176 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
6177 	.emit_frame_size =
6178 		20 + /* gfx_v11_0_ring_emit_gds_switch */
6179 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
6180 		5 + /* hdp invalidate */
6181 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
6182 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6183 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6184 		2 + /* gfx_v11_0_ring_emit_vm_flush */
6185 		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
6186 		8, /* gfx_v11_0_emit_mem_sync */
6187 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
6188 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
6189 	.emit_fence = gfx_v11_0_ring_emit_fence,
6190 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
6191 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
6192 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
6193 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
6194 	.test_ring = gfx_v11_0_ring_test_ring,
6195 	.test_ib = gfx_v11_0_ring_test_ib,
6196 	.insert_nop = amdgpu_ring_insert_nop,
6197 	.pad_ib = amdgpu_ring_generic_pad_ib,
6198 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
6199 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6200 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6201 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
6202 };
6203 
6204 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
6205 	.type = AMDGPU_RING_TYPE_KIQ,
6206 	.align_mask = 0xff,
6207 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6208 	.support_64bit_ptrs = true,
6209 	.vmhub = AMDGPU_GFXHUB_0,
6210 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
6211 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
6212 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
6213 	.emit_frame_size =
6214 		20 + /* gfx_v11_0_ring_emit_gds_switch */
6215 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
6216 		5 + /* hdp invalidate */
6217 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
6218 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6219 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6220 		2 + /* gfx_v11_0_ring_emit_vm_flush */
6221 		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6222 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
6223 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
6224 	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
6225 	.test_ring = gfx_v11_0_ring_test_ring,
6226 	.test_ib = gfx_v11_0_ring_test_ib,
6227 	.insert_nop = amdgpu_ring_insert_nop,
6228 	.pad_ib = amdgpu_ring_generic_pad_ib,
6229 	.emit_rreg = gfx_v11_0_ring_emit_rreg,
6230 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
6231 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6232 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6233 };
6234 
6235 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
6236 {
6237 	int i;
6238 
6239 	adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;
6240 
6241 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6242 		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
6243 
6244 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
6245 		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
6246 }
6247 
6248 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
6249 	.set = gfx_v11_0_set_eop_interrupt_state,
6250 	.process = gfx_v11_0_eop_irq,
6251 };
6252 
6253 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
6254 	.set = gfx_v11_0_set_priv_reg_fault_state,
6255 	.process = gfx_v11_0_priv_reg_irq,
6256 };
6257 
6258 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
6259 	.set = gfx_v11_0_set_priv_inst_fault_state,
6260 	.process = gfx_v11_0_priv_inst_irq,
6261 };
6262 
6263 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
6264 {
6265 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6266 	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;
6267 
6268 	adev->gfx.priv_reg_irq.num_types = 1;
6269 	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
6270 
6271 	adev->gfx.priv_inst_irq.num_types = 1;
6272 	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
6273 }
6274 
6275 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
6276 {
6277 	if (adev->flags & AMD_IS_APU)
6278 		adev->gfx.imu.mode = MISSION_MODE;
6279 	else
6280 		adev->gfx.imu.mode = DEBUG_MODE;
6281 
6282 	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
6283 }
6284 
6285 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
6286 {
6287 	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
6288 }
6289 
6290 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
6291 {
6292 	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
6293 			    adev->gfx.config.max_sh_per_se *
6294 			    adev->gfx.config.max_shader_engines;
6295 
6296 	adev->gds.gds_size = 0x1000;
6297 	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
6298 	adev->gds.gws_size = 64;
6299 	adev->gds.oa_size = 16;
6300 }
6301 
6302 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
6303 {
6304 	/* set gfx eng mqd */
6305 	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
6306 		sizeof(struct v11_gfx_mqd);
6307 	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
6308 		gfx_v11_0_gfx_mqd_init;
6309 	/* set compute eng mqd */
6310 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
6311 		sizeof(struct v11_compute_mqd);
6312 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
6313 		gfx_v11_0_compute_mqd_init;
6314 }
6315 
6316 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
6317 							  u32 bitmap)
6318 {
6319 	u32 data;
6320 
6321 	if (!bitmap)
6322 		return;
6323 
6324 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
6325 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
6326 
6327 	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
6328 }
6329 
6330 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
6331 {
6332 	u32 data, wgp_bitmask;
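	/* Combine the harvested (CC) and user-disabled (GC_USER) inactive-WGP bits,
	 * then invert against a bitmask of max_cu_per_sh / 2 WGPs (2 CUs per WGP)
	 * to get the active WGPs for the current SH.
	 */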
6333 	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
6334 	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
6335 
6336 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
6337 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
6338 
6339 	wgp_bitmask =
6340 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
6341 
6342 	return (~data) & wgp_bitmask;
6343 }
6344 
6345 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
6346 {
6347 	u32 wgp_idx, wgp_active_bitmap;
6348 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
6349 
6350 	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
6351 	cu_active_bitmap = 0;
6352 
6353 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
6354 		/* each enabled WGP corresponds to 2 enabled CUs */
6355 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
6356 		if (wgp_active_bitmap & (1 << wgp_idx))
6357 			cu_active_bitmap |= cu_bitmap_per_wgp;
6358 	}
6359 
6360 	return cu_active_bitmap;
6361 }
6362 
6363 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
6364 				 struct amdgpu_cu_info *cu_info)
6365 {
6366 	int i, j, k, counter, active_cu_number = 0;
6367 	u32 mask, bitmap;
6368 	unsigned disable_masks[8 * 2];
6369 
6370 	if (!adev || !cu_info)
6371 		return -EINVAL;
6372 
6373 	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
6374 
6375 	mutex_lock(&adev->grbm_idx_mutex);
6376 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6377 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6378 			mask = 1;
6379 			counter = 0;
6380 			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
6381 			if (i < 8 && j < 2)
6382 				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
6383 					adev, disable_masks[i * 2 + j]);
6384 			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);
6385 
6386 			/**
6387 			 * GFX11 could support more than 4 SEs, while the bitmap
6388 			 * in cu_info struct is 4x4 and ioctl interface struct
6389 			 * drm_amdgpu_info_device should keep stable.
6390 			 * So we use last two columns of bitmap to store cu mask for
6391 			 * SEs 4 to 7, the layout of the bitmap is as below:
6392 			 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
6393 			 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
6394 			 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
6395 			 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
6396 			 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
6397 			 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
6398 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
6399 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
6400 			 */
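			/* e.g. for i = 5 (SE5), j = 1 (SH1), the assignment below
			 * stores into bitmap[5 % 4][1 + (5 / 4) * 2] = bitmap[1][3],
			 * matching the SE5/SH1 entry in the layout above.
			 */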
6401 			cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
6402 
6403 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6404 				if (bitmap & mask)
6405 					counter++;
6406 
6407 				mask <<= 1;
6408 			}
6409 			active_cu_number += counter;
6410 		}
6411 	}
6412 	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6413 	mutex_unlock(&adev->grbm_idx_mutex);
6414 
6415 	cu_info->number = active_cu_number;
6416 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
6417 
6418 	return 0;
6419 }
6420 
6421 const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
6422 {
6423 	.type = AMD_IP_BLOCK_TYPE_GFX,
6424 	.major = 11,
6425 	.minor = 0,
6426 	.rev = 0,
6427 	.funcs = &gfx_v11_0_ip_funcs,
6428 };
6429