xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/jpeg_v4_0_3.c (revision f005ef32)
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "mmsch_v4_0_3.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

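/*
 * Power states reported in regUVD_PGFSM_STATUS for the JPEG power domain;
 * jpeg_v4_0_3_start()/jpeg_v4_0_3_stop() poll for these after programming
 * the power-gating FSM through regUVD_PGFSM_CONFIG.
 */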
enum jpeg_engine_status {
	UVD_PGFSM_STATUS__UVDJ_PWR_ON  = 0,
	UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2,
};

static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_3_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring);

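/*
 * One IH source ID per decode ring: ring 0 uses JPEG_DECODE, rings 1-7 use
 * JPEG1_DECODE..JPEG7_DECODE (see jpeg_v4_0_3_process_interrupt()).
 */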
static int amdgpu_ih_srcid_jpeg[] = {
	VCN_4_0__SRCID__JPEG_DECODE,
	VCN_4_0__SRCID__JPEG1_DECODE,
	VCN_4_0__SRCID__JPEG2_DECODE,
	VCN_4_0__SRCID__JPEG3_DECODE,
	VCN_4_0__SRCID__JPEG4_DECODE,
	VCN_4_0__SRCID__JPEG5_DECODE,
	VCN_4_0__SRCID__JPEG6_DECODE,
	VCN_4_0__SRCID__JPEG7_DECODE
};

/**
 * jpeg_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;

	jpeg_v4_0_3_set_dec_ring_funcs(adev);
	jpeg_v4_0_3_set_irq_funcs(adev);
	jpeg_v4_0_3_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_3_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

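	/*
	 * All instances register their trap handlers on the first instance's
	 * IRQ source (adev->jpeg.inst->irq); see jpeg_v4_0_3_set_irq_funcs().
	 */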
	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = true;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
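			/*
			 * Doorbell layout (in 32-bit doorbell slots): on bare
			 * metal each JPEG instance takes a block of slots
			 * after the VCN ring pair, ring j at offset 1 + j
			 * within a 9-slot stride. Under SR-IOV the MMSCH
			 * expects a 32-slot stride per instance, rings 0-3
			 * at offset 4 + j and rings 4-7 at offset 8 + j.
			 */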
			if (!amdgpu_sriov_vf(adev)) {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					1 + j + 9 * jpeg_inst;
			} else {
				if (j < 4)
					ring->doorbell_index =
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						4 + j + 32 * jpeg_inst;
				else
					ring->doorbell_index =
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						8 + j + 32 * jpeg_inst;
			}
			snprintf(ring->name, sizeof(ring->name), "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
						AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

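			/*
			 * Per-ring JRBC register banks: ring 0 sits at the
			 * regUVD_JRBC0_* base; rings 1-7 live in a separate
			 * block, spaced 0x40 dwords apart and addressed
			 * relative to the ring-0 bank, hence the recurring
			 * (0x40 * j - 0xc80) offset in this file.
			 */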
			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(
					JPEG, jpeg_inst,
					regUVD_JRBC0_UVD_JRBC_SCRATCH0,
					(j ? (0x40 * j - 0xc80) : 0));
		}
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		r = amdgpu_jpeg_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_3_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

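/*
 * Under SR-IOV the host cannot program the ring registers directly; instead
 * an init table (direct register writes per engine) is built in the shared
 * mm_table and handed to the MMSCH firmware, which applies it and reports
 * the result through the VF mailbox.
 */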
static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw, item_offset;
	uint32_t init_status;
	int i, j, jpeg_inst;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_3_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		jpeg_inst = GET_INST(JPEG, i);

		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		item_offset = header.total_size;

		for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			table_size = 0;

			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW);
			MMSCH_V4_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
			MMSCH_V4_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE);
			MMSCH_V4_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);

			if (j <= 3) {
				header.mjpegdec0[j].table_offset = item_offset;
				header.mjpegdec0[j].init_status = 0;
				header.mjpegdec0[j].table_size = table_size;
			} else {
				header.mjpegdec1[j - 4].table_offset = item_offset;
				header.mjpegdec1[j - 4].init_status = 0;
				header.mjpegdec1[j - 4].table_size = table_size;
			}
			header.total_size += table_size;
			item_offset += table_size;
		}

		MMSCH_V4_0_INSERT_END();

		/* send init table to MMSCH */
		size = sizeof(struct mmsch_v4_0_3_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);

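		/*
		 * Kick the MMSCH: write a non-zero request to the host
		 * mailbox, then poll the response register in 10 us steps
		 * with a 1000 us budget.
		 */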
		param = 0x00000001;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		init_status =
			((struct mmsch_v4_0_3_init_header *)(table_loc))->mjpegdec0[i].init_status;
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);

			if (resp != 0)
				break;
			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"
					" waiting for regMMSCH_VF_MAILBOX_RESP "
					"(expected=0x%08x, readback=0x%08x)\n",
					tmp, expected, resp);
				return -EBUSY;
			}
		}
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
				init_status != MMSCH_VF_ENGINE_STATUS__PASS)
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
					resp, init_status);

	}
	return 0;
}

/**
 * jpeg_v4_0_3_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int jpeg_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

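	/*
	 * Under SR-IOV the MMSCH has already programmed the ring registers
	 * (see jpeg_v4_0_3_start_sriov()), so only the software-side ring
	 * state is reset here and the rings are marked ready.
	 */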
	if (amdgpu_sriov_vf(adev)) {
		r = jpeg_v4_0_3_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
				ring = &adev->jpeg.inst[i].ring_dec[j];
				ring->wptr = 0;
				ring->wptr_old = 0;
				jpeg_v4_0_3_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}
		}
	} else {
		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
			jpeg_inst = GET_INST(JPEG, i);

			ring = adev->jpeg.inst[i].ring_dec;

			if (ring->use_doorbell)
				adev->nbio.funcs->vcn_doorbell_range(
					adev, ring->use_doorbell,
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						9 * jpeg_inst,
					adev->jpeg.inst[i].aid_id);

			for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
				ring = &adev->jpeg.inst[i].ring_dec[j];
				if (ring->use_doorbell)
					WREG32_SOC15_OFFSET(
						VCN, GET_INST(VCN, i),
						regVCN_JPEG_DB_CTRL,
						(ring->pipe ? (ring->pipe - 0x15) : 0),
						ring->doorbell_index
							<< VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
							VCN_JPEG_DB_CTRL__EN_MASK);
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}
	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
			ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return ret;
}

/**
 * jpeg_v4_0_3_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_3_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_3_hw_init(adev);

	return r;
}

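/*
 * JPEG_CGC_CTRL selects dynamic vs. always-on clocking plus the gating
 * delays; JPEG_CGC_GATE bits hard-gate the JMCIF and JRBBM clocks and each
 * ring's decoder clock (per the mask names; the exact gating behavior is
 * hardware-defined).
 */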
static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	int i, jpeg_inst;
	uint32_t data;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1));
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
		data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}

static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	int i, jpeg_inst;
	uint32_t data;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
		data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}

/**
 * jpeg_v4_0_3_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
			     1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(
			JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDJ_PWR_ON
				<< UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
					  regUVD_JPEG_POWER_STATUS),
			 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		jpeg_v4_0_3_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ring = &adev->jpeg.inst[i].ring_dec[j];

			/* enable System Interrupt for JRBC */
			WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
						  regJPEG_SYS_INT_EN),
				 JPEG_SYS_INT_EN__DJRBC0_MASK << j,
				 ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j));

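			/*
			 * Ring-buffer bring-up: point JRBC at the ring, hold
			 * fetch off while rptr/wptr are cleared, then leave
			 * only rptr-write enable set. The raw 0x1/0x2 bits
			 * match RB_NO_FETCH and RB_RPTR_WR_EN in earlier
			 * JPEG revisions (assumption; this file keeps them
			 * as magic numbers).
			 */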
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_CNTL,
					    reg_offset,
					    (0x00000001L | 0x00000002L));
			WREG32_SOC15_OFFSET(
				JPEG, jpeg_inst,
				regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
				reg_offset, lower_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(
				JPEG, jpeg_inst,
				regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
				reg_offset, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_RPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_WPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_CNTL,
					    reg_offset, 0x00000002L);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_SIZE,
					    reg_offset, ring->ring_size / 4);
			ring->wptr = RREG32_SOC15_OFFSET(
				JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
				reg_offset);
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_3_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_3_stop(struct amdgpu_device *adev)
{
	int i, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
			 UVD_JMI_CNTL__SOFT_RESET_MASK,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v4_0_3_enable_clock_gating(adev, i);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
					  regUVD_JPEG_POWER_STATUS),
			 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
			     2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(
			JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDJ_PWR_OFF
				<< UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v4_0_3_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(
		JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
		ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}

/**
 * jpeg_v4_0_3_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15_OFFSET(
			JPEG, GET_INST(JPEG, ring->me),
			regUVD_JRBC0_UVD_JRBC_RB_WPTR,
			ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}

/**
 * jpeg_v4_0_3_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC0_UVD_JRBC_RB_WPTR,
				    (ring->pipe ? (0x40 * ring->pipe - 0xc80) :
						  0),
				    lower_32_bits(ring->wptr));
	}
}

/**
 * jpeg_v4_0_3_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80004000);
}

/**
 * jpeg_v4_0_3_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x62a04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00004000);
}

/**
 * jpeg_v4_0_3_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned int flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

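	/*
	 * Fence protocol: the sequence number goes into GPCOM_DATA0/1 and
	 * the write target into the JRBC memory-write BAR; GPCOM_CMD 0x8
	 * kicks the write, and the closing PACKETJ_TYPE7 raises the trap.
	 */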
	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	if (ring->adev->jpeg.inst[ring->me].aid_id) {
		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
			0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, 0x4);
	} else {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	if (ring->adev->jpeg.inst[ring->me].aid_id) {
		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
			0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, 0x0);
	} else {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

/**
 * jpeg_v4_0_3_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned int vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_STATUS_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}

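/*
 * Registers whose byte offset falls inside 0x10000-0x105ff can be encoded
 * directly in the PACKETJ header; anything outside that window is reached
 * indirectly by first loading the offset through UVD_JRBC_EXTERNAL_REG.
 */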
static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

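/*
 * Idle means every ring's JRBC status has RB_JOB_DONE set; start from true
 * and AND in each ring so a single busy ring reports the block as busy.
 */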
static bool jpeg_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool ret = true;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ret &= ((RREG32_SOC15_OFFSET(
					 JPEG, GET_INST(JPEG, i),
					 regUVD_JRBC0_UVD_JRBC_STATUS,
					 reg_offset) &
				 UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ret = SOC15_WAIT_ON_RREG_OFFSET(
				JPEG, GET_INST(JPEG, i),
				regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset,
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static int jpeg_v4_0_3_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (enable) {
			if (!jpeg_v4_0_3_is_idle(handle))
				return -EBUSY;
			jpeg_v4_0_3_enable_clock_gating(adev, i);
		} else {
			jpeg_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

static int jpeg_v4_0_3_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_3_stop(adev);
	else
		ret = jpeg_v4_0_3_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

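/*
 * The IH reports which AID raised the trap in entry->node_id; map that back
 * to a logical JPEG instance via node_id_to_phys_map and the per-instance
 * aid_id before dispatching on the ring's source ID.
 */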
static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_4_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_4_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_4_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_4_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_4_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_4_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_4_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
	.name = "jpeg_v4_0_3",
	.early_init = jpeg_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_3_sw_init,
	.sw_fini = jpeg_v4_0_3_sw_fini,
	.hw_init = jpeg_v4_0_3_hw_init,
	.hw_fini = jpeg_v4_0_3_hw_fini,
	.suspend = jpeg_v4_0_3_suspend,
	.resume = jpeg_v4_0_3_resume,
	.is_idle = jpeg_v4_0_3_is_idle,
	.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
	.set = jpeg_v4_0_3_set_interrupt_state,
	.process = jpeg_v4_0_3_process_interrupt,
};

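/*
 * All JPEG instances share the first instance's IRQ source; the type count
 * grows by one slot per ring for every instance so each ring gets its own
 * entry.
 */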
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
	}
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &jpeg_v4_0_3_ip_funcs,
};

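/*
 * Uncorrectable-error status register pairs ({LO, HI} per entry), one "S"
 * and one "D" bank for each of the eight JPEG cores on the die.
 */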
static const struct amdgpu_ras_err_status_reg_entry jpeg_v4_0_3_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0S, regVCN_UE_ERR_STATUS_HI_JPEG0S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0D, regVCN_UE_ERR_STATUS_HI_JPEG0D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1S, regVCN_UE_ERR_STATUS_HI_JPEG1S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1D, regVCN_UE_ERR_STATUS_HI_JPEG1D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2S, regVCN_UE_ERR_STATUS_HI_JPEG2S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2D, regVCN_UE_ERR_STATUS_HI_JPEG2D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3S, regVCN_UE_ERR_STATUS_HI_JPEG3S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3D, regVCN_UE_ERR_STATUS_HI_JPEG3D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4S, regVCN_UE_ERR_STATUS_HI_JPEG4S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4D, regVCN_UE_ERR_STATUS_HI_JPEG4D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5S, regVCN_UE_ERR_STATUS_HI_JPEG5S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5D, regVCN_UE_ERR_STATUS_HI_JPEG5D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6S, regVCN_UE_ERR_STATUS_HI_JPEG6S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6D, regVCN_UE_ERR_STATUS_HI_JPEG6D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7S, regVCN_UE_ERR_STATUS_HI_JPEG7S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7D, regVCN_UE_ERR_STATUS_HI_JPEG7D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7D"},
};

static void jpeg_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
						   uint32_t jpeg_inst,
						   void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;

	/* jpeg v4_0_3 only supports uncorrectable errors */
	amdgpu_ras_inst_query_ras_error_count(adev,
			jpeg_v4_0_3_ue_reg_list,
			ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
			NULL, 0, GET_INST(VCN, jpeg_inst),
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
			&err_data->ue_count);
}

static void jpeg_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_err_status)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		dev_warn(adev->dev, "JPEG RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
		jpeg_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void jpeg_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
						   uint32_t jpeg_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
			jpeg_v4_0_3_ue_reg_list,
			ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
			GET_INST(VCN, jpeg_inst));
}

static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		dev_warn(adev->dev, "JPEG RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
		jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
	.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
	.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
};

static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v4_0_3_ras_hw_ops,
	},
};

static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.ras = &jpeg_v4_0_3_ras;
}