xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/jpeg_v4_0.c (revision f005ef32)
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define regUVD_JPEG_PITCH_INTERNAL_OFFSET                  0x401f

static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);

static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * jpeg_v4_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_set_dec_ring_funcs(adev);
	jpeg_v4_0_set_irq_funcs(adev);
	jpeg_v4_0_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
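	/*
	 * JPEG shares the VCN doorbell page: relative to the vcn_ring0_1
	 * doorbell pair, bare metal uses slot 1 while SR-IOV VFs use
	 * slot 4 (see the index math below).
	 */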
	ring->doorbell_index = amdgpu_sriov_vf(adev) ? (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
	ring->vm_hub = AMDGPU_MMHUB0(0);

	snprintf(ring->name, sizeof(ring->name), "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

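	/*
	 * Internal/external pair for the JPEG pitch register: the generic
	 * ring test writes the internal offset through a ring packet and
	 * reads the result back through the external (MMIO) offset.
	 */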
	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

	r = amdgpu_jpeg_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * jpeg_v4_0_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Start the JPEG block and test the decode ring
 */
static int jpeg_v4_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		r = jpeg_v4_0_start_sriov(adev);
		if (r)
			return r;
		ring->wptr = 0;
		ring->wptr_old = 0;
		jpeg_v4_0_dec_ring_set_wptr(ring);
		ring->sched.ready = true;
	} else {
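		/*
		 * Bare metal: route the VCN doorbell aperture through NBIO
		 * and enable the JPEG doorbell before testing the ring.
		 */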
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

		WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
			ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
			VCN_JPEG_DB_CTRL__EN_MASK);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

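	/* cancel pending idle work before powering the block down */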
	cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
			RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
			jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
		amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);

	return 0;
}

/**
 * jpeg_v4_0_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_hw_init(adev);

	return r;
}

static void jpeg_v4_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

static void jpeg_v4_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		|JPEG_CGC_GATE__JPEG2_DEC_MASK
		|JPEG_CGC_GATE__JMCIF_MASK
		|JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}

static int jpeg_v4_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

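		/* request UVDJ power-up (1 = PWR_ON) via the PGFSM and wait for the ack */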
		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0,
			regUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG disable power gating failed\n");
			return r;
		}
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v4_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

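		/* request UVDJ power-down (2 = PWR_OFF) via the PGFSM and wait for the ack */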
		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS,
			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG enable power gating failed\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* disable power gating */
	r = jpeg_v4_0_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v4_0_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

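	/*
	 * Program the JPEG ring buffer (JRBC). By analogy with earlier JPEG
	 * revisions, the 0x1/0x2 bits in RB_CNTL are RB_NO_FETCH and
	 * RB_RPTR_WR_EN: fetching stays stalled until RB_CNTL is rewritten
	 * with only 0x2 once the ring registers are set up.
	 */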
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);

	return 0;
}

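/*
 * Under SR-IOV the VF cannot program most multimedia registers directly.
 * Instead it builds an init table of register writes in the shared mm_table
 * and hands it to the MMSCH (multimedia scheduler) firmware through the VF
 * mailbox; MMSCH then applies the table on the VF's behalf.
 */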
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);

	header.jpegdec.init_status = 0;
	header.jpegdec.table_offset = 0;
	header.jpegdec.table_size = 0;

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;

	table_size = 0;

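	/* append direct register writes for the decode ring buffer to the init table */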
	ring = adev->jpeg.inst->ring_dec;

	MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
		regUVD_LMI_JRBC_RB_64BIT_BAR_LOW),
		lower_32_bits(ring->gpu_addr));
	MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
		regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH),
		upper_32_bits(ring->gpu_addr));
	MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
		regUVD_JRBC_RB_SIZE), ring->ring_size / 4);

	/* add end packet */
	MMSCH_V4_0_INSERT_END();

	/* refine header */
	header.jpegdec.init_status = 0;
	header.jpegdec.table_offset = header.total_size;
	header.jpegdec.table_size = table_size;
	header.total_size += table_size;

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v4_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x00000001;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = MMSCH_VF_MAILBOX_RESP__OK;
	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->jpegdec.init_status;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);

		if (resp != 0)
			break;
		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for regMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}
	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n", resp, init_status);

	return 0;
}

/**
 * jpeg_v4_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v4_0_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v4_0_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v4_0_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

static int jpeg_v4_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;

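	/* clocks may only be gated while the JRBC is idle */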
	if (enable) {
		if (!jpeg_v4_0_is_idle(handle))
			return -EBUSY;
		jpeg_v4_0_enable_clock_gating(adev);
	} else {
		jpeg_v4_0_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v4_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

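	/* the host owns power gating under SR-IOV; just track the state */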
	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_stop(adev);
	else
		ret = jpeg_v4_0_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
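	/* nothing to program for JPEG trap interrupts; accept the state change as a no-op */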
	return 0;
}

static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
	.name = "jpeg_v4_0",
	.early_init = jpeg_v4_0_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_sw_init,
	.sw_fini = jpeg_v4_0_sw_fini,
	.hw_init = jpeg_v4_0_hw_init,
	.hw_fini = jpeg_v4_0_hw_fini,
	.suspend = jpeg_v4_0_suspend,
	.resume = jpeg_v4_0_resume,
	.is_idle = jpeg_v4_0_is_idle,
	.wait_for_idle = jpeg_v4_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_set_powergating_state,
};

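/*
 * The ring packet format is unchanged from JPEG v2.0, so the v2.0
 * emit/insert helpers are reused below.
 */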
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_dec_ring_vm_funcs;
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
	.set = jpeg_v4_0_set_interrupt_state,
	.process = jpeg_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
	.set = jpeg_v4_0_set_ras_interrupt_state,
	.process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;

	adev->jpeg.inst->ras_poison_irq.num_types = 1;
	adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v4_0_ip_funcs,
};

static uint32_t jpeg_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
		uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V4_0_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V4_0_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool jpeg_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst = 0, sub = 0, poison_stat = 0;

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
		for (sub = 0; sub < AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				jpeg_v4_0_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
	.query_poison_status = jpeg_v4_0_query_ras_poison_status,
};

static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v4_0_ras_hw_ops,
		.ras_late_init = amdgpu_jpeg_ras_late_init,
	},
};

static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[JPEG_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
		adev->jpeg.ras = &jpeg_v4_0_ras;
		break;
	default:
		break;
	}
}