1 /* $NetBSD: amdgpu_mxgpu_vi.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $ */
2
3 /*
4 * Copyright 2017 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Xiangliang.Yu@amd.com
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: amdgpu_mxgpu_vi.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");
29
30 #include "amdgpu.h"
31 #include "vi.h"
32 #include "bif/bif_5_0_d.h"
33 #include "bif/bif_5_0_sh_mask.h"
34 #include "vid.h"
35 #include "gca/gfx_8_0_d.h"
36 #include "gca/gfx_8_0_sh_mask.h"
37 #include "gmc_v8_0.h"
38 #include "gfx_v8_0.h"
39 #include "sdma_v3_0.h"
40 #include "tonga_ih.h"
41 #include "gmc/gmc_8_2_d.h"
42 #include "gmc/gmc_8_2_sh_mask.h"
43 #include "oss/oss_3_0_d.h"
44 #include "oss/oss_3_0_sh_mask.h"
45 #include "dce/dce_10_0_d.h"
46 #include "dce/dce_10_0_sh_mask.h"
47 #include "smu/smu_7_1_3_d.h"
48 #include "mxgpu_vi.h"
49
50 /* VI golden setting */
/*
 * Fiji MGCG/CGCG init sequence: flat triples of
 * (register offset, AND mask, OR value) consumed by
 * amdgpu_device_program_register_sequence().
 */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
100
/*
 * Fiji golden settings (rev a10): (register, AND mask, OR value) triples
 * applied via amdgpu_device_program_register_sequence().
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
128
/*
 * Fiji common golden registers: (register, AND mask, OR value) triples.
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
141
/*
 * Tonga MGCG/CGCG init sequence: (register, AND mask, OR value) triples,
 * including per-CU (CU0..CU7) clock-gating control registers.
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE,   0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL,        0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0,       0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4,        0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL,        0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0,       0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG,         0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL,     0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL,       0xffffffff, 0x0020003c,
	mmPCIE_INDEX,               0xffffffff, 0x0140001c,
	mmPCIE_DATA,                0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4,          0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4,           0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL,   0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL,      0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS,          0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0,       0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL,    0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL,          0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL,           0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL,           0xff000ff0, 0x00000100,
};
231
/*
 * Tonga golden settings (rev a11): (register, AND mask, OR value) triples.
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
271
/*
 * Tonga common golden registers: (register, AND mask, OR value) triples.
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
281
xgpu_vi_init_golden_registers(struct amdgpu_device * adev)282 void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
283 {
284 switch (adev->asic_type) {
285 case CHIP_FIJI:
286 amdgpu_device_program_register_sequence(adev,
287 xgpu_fiji_mgcg_cgcg_init,
288 ARRAY_SIZE(
289 xgpu_fiji_mgcg_cgcg_init));
290 amdgpu_device_program_register_sequence(adev,
291 xgpu_fiji_golden_settings_a10,
292 ARRAY_SIZE(
293 xgpu_fiji_golden_settings_a10));
294 amdgpu_device_program_register_sequence(adev,
295 xgpu_fiji_golden_common_all,
296 ARRAY_SIZE(
297 xgpu_fiji_golden_common_all));
298 break;
299 case CHIP_TONGA:
300 amdgpu_device_program_register_sequence(adev,
301 xgpu_tonga_mgcg_cgcg_init,
302 ARRAY_SIZE(
303 xgpu_tonga_mgcg_cgcg_init));
304 amdgpu_device_program_register_sequence(adev,
305 xgpu_tonga_golden_settings_a11,
306 ARRAY_SIZE(
307 xgpu_tonga_golden_settings_a11));
308 amdgpu_device_program_register_sequence(adev,
309 xgpu_tonga_golden_common_all,
310 ARRAY_SIZE(
311 xgpu_tonga_golden_common_all));
312 break;
313 default:
314 BUG_ON("Doesn't support chip type.\n");
315 break;
316 }
317 }
318
319 /*
320 * Mailbox communication between GPU hypervisor and VFs
321 */
xgpu_vi_mailbox_send_ack(struct amdgpu_device * adev)322 static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
323 {
324 u32 reg;
325 int timeout = VI_MAILBOX_TIMEDOUT;
326 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
327
328 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
329 reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
330 WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
331
332 /*Wait for RCV_MSG_VALID to be 0*/
333 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
334 while (reg & mask) {
335 if (timeout <= 0) {
336 pr_err("RCV_MSG_VALID is not cleared\n");
337 break;
338 }
339 mdelay(1);
340 timeout -=1;
341
342 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
343 }
344 }
345
/* Raise or drop the TRN_MSG_VALID bit in the mailbox control register. */
static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 ctrl = RREG32_NO_KIQ(mmMAILBOX_CONTROL);

	ctrl = REG_SET_FIELD(ctrl, MAILBOX_CONTROL, TRN_MSG_VALID,
			     val ? 1 : 0);
	WREG32_NO_KIQ(mmMAILBOX_CONTROL, ctrl);
}
355
xgpu_vi_mailbox_trans_msg(struct amdgpu_device * adev,enum idh_request req)356 static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
357 enum idh_request req)
358 {
359 u32 reg;
360
361 reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
362 reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
363 MSGBUF_DATA, req);
364 WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
365
366 xgpu_vi_mailbox_set_valid(adev, true);
367 }
368
xgpu_vi_mailbox_rcv_msg(struct amdgpu_device * adev,enum idh_event event)369 static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
370 enum idh_event event)
371 {
372 u32 reg;
373 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
374
375 /* workaround: host driver doesn't set VALID for CMPL now */
376 if (event != IDH_FLR_NOTIFICATION_CMPL) {
377 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
378 if (!(reg & mask))
379 return -ENOENT;
380 }
381
382 reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
383 if (reg != event)
384 return -ENOENT;
385
386 /* send ack to PF */
387 xgpu_vi_mailbox_send_ack(adev);
388
389 return 0;
390 }
391
xgpu_vi_poll_ack(struct amdgpu_device * adev)392 static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
393 {
394 int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
395 u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
396 u32 reg;
397
398 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
399 while (!(reg & mask)) {
400 if (timeout <= 0) {
401 pr_err("Doesn't get ack from pf.\n");
402 r = -ETIME;
403 break;
404 }
405 mdelay(5);
406 timeout -= 5;
407
408 reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
409 }
410
411 return r;
412 }
413
xgpu_vi_poll_msg(struct amdgpu_device * adev,enum idh_event event)414 static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
415 {
416 int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
417
418 r = xgpu_vi_mailbox_rcv_msg(adev, event);
419 while (r) {
420 if (timeout <= 0) {
421 pr_err("Doesn't get ack from pf.\n");
422 r = -ETIME;
423 break;
424 }
425 mdelay(5);
426 timeout -= 5;
427
428 r = xgpu_vi_mailbox_rcv_msg(adev, event);
429 }
430
431 return r;
432 }
433
xgpu_vi_send_access_requests(struct amdgpu_device * adev,enum idh_request request)434 static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
435 enum idh_request request)
436 {
437 int r;
438
439 xgpu_vi_mailbox_trans_msg(adev, request);
440
441 /* start to poll ack */
442 r = xgpu_vi_poll_ack(adev);
443 if (r)
444 return r;
445
446 xgpu_vi_mailbox_set_valid(adev, false);
447
448 /* start to check msg if request is idh_req_gpu_init_access */
449 if (request == IDH_REQ_GPU_INIT_ACCESS ||
450 request == IDH_REQ_GPU_FINI_ACCESS ||
451 request == IDH_REQ_GPU_RESET_ACCESS) {
452 r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
453 if (r) {
454 pr_err("Doesn't get ack from pf, give up\n");
455 return r;
456 }
457 }
458
459 return 0;
460 }
461
xgpu_vi_request_reset(struct amdgpu_device * adev)462 static int xgpu_vi_request_reset(struct amdgpu_device *adev)
463 {
464 return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
465 }
466
xgpu_vi_wait_reset_cmpl(struct amdgpu_device * adev)467 static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
468 {
469 return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
470 }
471
/*
 * Request exclusive GPU access from the hypervisor, either for
 * initialization (init == true) or teardown (init == false).
 */
static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req = init ? IDH_REQ_GPU_INIT_ACCESS
				    : IDH_REQ_GPU_FINI_ACCESS;

	return xgpu_vi_send_access_requests(adev, req);
}
480
/*
 * Release the exclusive GPU access previously granted by the
 * hypervisor, either after initialization (init == true) or after
 * teardown (init == false).  Returns 0 on success or a negative errno
 * from the mailbox exchange.
 */
static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	/* The intermediate "int r = 0" local added no value; return the
	 * mailbox result directly (matches the sibling request path). */
	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_vi_send_access_requests(adev, req);
}
492
493 /* add support mailbox interrupts */
/*
 * TRN_MSG_ACK interrupt handler.  Acks are consumed by the polling
 * paths (xgpu_vi_poll_ack), so the interrupt itself is a no-op.
 */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");

	return 0;
}
501
xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned type,enum amdgpu_interrupt_state state)502 static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
503 struct amdgpu_irq_src *src,
504 unsigned type,
505 enum amdgpu_interrupt_state state)
506 {
507 u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
508
509 tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
510 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
511 WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
512
513 return 0;
514 }
515
/*
 * Deferred-work handler scheduled by xgpu_vi_mailbox_rcv_irq() when the
 * hypervisor announces an FLR.  Waits for the FLR completion message,
 * then triggers GPU recovery if the driver policy allows it.
 */
static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
{
	/* Recover the device pointer: work is embedded in amdgpu_virt,
	 * which is itself embedded in amdgpu_device. */
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG become 3 */
	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}
531
xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned type,enum amdgpu_interrupt_state state)532 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
533 struct amdgpu_irq_src *src,
534 unsigned type,
535 enum amdgpu_interrupt_state state)
536 {
537 u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
538
539 tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
540 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
541 WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
542
543 return 0;
544 }
545
xgpu_vi_mailbox_rcv_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)546 static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
547 struct amdgpu_irq_src *source,
548 struct amdgpu_iv_entry *entry)
549 {
550 int r;
551
552 /* trigger gpu-reset by hypervisor only if TDR disbaled */
553 if (!amdgpu_gpu_recovery) {
554 /* see what event we get */
555 r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
556
557 /* only handle FLR_NOTIFY now */
558 if (!r)
559 schedule_work(&adev->virt.flr_work);
560 }
561
562 return 0;
563 }
564
/* IRQ source ops for the mailbox ACK interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
569
/* IRQ source ops for the mailbox "message valid" (receive) interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
574
xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device * adev)575 void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
576 {
577 adev->virt.ack_irq.num_types = 1;
578 adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
579 adev->virt.rcv_irq.num_types = 1;
580 adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
581 }
582
xgpu_vi_mailbox_add_irq_id(struct amdgpu_device * adev)583 int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
584 {
585 int r;
586
587 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
588 if (r)
589 return r;
590
591 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
592 if (r) {
593 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
594 return r;
595 }
596
597 return 0;
598 }
599
xgpu_vi_mailbox_get_irq(struct amdgpu_device * adev)600 int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
601 {
602 int r;
603
604 r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
605 if (r)
606 return r;
607 r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
608 if (r) {
609 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
610 return r;
611 }
612
613 INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
614
615 return 0;
616 }
617
xgpu_vi_mailbox_put_irq(struct amdgpu_device * adev)618 void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
619 {
620 amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
621 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
622 }
623
/* SR-IOV virtualization ops exported for VI-family VFs. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu		= xgpu_vi_request_full_gpu_access,
	.rel_full_gpu		= xgpu_vi_release_full_gpu_access,
	.reset_gpu		= xgpu_vi_request_reset,
	.wait_reset		= xgpu_vi_wait_reset_cmpl,
	.trans_msg		= NULL, /* Does not need to trans VF errors to host. */
};
631