/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

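/*
 * Mailbox control is accessed one byte at a time: the TRN byte holds the
 * VF-side TRN_MSG_VALID/TRN_MSG_ACK bits and the RCV byte holds the
 * host-facing RCV_MSG_VALID/RCV_MSG_ACK bits. The bit positions used below
 * (bit 0 = VALID, bit 1 = ACK) are inferred from the helpers in this file,
 * not restated from the register documentation.
 */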
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If it is called outside the IRQ routine, peek_msg is not guaranteed to
 * return a correct value, since RCV_DW0 only holds a valid message while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

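/*
 * Check whether the message currently sitting in RCV_DW0 matches the
 * expected event; if it does, ack it back to the host, otherwise leave it
 * untouched and report -ENOENT.
 */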
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

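/*
 * Busy-wait (in 5 ms steps) for the host to raise TRN_MSG_ACK after a
 * message has been marked valid; give up after
 * AI_MAILBOX_POLL_ACK_TIMEDOUT milliseconds.
 */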
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

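/*
 * Poll (in 10 ms steps) until the expected event shows up in RCV_DW0, for
 * at most AI_MAILBOX_POLL_MSG_TIMEDOUT milliseconds.
 */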
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

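/*
 * Send one request to the host: put the request id in TRN_DW0 and up to
 * three data words in TRN_DW1..TRN_DW3, raise TRN_MSG_VALID, then wait for
 * the host's ack before dropping TRN_MSG_VALID again.
 */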
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID so that the hardware clears the host's
	 * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK;
	 * otherwise xgpu_ai_poll_ack() below would return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
			mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

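/*
 * Issue one of the idh_request messages to the host. For the three requests
 * that expect a reply (init/fini/reset access), additionally poll for
 * READY_TO_ACCESS_GPU and, where applicable, pick up the firmware-reserve
 * checksum the host leaves in RCV_DW2.
 */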
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if the request expects READY_TO_ACCESS_GPU */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
		req == IDH_REQ_GPU_FINI_ACCESS ||
		req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

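/*
 * Thin wrappers used through amdgpu_virt_ops: ask the host for a VF reset,
 * or request/release full GPU access around init and fini.
 */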
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

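/*
 * Enable or disable the mailbox ACK interrupt (ACK_INT_EN) in
 * BIF_BX_PF0_MAILBOX_INT_CNTL, depending on the requested IRQ state.
 */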
static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
				(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

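/*
 * Worker scheduled from the mailbox RCV interrupt when the host announces a
 * function level reset (FLR): hold off GPU recovery until the host signals
 * FLR_NOTIFICATION_CMPL (or the poll times out), then trigger recovery
 * ourselves if no TDR is configured.
 */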
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover() until the FLR COMPLETE message is
	 * received, otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 *
	 * lock_reset can be unlocked to allow "amdgpu_job_timedout"
	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_lockup_timeout == 0)
		amdgpu_device_gpu_recover(adev, NULL, true);
}

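/*
 * Enable or disable the mailbox "message valid" interrupt (VALID_INT_EN) in
 * BIF_BX_PF0_MAILBOX_INT_CNTL, depending on the requested IRQ state.
 */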
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages, such as FLR complete, are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

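/*
 * Register the two BIF mailbox interrupt sources used above: source id 135
 * for incoming messages (rcv) and 138 for acks, undoing the rcv hookup if
 * the second registration fails.
 */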
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

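/*
 * Enable both mailbox interrupts and set up the FLR worker; the worker is
 * only scheduled from xgpu_ai_mailbox_rcv_irq() when the host sends
 * IDH_FLR_NOTIFICATION.
 */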
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu	= xgpu_ai_request_reset,
	.wait_reset	= NULL,
	.trans_msg	= xgpu_ai_mailbox_trans_msg,
};