/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 20

uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

	addr -= AMDGPU_VA_RESERVED_SIZE;

	if (addr >= AMDGPU_VA_HOLE_START)
		addr |= AMDGPU_VA_HOLE_END;

	return addr;
}

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except the mailbox are blocked if
	 * blocking is enabled in the hypervisor; probe SCRATCH_REG0
	 * to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
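
/*
 * Illustrative sketch (not part of this file): early-init code can use the
 * probe above to decide whether only the mailbox path is still usable.
 * example_need_mailbox_only() is a hypothetical helper, shown only to
 * demonstrate the intended use of amdgpu_virt_mmio_blocked().
 */
#if 0
static bool example_need_mailbox_only(struct amdgpu_device *adev)
{
	/* only meaningful on an SR-IOV VF; bare metal never blocks MMIO */
	return amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev);
}
#endif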

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    (u64 *)&adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
			      (u64 *)&adev->virt.csa_vmid0_addr,
			      NULL);
}

/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM;
 * each GFX command submission must then reference that virtual address in
 * its META_DATA init package to support SR-IOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA and PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
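
/*
 * Illustrative sketch (not part of this file): the expected per-VM call
 * pattern for the static CSA under SR-IOV. example_setup_csa() is a
 * hypothetical helper; in practice the mapping is done once per VM from
 * the KMS open path, right after amdgpu_vm_init().
 */
#if 0
static int example_setup_csa(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *csa_va;
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_map_static_csa(adev, vm, &csa_va);
	if (r)
		return r;

	/* every GFX submission from this VM now references
	 * amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK in its
	 * META_DATA init package */
	return 0;
}
#endif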

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so
	 * may block the gpu_recover() routine forever: e.g. this
	 * virt_kiq_rreg can be triggered from TTM, and
	 * ttm_bo_lock_delayed_workqueue() will never return if we keep
	 * waiting here, which makes gpu_recover() hang.
	 *
	 * Also don't wait any longer when called from IRQ context.
	 */
#if 0
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_read;

	if (in_interrupt())
		might_sleep();
#endif
	kprintf("amdgpu_virt_kiq_rreg: implement in_interrupt() function\n");
	if (r < 1 && adev->in_gpu_reset)
		goto failed_kiq_read;

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
	pr_err("failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so
	 * may block the gpu_recover() routine forever: e.g. this
	 * virt_kiq_wreg can be triggered from TTM, and
	 * ttm_bo_lock_delayed_workqueue() will never return if we keep
	 * waiting here, which makes gpu_recover() hang.
	 *
	 * Also don't wait any longer when called from IRQ context.
	 */
#if 0
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_write;

	if (in_interrupt())
		might_sleep();
#endif
	kprintf("amdgpu_virt_kiq_wreg: implement in_interrupt() function\n");
	if (r < 1 && adev->in_gpu_reset)
		goto failed_kiq_write;

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_kiq_write:
	pr_err("failed to write reg:%x\n", reg);
}
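
/*
 * Illustrative sketch (not part of this file): a read-modify-write of a
 * register through the KIQ helpers above. example_kiq_rmw() and the reuse
 * of SCRATCH_REG0 (0xc040, borrowed from amdgpu_virt_mmio_blocked()) are
 * hypothetical, shown only to demonstrate the rreg/wreg pairing.
 */
#if 0
static void example_kiq_rmw(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t clr, uint32_t set)
{
	/* read the current value via the KIQ ring ... */
	uint32_t v = amdgpu_virt_kiq_rreg(adev, reg);

	/* ... clear and set the requested bits ... */
	v = (v & ~clr) | set;

	/* ... and write it back the same way */
	amdgpu_virt_kiq_wreg(adev, reg, v);
}
#endif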

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: whether this is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: whether this is driver init time.
 * When driver init/fini is finished, full gpu access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
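
/*
 * Illustrative sketch (not part of this file): request/release are meant
 * to bracket any init- or fini-time hardware access, so the hypervisor
 * grants the VF exclusive access for the duration. example_exclusive_init()
 * is a hypothetical caller.
 */
#if 0
static int example_exclusive_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... touch the hardware: IP block hw_init, firmware load, ... */

	/* releasing sets AMDGPU_SRIOV_CAPS_RUNTIME again */
	return amdgpu_virt_release_full_gpu(adev, true);
}
#endif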

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for a GPU reset to complete
 * @adev: amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on successful allocation.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    (u64 *)&adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%lx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
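
/*
 * Illustrative sketch (not part of this file): an IP block would allocate
 * the MM table once during hw_init and pass its GPU address to firmware;
 * amdgpu_virt_free_mm_table() below undoes the allocation at hw_fini time.
 * example_mm_table_hw_init() is a hypothetical caller.
 */
#if 0
static int example_mm_table_hw_init(struct amdgpu_device *adev)
{
	int r = amdgpu_virt_alloc_mm_table(adev);

	if (r)
		return r;

	/* hand adev->virt.mm_table.gpu_addr to the UVD/VCE init path here */
	return 0;
}
#endif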

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      (u64 *)&adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
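
/*
 * Illustrative sketch (not part of this file): the checksum above is a
 * plain byte sum seeded with a shared key. The producer stores
 * get_checksum(msg, size, key, 0) into the message's checksum field;
 * since that field's bytes are part of the sum, the consumer passes the
 * stored value back as @chksum so it is subtracted out again, exactly as
 * amdgpu_virt_init_data_exchange() does below. example_msg_valid() is a
 * hypothetical helper.
 */
#if 0
static bool example_msg_valid(void *msg, unsigned long size,
			      unsigned int key, unsigned int stored)
{
	/* a valid message re-derives the stored checksum */
	return amdgpu_virt_fw_reserve_get_checksum(msg, size, key,
						   stored) == stored;
}
#endif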

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}