/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
#if 0
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
#endif
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;
	kprintf("amdgpu_driver_load_kms(): flags=%lu drm_device=%p adev=%p\n",
		flags, dev, adev);

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0)
#if 0
	    && !pci_is_thunderbolt_attached(dev->pdev)
#endif
	    )
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors, such as
	 * memory allocation, iomapping or memory manager initialization
	 * failures; it must properly initialize the GPU MC controller
	 * and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Calling ACPI methods requires modeset init,
	 * but failure there is not fatal.
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
#if 0
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
#endif
		pm_runtime_set_active(dev->dev);
#if 0
		pm_runtime_allow(dev->dev);
#endif
		pm_runtime_mark_last_busy(dev->dev);
#if 0
		pm_runtime_put_autosuspend(dev->dev);
#endif
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
#if 0
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
#endif
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

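/**
 * amdgpu_firmware_info - answer a firmware version query
 *
 * @fw_info: returned version and feature level of the queried firmware
 * @query_fw: firmware type and index requested by userspace
 * @adev: amdgpu device pointer
 *
 * Fills @fw_info for the requested IP block firmware.
 * Returns 0 on success, -EINVAL on an unknown type or index.
 */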
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
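/*
 * Illustrative userspace call sequence (a sketch, not part of this driver;
 * assumes libdrm's drmCommandWrite() helper and an open amdgpu fd):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint64_t bytes_moved = 0;
 *
 *	request.return_pointer = (uintptr_t)&bytes_moved;
 *	request.return_size = sizeof(bytes_moved);
 *	request.query = AMDGPU_INFO_NUM_BYTES_MOVED;
 *	r = drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */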
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, j, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= adev->gfx.gfx_ring[i].ready << i;
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= adev->gfx.compute_ring[i].ready << i;
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= adev->sdma.instance[i].ring.ready << i;
			ib_start_alignment = 256;
			ib_size_alignment = 4;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
				if (adev->uvd.harvest_config & (1 << i))
					continue;
				ring_mask |= adev->uvd.inst[i].ring.ready;
			}
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= adev->vce.ring[i].ready << i;
			ib_start_alignment = 4;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
				if (adev->uvd.harvest_config & (1 << i))
					continue;
				for (j = 0; j < adev->uvd.num_enc_rings; j++)
					ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
			}
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_dec.ready;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
				ring_mask |= adev->vcn.ring_enc[i].ready << i;
			ib_start_alignment = 64;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_jpeg.ready;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

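		/* Example (hypothetical values): userspace selecting SE1/SH0
		 * would pack the instance field as
		 *
		 *	instance = (1 << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
		 *		   (0 << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
		 */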
		/* set full masks if userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info;
		uint64_t vm_size;

		memset(&dev_info, 0, sizeof(dev_info));
		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in kHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40 bits */
		if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_VA_HOLE_START);

		if (vm_size > AMDGPU_VA_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
			dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
#if 0
	/* empty function as of 4.19 */
	drm_fb_helper_lastclose(dev);
#endif
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);

	file_priv->driver_priv = NULL;

#if 0
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;
#endif

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto error_vm;
	}

	lockinit(&fpriv->bo_list_lock, "agfbll", 0, LK_CANRECURSE);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
#if 0
pm_put:
	pm_runtime_put_autosuspend(dev->dev);
#endif

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_vm_fini(adev, &fpriv->vm);
	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
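	/* Example with hypothetical numbers: if the hw counter reads 100 and
	 * we are inside vblank but before vsync (vpos >= 0), we return 101,
	 * as if the counter had already incremented at start of vblank.
	 */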
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

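/*
 * amdgpu_debugfs_firmware_init - register the firmware info debugfs file
 *
 * Registers the "amdgpu_firmware_info" node above; with debugfs enabled it
 * is typically readable at /sys/kernel/debug/dri/<minor>/amdgpu_firmware_info
 * (path assumes a standard Linux debugfs mount).
 */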
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}