1 /*	$NetBSD: amdgpu_kms.c,v 1.6 2021/12/19 12:02:39 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2008 Advanced Micro Devices, Inc.
5  * Copyright 2008 Red Hat Inc.
6  * Copyright 2009 Jerome Glisse.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24  * OTHER DEALINGS IN THE SOFTWARE.
25  *
26  * Authors: Dave Airlie
27  *          Alex Deucher
28  *          Jerome Glisse
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: amdgpu_kms.c,v 1.6 2021/12/19 12:02:39 riastradh Exp $");
33 
34 #include "amdgpu.h"
35 #include <drm/drm_debugfs.h>
36 #include <drm/amdgpu_drm.h>
37 #include "amdgpu_sched.h"
38 #include "amdgpu_uvd.h"
39 #include "amdgpu_vce.h"
40 #include "atom.h"
41 
42 #include <linux/vga_switcheroo.h>
43 #include <linux/slab.h>
44 #include <linux/uaccess.h>
45 #include <linux/pci.h>
46 #include <linux/pm_runtime.h>
47 #include "amdgpu_amdkfd.h"
48 #include "amdgpu_gem.h"
49 #include "amdgpu_display.h"
50 #include "amdgpu_ras.h"
51 
52 #include <linux/nbsd-namespace.h>
53 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
54 {
55 	struct amdgpu_gpu_instance *gpu_instance;
56 	int i;
57 
58 	mutex_lock(&mgpu_info.mutex);
59 
60 	for (i = 0; i < mgpu_info.num_gpu; i++) {
61 		gpu_instance = &(mgpu_info.gpu_ins[i]);
62 		if (gpu_instance->adev == adev) {
63 			mgpu_info.gpu_ins[i] =
64 				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
65 			mgpu_info.num_gpu--;
66 			if (adev->flags & AMD_IS_APU)
67 				mgpu_info.num_apu--;
68 			else
69 				mgpu_info.num_dgpu--;
70 			break;
71 		}
72 	}
73 
74 	mutex_unlock(&mgpu_info.mutex);
75 }
76 
77 /**
78  * amdgpu_driver_unload_kms - Main unload function for KMS.
79  *
80  * @dev: drm dev pointer
81  *
82  * This is the main unload function for KMS (all asics).
84  */
85 void amdgpu_driver_unload_kms(struct drm_device *dev)
86 {
87 	struct amdgpu_device *adev = dev->dev_private;
88 
89 	if (adev == NULL)
90 		return;
91 
92 	amdgpu_unregister_gpu_instance(adev);
93 
94 	if (adev->rmmio_size == 0)
95 		goto done_free;
96 
97 	if (amdgpu_sriov_vf(adev))
98 		amdgpu_virt_request_full_gpu(adev, false);
99 
100 	if (adev->runpm) {
101 		pm_runtime_get_sync(dev->dev);
102 		pm_runtime_forbid(dev->dev);
103 	}
104 
105 	amdgpu_acpi_fini(adev);
106 
107 	amdgpu_device_fini(adev);
108 
109 done_free:
110 	kfree(adev);
111 	dev->dev_private = NULL;
112 }
113 
114 void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
115 {
116 	struct amdgpu_gpu_instance *gpu_instance;
117 
118 	mutex_lock(&mgpu_info.mutex);
119 
120 	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
121 		DRM_ERROR("Cannot register more gpu instance\n");
122 		mutex_unlock(&mgpu_info.mutex);
123 		return;
124 	}
125 
126 	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
127 	gpu_instance->adev = adev;
128 	gpu_instance->mgpu_fan_enabled = 0;
129 
130 	mgpu_info.num_gpu++;
131 	if (adev->flags & AMD_IS_APU)
132 		mgpu_info.num_apu++;
133 	else
134 		mgpu_info.num_dgpu++;
135 
136 	mutex_unlock(&mgpu_info.mutex);
137 }
138 
139 /**
140  * amdgpu_driver_load_kms - Main load function for KMS.
141  *
142  * @dev: drm dev pointer
143  * @flags: device flags
144  *
145  * This is the main load function for KMS (all asics).
146  * Returns 0 on success, error on failure.
147  */
148 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
149 {
150 	struct amdgpu_device *adev;
151 	int r, acpi_status;
152 
153 	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
154 	if (adev == NULL) {
155 		return -ENOMEM;
156 	}
157 	dev->dev_private = (void *)adev;
158 
159 	if (amdgpu_has_atpx() &&
160 	    (amdgpu_is_atpx_hybrid() ||
161 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
162 	    ((flags & AMD_IS_APU) == 0) &&
163 	    !pci_is_thunderbolt_attached(dev->pdev))
164 		flags |= AMD_IS_PX;
165 
166 	/* amdgpu_device_init() should report only fatal errors,
167 	 * such as a memory allocation, iomapping, or memory
168 	 * manager initialization failure; it must properly
169 	 * initialize the GPU MC controller and permit VRAM
170 	 * allocation.
171 	 */
172 	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
173 	if (r) {
174 		dev_err(pci_dev_dev(dev->pdev), "Fatal error during GPU init\n");
175 		goto out;
176 	}
177 
178 	if (amdgpu_device_supports_boco(dev) &&
179 	    (amdgpu_runtime_pm != 0)) /* enable runpm by default */
180 		adev->runpm = true;
181 	else if (amdgpu_device_supports_baco(dev) &&
182 		 (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
183 		adev->runpm = true;
184 
185 	/* Calling ACPI methods requires modeset init,
186 	 * but failure is not fatal.
187 	 */
188 	if (!r) {
189 		acpi_status = amdgpu_acpi_init(adev);
190 		if (acpi_status)
191 			dev_dbg(pci_dev_dev(dev->pdev),
192 				"Error during ACPI methods call\n");
193 	}
194 
195 	if (adev->runpm) {
196 		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
197 		pm_runtime_use_autosuspend(dev->dev);
198 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
199 		pm_runtime_set_active(dev->dev);
200 		pm_runtime_allow(dev->dev);
201 		pm_runtime_mark_last_busy(dev->dev);
202 		pm_runtime_put_autosuspend(dev->dev);
203 	}
204 
205 out:
206 	if (r) {
207 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
208 		if (adev->rmmio_size && adev->runpm)
209 			pm_runtime_put_noidle(dev->dev);
210 		amdgpu_driver_unload_kms(dev);
211 	}
212 
213 	return r;
214 }
215 
216 static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
217 				struct drm_amdgpu_query_fw *query_fw,
218 				struct amdgpu_device *adev)
219 {
220 	switch (query_fw->fw_type) {
221 	case AMDGPU_INFO_FW_VCE:
222 		fw_info->ver = adev->vce.fw_version;
223 		fw_info->feature = adev->vce.fb_version;
224 		break;
225 	case AMDGPU_INFO_FW_UVD:
226 		fw_info->ver = adev->uvd.fw_version;
227 		fw_info->feature = 0;
228 		break;
229 	case AMDGPU_INFO_FW_VCN:
230 		fw_info->ver = adev->vcn.fw_version;
231 		fw_info->feature = 0;
232 		break;
233 	case AMDGPU_INFO_FW_GMC:
234 		fw_info->ver = adev->gmc.fw_version;
235 		fw_info->feature = 0;
236 		break;
237 	case AMDGPU_INFO_FW_GFX_ME:
238 		fw_info->ver = adev->gfx.me_fw_version;
239 		fw_info->feature = adev->gfx.me_feature_version;
240 		break;
241 	case AMDGPU_INFO_FW_GFX_PFP:
242 		fw_info->ver = adev->gfx.pfp_fw_version;
243 		fw_info->feature = adev->gfx.pfp_feature_version;
244 		break;
245 	case AMDGPU_INFO_FW_GFX_CE:
246 		fw_info->ver = adev->gfx.ce_fw_version;
247 		fw_info->feature = adev->gfx.ce_feature_version;
248 		break;
249 	case AMDGPU_INFO_FW_GFX_RLC:
250 		fw_info->ver = adev->gfx.rlc_fw_version;
251 		fw_info->feature = adev->gfx.rlc_feature_version;
252 		break;
253 	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
254 		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
255 		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
256 		break;
257 	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
258 		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
259 		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
260 		break;
261 	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
262 		fw_info->ver = adev->gfx.rlc_srls_fw_version;
263 		fw_info->feature = adev->gfx.rlc_srls_feature_version;
264 		break;
265 	case AMDGPU_INFO_FW_GFX_MEC:
266 		if (query_fw->index == 0) {
267 			fw_info->ver = adev->gfx.mec_fw_version;
268 			fw_info->feature = adev->gfx.mec_feature_version;
269 		} else if (query_fw->index == 1) {
270 			fw_info->ver = adev->gfx.mec2_fw_version;
271 			fw_info->feature = adev->gfx.mec2_feature_version;
272 		} else
273 			return -EINVAL;
274 		break;
275 	case AMDGPU_INFO_FW_SMC:
276 		fw_info->ver = adev->pm.fw_version;
277 		fw_info->feature = 0;
278 		break;
279 	case AMDGPU_INFO_FW_TA:
280 		if (query_fw->index > 1)
281 			return -EINVAL;
282 		if (query_fw->index == 0) {
283 			fw_info->ver = adev->psp.ta_fw_version;
284 			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
285 		} else {
286 			fw_info->ver = adev->psp.ta_fw_version;
287 			fw_info->feature = adev->psp.ta_ras_ucode_version;
288 		}
289 		break;
290 	case AMDGPU_INFO_FW_SDMA:
291 		if (query_fw->index >= adev->sdma.num_instances)
292 			return -EINVAL;
293 		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
294 		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
295 		break;
296 	case AMDGPU_INFO_FW_SOS:
297 		fw_info->ver = adev->psp.sos_fw_version;
298 		fw_info->feature = adev->psp.sos_feature_version;
299 		break;
300 	case AMDGPU_INFO_FW_ASD:
301 		fw_info->ver = adev->psp.asd_fw_version;
302 		fw_info->feature = adev->psp.asd_feature_version;
303 		break;
304 	case AMDGPU_INFO_FW_DMCU:
305 		fw_info->ver = adev->dm.dmcu_fw_version;
306 		fw_info->feature = 0;
307 		break;
308 	case AMDGPU_INFO_FW_DMCUB:
309 		fw_info->ver = adev->dm.dmcub_fw_version;
310 		fw_info->feature = 0;
311 		break;
312 	default:
313 		return -EINVAL;
314 	}
315 	return 0;
316 }
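
/*
 * Illustrative sketch, not part of the build: how a userspace caller would
 * select one firmware blob through the AMDGPU_INFO_FW_VERSION query that is
 * answered by amdgpu_firmware_info() above.  Field names follow the UAPI in
 * amdgpu_drm.h; the file descriptor handling is assumed to happen elsewhere
 * (see the fuller sketch after amdgpu_info_ioctl() below).
 *
 *	struct drm_amdgpu_info_firmware fw = {0};
 *	struct drm_amdgpu_info request = {0};
 *
 *	request.return_pointer = (uintptr_t)&fw;
 *	request.return_size = sizeof(fw);
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 *	request.query_fw.ip_instance = 0;	(only instance 0 is accepted)
 *	request.query_fw.index = 1;		(0 selects MEC, 1 selects MEC2)
 *
 * Passing that request to drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
 * sizeof(request)) fills fw.ver and fw.feature on success.
 */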
317 
318 static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
319 			     struct drm_amdgpu_info *info,
320 			     struct drm_amdgpu_info_hw_ip *result)
321 {
322 	uint32_t ib_start_alignment = 0;
323 	uint32_t ib_size_alignment = 0;
324 	enum amd_ip_block_type type;
325 	unsigned int num_rings = 0;
326 	unsigned int i, j;
327 
328 	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
329 		return -EINVAL;
330 
331 	switch (info->query_hw_ip.type) {
332 	case AMDGPU_HW_IP_GFX:
333 		type = AMD_IP_BLOCK_TYPE_GFX;
334 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
335 			if (adev->gfx.gfx_ring[i].sched.ready)
336 				++num_rings;
337 		ib_start_alignment = 32;
338 		ib_size_alignment = 32;
339 		break;
340 	case AMDGPU_HW_IP_COMPUTE:
341 		type = AMD_IP_BLOCK_TYPE_GFX;
342 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
343 			if (adev->gfx.compute_ring[i].sched.ready)
344 				++num_rings;
345 		ib_start_alignment = 32;
346 		ib_size_alignment = 32;
347 		break;
348 	case AMDGPU_HW_IP_DMA:
349 		type = AMD_IP_BLOCK_TYPE_SDMA;
350 		for (i = 0; i < adev->sdma.num_instances; i++)
351 			if (adev->sdma.instance[i].ring.sched.ready)
352 				++num_rings;
353 		ib_start_alignment = 256;
354 		ib_size_alignment = 4;
355 		break;
356 	case AMDGPU_HW_IP_UVD:
357 		type = AMD_IP_BLOCK_TYPE_UVD;
358 		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
359 			if (adev->uvd.harvest_config & (1 << i))
360 				continue;
361 
362 			if (adev->uvd.inst[i].ring.sched.ready)
363 				++num_rings;
364 		}
365 		ib_start_alignment = 64;
366 		ib_size_alignment = 64;
367 		break;
368 	case AMDGPU_HW_IP_VCE:
369 		type = AMD_IP_BLOCK_TYPE_VCE;
370 		for (i = 0; i < adev->vce.num_rings; i++)
371 			if (adev->vce.ring[i].sched.ready)
372 				++num_rings;
373 		ib_start_alignment = 4;
374 		ib_size_alignment = 1;
375 		break;
376 	case AMDGPU_HW_IP_UVD_ENC:
377 		type = AMD_IP_BLOCK_TYPE_UVD;
378 		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
379 			if (adev->uvd.harvest_config & (1 << i))
380 				continue;
381 
382 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
383 				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
384 					++num_rings;
385 		}
386 		ib_start_alignment = 64;
387 		ib_size_alignment = 64;
388 		break;
389 	case AMDGPU_HW_IP_VCN_DEC:
390 		type = AMD_IP_BLOCK_TYPE_VCN;
391 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
392 			if (adev->uvd.harvest_config & (1 << i))
393 				continue;
394 
395 			if (adev->vcn.inst[i].ring_dec.sched.ready)
396 				++num_rings;
397 		}
398 		ib_start_alignment = 16;
399 		ib_size_alignment = 16;
400 		break;
401 	case AMDGPU_HW_IP_VCN_ENC:
402 		type = AMD_IP_BLOCK_TYPE_VCN;
403 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
404 			if (adev->uvd.harvest_config & (1 << i))
405 				continue;
406 
407 			for (j = 0; j < adev->vcn.num_enc_rings; j++)
408 				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
409 					++num_rings;
410 		}
411 		ib_start_alignment = 64;
412 		ib_size_alignment = 1;
413 		break;
414 	case AMDGPU_HW_IP_VCN_JPEG:
415 		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
416 			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
417 
418 		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
419 			if (adev->jpeg.harvest_config & (1 << i))
420 				continue;
421 
422 			if (adev->jpeg.inst[i].ring_dec.sched.ready)
423 				++num_rings;
424 		}
425 		ib_start_alignment = 16;
426 		ib_size_alignment = 16;
427 		break;
428 	default:
429 		return -EINVAL;
430 	}
431 
432 	for (i = 0; i < adev->num_ip_blocks; i++)
433 		if (adev->ip_blocks[i].version->type == type &&
434 		    adev->ip_blocks[i].status.valid)
435 			break;
436 
437 	if (i == adev->num_ip_blocks)
438 		return 0;
439 
440 	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
441 			num_rings);
442 
443 	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
444 	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
445 	result->capabilities_flags = 0;
446 	result->available_rings = (1 << num_rings) - 1;
447 	result->ib_start_alignment = ib_start_alignment;
448 	result->ib_size_alignment = ib_size_alignment;
449 	return 0;
450 }
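
/*
 * Illustrative sketch, not part of the build: consuming the structure that
 * amdgpu_hw_ip_info() fills in.  "ip" is assumed to be a
 * struct drm_amdgpu_info_hw_ip obtained through the AMDGPU_INFO_HW_IP_INFO
 * query (see the usage sketch after amdgpu_info_ioctl() below).
 *
 *	unsigned int usable_rings = __builtin_popcount(ip.available_rings);
 *
 * available_rings is a contiguous bitmask with one bit per ready ring of the
 * requested IP type, and command submissions to that IP should honour
 * ip.ib_start_alignment and ip.ib_size_alignment.
 */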
451 
452 /*
453  * Userspace get information ioctl
454  */
455 /**
456  * amdgpu_info_ioctl - answer a device specific request.
457  *
458  * @dev: drm device pointer
459  * @data: request object
460  * @filp: drm filp
461  *
462  * This function is used to pass device-specific parameters to the userspace
463  * drivers.  Examples include: pci device id, pipeline params, tiling params,
464  * etc. (all asics).
465  * Returns 0 on success, -EINVAL on failure.
466  */
467 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
468 {
469 	struct amdgpu_device *adev = dev->dev_private;
470 	struct drm_amdgpu_info *info = data;
471 	struct amdgpu_mode_info *minfo = &adev->mode_info;
472 	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
473 	uint32_t size = info->return_size;
474 	struct drm_crtc *crtc;
475 	uint32_t ui32 = 0;
476 	uint64_t ui64 = 0;
477 	int i, found;
478 	int ui32_size = sizeof(ui32);
479 
480 	if (!info->return_size || !info->return_pointer)
481 		return -EINVAL;
482 
483 	switch (info->query) {
484 	case AMDGPU_INFO_ACCEL_WORKING:
485 		ui32 = adev->accel_working;
486 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
487 	case AMDGPU_INFO_CRTC_FROM_ID:
488 		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
489 			crtc = (struct drm_crtc *)minfo->crtcs[i];
490 			if (crtc && crtc->base.id == info->mode_crtc.id) {
491 				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
492 				ui32 = amdgpu_crtc->crtc_id;
493 				found = 1;
494 				break;
495 			}
496 		}
497 		if (!found) {
498 			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
499 			return -EINVAL;
500 		}
501 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
502 	case AMDGPU_INFO_HW_IP_INFO: {
503 		struct drm_amdgpu_info_hw_ip ip = {};
504 		int ret;
505 
506 		ret = amdgpu_hw_ip_info(adev, info, &ip);
507 		if (ret)
508 			return ret;
509 
510 		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
511 		return ret ? -EFAULT : 0;
512 	}
513 	case AMDGPU_INFO_HW_IP_COUNT: {
514 		enum amd_ip_block_type type;
515 		uint32_t count = 0;
516 
517 		switch (info->query_hw_ip.type) {
518 		case AMDGPU_HW_IP_GFX:
519 			type = AMD_IP_BLOCK_TYPE_GFX;
520 			break;
521 		case AMDGPU_HW_IP_COMPUTE:
522 			type = AMD_IP_BLOCK_TYPE_GFX;
523 			break;
524 		case AMDGPU_HW_IP_DMA:
525 			type = AMD_IP_BLOCK_TYPE_SDMA;
526 			break;
527 		case AMDGPU_HW_IP_UVD:
528 			type = AMD_IP_BLOCK_TYPE_UVD;
529 			break;
530 		case AMDGPU_HW_IP_VCE:
531 			type = AMD_IP_BLOCK_TYPE_VCE;
532 			break;
533 		case AMDGPU_HW_IP_UVD_ENC:
534 			type = AMD_IP_BLOCK_TYPE_UVD;
535 			break;
536 		case AMDGPU_HW_IP_VCN_DEC:
537 		case AMDGPU_HW_IP_VCN_ENC:
538 			type = AMD_IP_BLOCK_TYPE_VCN;
539 			break;
540 		case AMDGPU_HW_IP_VCN_JPEG:
541 			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
542 				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
543 			break;
544 		default:
545 			return -EINVAL;
546 		}
547 
548 		for (i = 0; i < adev->num_ip_blocks; i++)
549 			if (adev->ip_blocks[i].version->type == type &&
550 			    adev->ip_blocks[i].status.valid &&
551 			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
552 				count++;
553 
554 		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
555 	}
556 	case AMDGPU_INFO_TIMESTAMP:
557 		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
558 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
559 	case AMDGPU_INFO_FW_VERSION: {
560 		struct drm_amdgpu_info_firmware fw_info;
561 		int ret;
562 
563 		/* We only support one instance of each IP block right now. */
564 		if (info->query_fw.ip_instance != 0)
565 			return -EINVAL;
566 
567 		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
568 		if (ret)
569 			return ret;
570 
571 		return copy_to_user(out, &fw_info,
572 				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
573 	}
574 	case AMDGPU_INFO_NUM_BYTES_MOVED:
575 		ui64 = atomic64_read(&adev->num_bytes_moved);
576 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
577 	case AMDGPU_INFO_NUM_EVICTIONS:
578 		ui64 = atomic64_read(&adev->num_evictions);
579 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
580 	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
581 		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
582 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
583 	case AMDGPU_INFO_VRAM_USAGE:
584 		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
585 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
586 	case AMDGPU_INFO_VIS_VRAM_USAGE:
587 		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
588 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
589 	case AMDGPU_INFO_GTT_USAGE:
590 		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
591 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
592 	case AMDGPU_INFO_GDS_CONFIG: {
593 		struct drm_amdgpu_info_gds gds_info;
594 
595 		memset(&gds_info, 0, sizeof(gds_info));
596 		gds_info.compute_partition_size = adev->gds.gds_size;
597 		gds_info.gds_total_size = adev->gds.gds_size;
598 		gds_info.gws_per_compute_partition = adev->gds.gws_size;
599 		gds_info.oa_per_compute_partition = adev->gds.oa_size;
600 		return copy_to_user(out, &gds_info,
601 				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
602 	}
603 	case AMDGPU_INFO_VRAM_GTT: {
604 		struct drm_amdgpu_info_vram_gtt vram_gtt;
605 
606 		vram_gtt.vram_size = adev->gmc.real_vram_size -
607 			atomic64_read(&adev->vram_pin_size) -
608 			AMDGPU_VM_RESERVED_VRAM;
609 		vram_gtt.vram_cpu_accessible_size =
610 			min(adev->gmc.visible_vram_size -
611 			    atomic64_read(&adev->visible_pin_size),
612 			    vram_gtt.vram_size);
613 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
614 		vram_gtt.gtt_size *= PAGE_SIZE;
615 		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
616 		return copy_to_user(out, &vram_gtt,
617 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
618 	}
619 	case AMDGPU_INFO_MEMORY: {
620 		struct drm_amdgpu_memory_info mem;
621 
622 		memset(&mem, 0, sizeof(mem));
623 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
624 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
625 			atomic64_read(&adev->vram_pin_size) -
626 			AMDGPU_VM_RESERVED_VRAM;
627 		mem.vram.heap_usage =
628 			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
629 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
630 
631 		mem.cpu_accessible_vram.total_heap_size =
632 			adev->gmc.visible_vram_size;
633 		mem.cpu_accessible_vram.usable_heap_size =
634 			min(adev->gmc.visible_vram_size -
635 			    atomic64_read(&adev->visible_pin_size),
636 			    mem.vram.usable_heap_size);
637 		mem.cpu_accessible_vram.heap_usage =
638 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
639 		mem.cpu_accessible_vram.max_allocation =
640 			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
641 
642 		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
643 		mem.gtt.total_heap_size *= PAGE_SIZE;
644 		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
645 			atomic64_read(&adev->gart_pin_size);
646 		mem.gtt.heap_usage =
647 			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
648 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
649 
650 		return copy_to_user(out, &mem,
651 				    min((size_t)size, sizeof(mem)))
652 				    ? -EFAULT : 0;
653 	}
654 	case AMDGPU_INFO_READ_MMR_REG: {
655 		unsigned n, alloc_size;
656 		uint32_t *regs;
657 		unsigned se_num = (info->read_mmr_reg.instance >>
658 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
659 				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
660 		unsigned sh_num = (info->read_mmr_reg.instance >>
661 				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
662 				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
663 
664 		/* set full masks if userspace set all bits
665 		 * in the bitfields */
666 		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
667 			se_num = 0xffffffff;
668 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
669 			sh_num = 0xffffffff;
670 
671 		if (info->read_mmr_reg.count > 128)
672 			return -EINVAL;
673 
674 		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
675 		if (!regs)
676 			return -ENOMEM;
677 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
678 
679 		amdgpu_gfx_off_ctrl(adev, false);
680 		for (i = 0; i < info->read_mmr_reg.count; i++) {
681 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
682 						      info->read_mmr_reg.dword_offset + i,
683 						      &regs[i])) {
684 				DRM_DEBUG_KMS("unallowed offset %#x\n",
685 					      info->read_mmr_reg.dword_offset + i);
686 				kfree(regs);
687 				amdgpu_gfx_off_ctrl(adev, true);
688 				return -EFAULT;
689 			}
690 		}
691 		amdgpu_gfx_off_ctrl(adev, true);
692 		n = copy_to_user(out, regs, min(size, alloc_size));
693 		kfree(regs);
694 		return n ? -EFAULT : 0;
695 	}
696 	case AMDGPU_INFO_DEV_INFO: {
697 		struct drm_amdgpu_info_device dev_info = {};
698 		uint64_t vm_size;
699 
700 		dev_info.device_id = dev->pdev->device;
701 		dev_info.chip_rev = adev->rev_id;
702 		dev_info.external_rev = adev->external_rev_id;
703 		dev_info.pci_rev = dev->pdev->revision;
704 		dev_info.family = adev->family;
705 		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
706 		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
707 		/* return all clocks in KHz */
708 		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
709 		if (adev->pm.dpm_enabled) {
710 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
711 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
712 		} else {
713 			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
714 			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
715 		}
716 		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
717 		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
718 			adev->gfx.config.max_shader_engines;
719 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
720 		dev_info._pad = 0;
721 		dev_info.ids_flags = 0;
722 		if (adev->flags & AMD_IS_APU)
723 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
724 		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
725 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
726 
727 		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
728 		vm_size -= AMDGPU_VA_RESERVED_SIZE;
729 
730 		/* Older VCE FW versions are buggy and can handle only 40 bits */
731 		if (adev->vce.fw_version &&
732 		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
733 			vm_size = min(vm_size, 1ULL << 40);
734 
735 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
736 		dev_info.virtual_address_max =
737 			min(vm_size, AMDGPU_GMC_HOLE_START);
738 
739 		if (vm_size > AMDGPU_GMC_HOLE_START) {
740 			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
741 			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
742 		}
743 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
744 		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
745 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
746 		dev_info.cu_active_number = adev->gfx.cu_info.number;
747 		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
748 		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
749 		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
750 		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
751 		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
752 		       sizeof(adev->gfx.cu_info.bitmap));
753 		dev_info.vram_type = adev->gmc.vram_type;
754 		dev_info.vram_bit_width = adev->gmc.vram_width;
755 		dev_info.vce_harvest_config = adev->vce.harvest_config;
756 		dev_info.gc_double_offchip_lds_buf =
757 			adev->gfx.config.double_offchip_lds_buf;
758 		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
759 		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
760 		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
761 		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
762 		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
763 		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
764 		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
765 
766 		if (adev->family >= AMDGPU_FAMILY_NV)
767 			dev_info.pa_sc_tile_steering_override =
768 				adev->gfx.config.pa_sc_tile_steering_override;
769 
770 		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
771 
772 		return copy_to_user(out, &dev_info,
773 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
774 	}
775 	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
776 		unsigned i;
777 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
778 		struct amd_vce_state *vce_state;
779 
780 		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
781 			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
782 			if (vce_state) {
783 				vce_clk_table.entries[i].sclk = vce_state->sclk;
784 				vce_clk_table.entries[i].mclk = vce_state->mclk;
785 				vce_clk_table.entries[i].eclk = vce_state->evclk;
786 				vce_clk_table.num_valid_entries++;
787 			}
788 		}
789 
790 		return copy_to_user(out, &vce_clk_table,
791 				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
792 	}
793 	case AMDGPU_INFO_VBIOS: {
794 		uint32_t bios_size = adev->bios_size;
795 
796 		switch (info->vbios_info.type) {
797 		case AMDGPU_INFO_VBIOS_SIZE:
798 			return copy_to_user(out, &bios_size,
799 					min((size_t)size, sizeof(bios_size)))
800 					? -EFAULT : 0;
801 		case AMDGPU_INFO_VBIOS_IMAGE: {
802 			uint8_t *bios;
803 			uint32_t bios_offset = info->vbios_info.offset;
804 
805 			if (bios_offset >= bios_size)
806 				return -EINVAL;
807 
808 			bios = adev->bios + bios_offset;
809 			return copy_to_user(out, bios,
810 					    min((size_t)size, (size_t)(bios_size - bios_offset)))
811 					? -EFAULT : 0;
812 		}
813 		default:
814 			DRM_DEBUG_KMS("Invalid request %d\n",
815 					info->vbios_info.type);
816 			return -EINVAL;
817 		}
818 	}
819 	case AMDGPU_INFO_NUM_HANDLES: {
820 		struct drm_amdgpu_info_num_handles handle;
821 
822 		switch (info->query_hw_ip.type) {
823 		case AMDGPU_HW_IP_UVD:
824 			/* Starting with Polaris, we support unlimited UVD handles */
825 			if (adev->asic_type < CHIP_POLARIS10) {
826 				handle.uvd_max_handles = adev->uvd.max_handles;
827 				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
828 
829 				return copy_to_user(out, &handle,
830 					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
831 			} else {
832 				return -ENODATA;
833 			}
834 
835 			break;
836 		default:
837 			return -EINVAL;
838 		}
839 	}
840 	case AMDGPU_INFO_SENSOR: {
841 		if (!adev->pm.dpm_enabled)
842 			return -ENOENT;
843 
844 		switch (info->sensor_info.type) {
845 		case AMDGPU_INFO_SENSOR_GFX_SCLK:
846 			/* get sclk in MHz */
847 			if (amdgpu_dpm_read_sensor(adev,
848 						   AMDGPU_PP_SENSOR_GFX_SCLK,
849 						   (void *)&ui32, &ui32_size)) {
850 				return -EINVAL;
851 			}
852 			ui32 /= 100;
853 			break;
854 		case AMDGPU_INFO_SENSOR_GFX_MCLK:
855 			/* get mclk in MHz */
856 			if (amdgpu_dpm_read_sensor(adev,
857 						   AMDGPU_PP_SENSOR_GFX_MCLK,
858 						   (void *)&ui32, &ui32_size)) {
859 				return -EINVAL;
860 			}
861 			ui32 /= 100;
862 			break;
863 		case AMDGPU_INFO_SENSOR_GPU_TEMP:
864 			/* get temperature in millidegrees C */
865 			if (amdgpu_dpm_read_sensor(adev,
866 						   AMDGPU_PP_SENSOR_GPU_TEMP,
867 						   (void *)&ui32, &ui32_size)) {
868 				return -EINVAL;
869 			}
870 			break;
871 		case AMDGPU_INFO_SENSOR_GPU_LOAD:
872 			/* get GPU load */
873 			if (amdgpu_dpm_read_sensor(adev,
874 						   AMDGPU_PP_SENSOR_GPU_LOAD,
875 						   (void *)&ui32, &ui32_size)) {
876 				return -EINVAL;
877 			}
878 			break;
879 		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
880 			/* get average GPU power */
881 			if (amdgpu_dpm_read_sensor(adev,
882 						   AMDGPU_PP_SENSOR_GPU_POWER,
883 						   (void *)&ui32, &ui32_size)) {
884 				return -EINVAL;
885 			}
886 			ui32 >>= 8;
887 			break;
888 		case AMDGPU_INFO_SENSOR_VDDNB:
889 			/* get VDDNB in millivolts */
890 			if (amdgpu_dpm_read_sensor(adev,
891 						   AMDGPU_PP_SENSOR_VDDNB,
892 						   (void *)&ui32, &ui32_size)) {
893 				return -EINVAL;
894 			}
895 			break;
896 		case AMDGPU_INFO_SENSOR_VDDGFX:
897 			/* get VDDGFX in millivolts */
898 			if (amdgpu_dpm_read_sensor(adev,
899 						   AMDGPU_PP_SENSOR_VDDGFX,
900 						   (void *)&ui32, &ui32_size)) {
901 				return -EINVAL;
902 			}
903 			break;
904 		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
905 			/* get stable pstate sclk in MHz */
906 			if (amdgpu_dpm_read_sensor(adev,
907 						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
908 						   (void *)&ui32, &ui32_size)) {
909 				return -EINVAL;
910 			}
911 			ui32 /= 100;
912 			break;
913 		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
914 			/* get stable pstate mclk in MHz */
915 			if (amdgpu_dpm_read_sensor(adev,
916 						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
917 						   (void *)&ui32, &ui32_size)) {
918 				return -EINVAL;
919 			}
920 			ui32 /= 100;
921 			break;
922 		default:
923 			DRM_DEBUG_KMS("Invalid request %d\n",
924 				      info->sensor_info.type);
925 			return -EINVAL;
926 		}
927 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
928 	}
929 	case AMDGPU_INFO_VRAM_LOST_COUNTER:
930 		ui32 = atomic_read(&adev->vram_lost_counter);
931 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
932 	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
933 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
934 		uint64_t ras_mask;
935 
936 		if (!ras)
937 			return -EINVAL;
938 		ras_mask = (uint64_t)ras->supported << 32 | ras->features;
939 
940 		return copy_to_user(out, &ras_mask,
941 				min_t(u64, size, sizeof(ras_mask))) ?
942 			-EFAULT : 0;
943 	}
944 	default:
945 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
946 		return -EINVAL;
947 	}
948 	return 0;
949 }
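
/*
 * Illustrative userspace sketch, not part of the kernel build: issuing the
 * ioctl handled above through libdrm.  It assumes a descriptor "fd" opened
 * on the card or render node (for example via drmOpen() or open(2) on
 * /dev/dri/renderD128) and the usual <xf86drm.h> and <amdgpu_drm.h> headers.
 *
 *	uint64_t vram_usage = 0;
 *	struct drm_amdgpu_info request;
 *	int r;
 *
 *	memset(&request, 0, sizeof(request));
 *	request.return_pointer = (uintptr_t)&vram_usage;
 *	request.return_size = sizeof(vram_usage);
 *	request.query = AMDGPU_INFO_VRAM_USAGE;
 *	r = drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 *
 * On success (r == 0) vram_usage holds the current VRAM usage in bytes.  As
 * the handler only copies min(return_size, sizeof(result)) bytes to
 * return_pointer, an undersized return_size silently truncates the answer.
 */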
950 
951 
952 /*
953  * Outdated mess for old drm with Xorg being in charge (void function now).
954  */
955 /**
956  * amdgpu_driver_lastclose_kms - drm callback for last close
957  *
958  * @dev: drm dev pointer
959  *
960  * Switch vga_switcheroo state after last close (all asics).
961  */
962 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
963 {
964 	drm_fb_helper_lastclose(dev);
965 #ifndef __NetBSD__		/* XXX radeon vga */
966 	vga_switcheroo_process_delayed_switch();
967 #endif
968 }
969 
970 /**
971  * amdgpu_driver_open_kms - drm callback for open
972  *
973  * @dev: drm dev pointer
974  * @file_priv: drm file
975  *
976  * On device open, init vm on cayman+ (all asics).
977  * Returns 0 on success, error on failure.
978  */
979 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
980 {
981 	struct amdgpu_device *adev = dev->dev_private;
982 	struct amdgpu_fpriv *fpriv;
983 	int r, pasid;
984 
985 	/* Ensure IB tests are run on ring */
986 	flush_delayed_work(&adev->delayed_init_work);
987 
988 
989 	if (amdgpu_ras_intr_triggered()) {
990 		DRM_ERROR("RAS Intr triggered, device disabled!!");
991 		return -EHWPOISON;
992 	}
993 
994 	file_priv->driver_priv = NULL;
995 
996 	r = pm_runtime_get_sync(dev->dev);
997 	if (r < 0)
998 		return r;
999 
1000 	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
1001 	if (unlikely(!fpriv)) {
1002 		r = -ENOMEM;
1003 		goto out_suspend;
1004 	}
1005 
1006 	pasid = amdgpu_pasid_alloc(16);
1007 	if (pasid < 0) {
1008 		dev_warn(adev->dev, "No more PASIDs available!");
1009 		pasid = 0;
1010 	}
1011 	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
1012 	if (r)
1013 		goto error_pasid;
1014 
1015 	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
1016 	if (!fpriv->prt_va) {
1017 		r = -ENOMEM;
1018 		goto error_vm;
1019 	}
1020 
1021 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1022 		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
1023 
1024 		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
1025 						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
1026 		if (r)
1027 			goto error_vm;
1028 	}
1029 
1030 	mutex_init(&fpriv->bo_list_lock);
1031 	idr_init(&fpriv->bo_list_handles);
1032 
1033 	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
1034 
1035 	file_priv->driver_priv = fpriv;
1036 	goto out_suspend;
1037 
1038 error_vm:
1039 	amdgpu_vm_fini(adev, &fpriv->vm);
1040 
1041 error_pasid:
1042 	if (pasid)
1043 		amdgpu_pasid_free(pasid);
1044 
1045 	kfree(fpriv);
1046 
1047 out_suspend:
1048 	pm_runtime_mark_last_busy(dev->dev);
1049 	pm_runtime_put_autosuspend(dev->dev);
1050 
1051 	return r;
1052 }
1053 
1054 /**
1055  * amdgpu_driver_postclose_kms - drm callback for post close
1056  *
1057  * @dev: drm dev pointer
1058  * @file_priv: drm file
1059  *
1060  * On device post close, tear down vm on cayman+ (all asics).
1061  */
1062 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1063 				 struct drm_file *file_priv)
1064 {
1065 	struct amdgpu_device *adev = dev->dev_private;
1066 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
1067 	struct amdgpu_bo_list *list;
1068 	struct amdgpu_bo *pd;
1069 	unsigned int pasid;
1070 	int handle;
1071 
1072 	if (!fpriv)
1073 		return;
1074 
1075 	pm_runtime_get_sync(dev->dev);
1076 
1077 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
1078 		amdgpu_uvd_free_handles(adev, file_priv);
1079 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
1080 		amdgpu_vce_free_handles(adev, file_priv);
1081 
1082 	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
1083 
1084 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1085 		/* TODO: how to handle reserve failure */
1086 		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
1087 		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
1088 		fpriv->csa_va = NULL;
1089 		amdgpu_bo_unreserve(adev->virt.csa_obj);
1090 	}
1091 
1092 	pasid = fpriv->vm.pasid;
1093 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
1094 
1095 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
1096 	amdgpu_vm_fini(adev, &fpriv->vm);
1097 
1098 	if (pasid)
1099 		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
1100 	amdgpu_bo_unref(&pd);
1101 
1102 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
1103 		amdgpu_bo_list_put(list);
1104 
1105 	idr_destroy(&fpriv->bo_list_handles);
1106 	mutex_destroy(&fpriv->bo_list_lock);
1107 
1108 	kfree(fpriv);
1109 	file_priv->driver_priv = NULL;
1110 
1111 	pm_runtime_mark_last_busy(dev->dev);
1112 	pm_runtime_put_autosuspend(dev->dev);
1113 }
1114 
1115 /*
1116  * VBlank related functions.
1117  */
1118 /**
1119  * amdgpu_get_vblank_counter_kms - get frame count
1120  *
1121  * @dev: drm dev pointer
1122  * @pipe: crtc to get the frame count from
1123  *
1124  * Gets the frame count on the requested crtc (all asics).
1125  * Returns frame count on success, -EINVAL on failure.
1126  */
1127 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
1128 {
1129 	struct amdgpu_device *adev = dev->dev_private;
1130 	int vpos, hpos, stat;
1131 	u32 count;
1132 
1133 	if (pipe >= adev->mode_info.num_crtc) {
1134 		DRM_ERROR("Invalid crtc %u\n", pipe);
1135 		return -EINVAL;
1136 	}
1137 
1138 	/* The hw increments its frame counter at start of vsync, not at start
1139 	 * of vblank, as is required by DRM core vblank counter handling.
1140 	 * Cook the hw count here to make it appear to the caller as if it
1141 	 * incremented at start of vblank. We measure distance to start of
1142 	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
1143 	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
1144 	 * result by 1 to give the proper appearance to caller.
1145 	 */
1146 	if (adev->mode_info.crtcs[pipe]) {
1147 		/* Repeat readout if needed to provide stable result if
1148 		 * we cross start of vsync during the queries.
1149 		 */
1150 		do {
1151 			count = amdgpu_display_vblank_get_counter(adev, pipe);
1152 			/* Ask amdgpu_display_get_crtc_scanoutpos to return
1153 			 * vpos as distance to start of vblank, instead of
1154 			 * regular vertical scanout pos.
1155 			 */
1156 			stat = amdgpu_display_get_crtc_scanoutpos(
1157 				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
1158 				&vpos, &hpos, NULL, NULL,
1159 				&adev->mode_info.crtcs[pipe]->base.hwmode);
1160 		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
1161 
1162 		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
1163 		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
1164 			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
1165 		} else {
1166 			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
1167 				      pipe, vpos);
1168 
1169 			/* Bump counter if we are at >= leading edge of vblank,
1170 			 * but before vsync where vpos would turn negative and
1171 			 * the hw counter really increments.
1172 			 */
1173 			if (vpos >= 0)
1174 				count++;
1175 		}
1176 	} else {
1177 		/* Fallback to use value as is. */
1178 		count = amdgpu_display_vblank_get_counter(adev, pipe);
1179 		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
1180 	}
1181 
1182 	return count;
1183 }
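
/*
 * Worked example of the adjustment above, with illustrative numbers: if the
 * hardware frame counter reads 100 while the scanout position query reports
 * vpos >= 0, the scanline lies between the leading edge of vblank and the
 * start of vsync, where the hardware has not yet incremented.  The function
 * then returns 101, matching the DRM core's expectation of a counter that
 * ticks at the start of vblank.
 */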
1184 
1185 /**
1186  * amdgpu_enable_vblank_kms - enable vblank interrupt
1187  *
1188  * @dev: drm dev pointer
1189  * @pipe: crtc to enable vblank interrupt for
1190  *
1191  * Enable the interrupt on the requested crtc (all asics).
1192  * Returns 0 on success, -EINVAL on failure.
1193  */
1194 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1195 {
1196 	struct amdgpu_device *adev = dev->dev_private;
1197 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1198 
1199 	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
1200 }
1201 
1202 /**
1203  * amdgpu_disable_vblank_kms - disable vblank interrupt
1204  *
1205  * @dev: drm dev pointer
1206  * @pipe: crtc to disable vblank interrupt for
1207  *
1208  * Disable the interrupt on the requested crtc (all asics).
1209  */
1210 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1211 {
1212 	struct amdgpu_device *adev = dev->dev_private;
1213 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1214 
1215 	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
1216 }
1217 
1218 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
1219 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1220 	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1221 	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1222 	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
1223 	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1224 	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1225 	/* KMS */
1226 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1227 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1228 	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1229 	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1230 	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1231 	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1232 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1233 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1234 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1235 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
1236 };
1237 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
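
/*
 * Illustrative sketch, not part of the build: each DRM_IOCTL_DEF_DRV entry
 * above is reached from userspace through the matching DRM_IOCTL_AMDGPU_*
 * request number defined in the amdgpu_drm.h UAPI header, usually via a
 * libdrm wrapper rather than a raw ioctl(2).  For example, assuming an open
 * device descriptor "fd":
 *
 *	union drm_amdgpu_gem_create args;
 *	int r;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.bo_size = 4096;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
 *
 * On success (r == 0) the new GEM handle is returned in args.out.handle.
 */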
1238 
1239 /*
1240  * Debugfs info
1241  */
1242 #if defined(CONFIG_DEBUG_FS)
1243 
1244 static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
1245 {
1246 	struct drm_info_node *node = (struct drm_info_node *) m->private;
1247 	struct drm_device *dev = node->minor->dev;
1248 	struct amdgpu_device *adev = dev->dev_private;
1249 	struct drm_amdgpu_info_firmware fw_info;
1250 	struct drm_amdgpu_query_fw query_fw;
1251 	struct atom_context *ctx = adev->mode_info.atom_context;
1252 	int ret, i;
1253 
1254 	/* VCE */
1255 	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
1256 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1257 	if (ret)
1258 		return ret;
1259 	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
1260 		   fw_info.feature, fw_info.ver);
1261 
1262 	/* UVD */
1263 	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
1264 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1265 	if (ret)
1266 		return ret;
1267 	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
1268 		   fw_info.feature, fw_info.ver);
1269 
1270 	/* GMC */
1271 	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
1272 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1273 	if (ret)
1274 		return ret;
1275 	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
1276 		   fw_info.feature, fw_info.ver);
1277 
1278 	/* ME */
1279 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
1280 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1281 	if (ret)
1282 		return ret;
1283 	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
1284 		   fw_info.feature, fw_info.ver);
1285 
1286 	/* PFP */
1287 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
1288 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1289 	if (ret)
1290 		return ret;
1291 	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
1292 		   fw_info.feature, fw_info.ver);
1293 
1294 	/* CE */
1295 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
1296 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1297 	if (ret)
1298 		return ret;
1299 	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
1300 		   fw_info.feature, fw_info.ver);
1301 
1302 	/* RLC */
1303 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
1304 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1305 	if (ret)
1306 		return ret;
1307 	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
1308 		   fw_info.feature, fw_info.ver);
1309 
1310 	/* RLC SAVE RESTORE LIST CNTL */
1311 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
1312 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1313 	if (ret)
1314 		return ret;
1315 	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
1316 		   fw_info.feature, fw_info.ver);
1317 
1318 	/* RLC SAVE RESTORE LIST GPM MEM */
1319 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
1320 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1321 	if (ret)
1322 		return ret;
1323 	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
1324 		   fw_info.feature, fw_info.ver);
1325 
1326 	/* RLC SAVE RESTORE LIST SRM MEM */
1327 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
1328 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1329 	if (ret)
1330 		return ret;
1331 	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
1332 		   fw_info.feature, fw_info.ver);
1333 
1334 	/* MEC */
1335 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
1336 	query_fw.index = 0;
1337 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1338 	if (ret)
1339 		return ret;
1340 	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
1341 		   fw_info.feature, fw_info.ver);
1342 
1343 	/* MEC2 */
1344 	if (adev->asic_type == CHIP_KAVERI ||
1345 	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
1346 		query_fw.index = 1;
1347 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1348 		if (ret)
1349 			return ret;
1350 		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
1351 			   fw_info.feature, fw_info.ver);
1352 	}
1353 
1354 	/* PSP SOS */
1355 	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
1356 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1357 	if (ret)
1358 		return ret;
1359 	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
1360 		   fw_info.feature, fw_info.ver);
1361 
1362 
1363 	/* PSP ASD */
1364 	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
1365 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1366 	if (ret)
1367 		return ret;
1368 	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
1369 		   fw_info.feature, fw_info.ver);
1370 
1371 	query_fw.fw_type = AMDGPU_INFO_FW_TA;
1372 	for (i = 0; i < 2; i++) {
1373 		query_fw.index = i;
1374 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1375 		if (ret)
1376 			continue;
1377 		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
1378 				i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
1379 	}
1380 
1381 	/* SMC */
1382 	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
1383 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1384 	if (ret)
1385 		return ret;
1386 	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
1387 		   fw_info.feature, fw_info.ver);
1388 
1389 	/* SDMA */
1390 	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
1391 	for (i = 0; i < adev->sdma.num_instances; i++) {
1392 		query_fw.index = i;
1393 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1394 		if (ret)
1395 			return ret;
1396 		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
1397 			   i, fw_info.feature, fw_info.ver);
1398 	}
1399 
1400 	/* VCN */
1401 	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
1402 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1403 	if (ret)
1404 		return ret;
1405 	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
1406 		   fw_info.feature, fw_info.ver);
1407 
1408 	/* DMCU */
1409 	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
1410 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1411 	if (ret)
1412 		return ret;
1413 	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
1414 		   fw_info.feature, fw_info.ver);
1415 
1416 	/* DMCUB */
1417 	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
1418 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1419 	if (ret)
1420 		return ret;
1421 	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
1422 		   fw_info.feature, fw_info.ver);
1423 
1424 
1425 	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
1426 
1427 	return 0;
1428 }
1429 
1430 static const struct drm_info_list amdgpu_firmware_info_list[] = {
1431 	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
1432 };
1433 #endif
1434 
1435 int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
1436 {
1437 #if defined(CONFIG_DEBUG_FS)
1438 	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
1439 					ARRAY_SIZE(amdgpu_firmware_info_list));
1440 #else
1441 	return 0;
1442 #endif
1443 }
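
/*
 * Usage note, assuming the standard DRM debugfs layout: with CONFIG_DEBUG_FS
 * enabled, the entry registered above appears as
 * <debugfs>/dri/<minor>/amdgpu_firmware_info and prints one feature/firmware
 * version line per query made by amdgpu_debugfs_firmware_info(), plus the
 * VBIOS version string.
 */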
1444