xref: /dragonfly/sys/dev/drm/radeon/radeon_kms.c (revision 3f2dd94a)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <drm/drmP.h>
29 #include "radeon.h"
30 #include <drm/radeon_drm.h>
31 #include "radeon_asic.h"
32 #include "radeon_kms.h"
33 
34 #include <linux/vga_switcheroo.h>
35 #include <linux/slab.h>
36 #include <linux/pm_runtime.h>
37 
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
/* Without vga_switcheroo there can be no ATPX method to query. */
static inline bool radeon_has_atpx(void)
{
	return false;
}
#endif
43 
44 /**
45  * radeon_driver_unload_kms - Main unload function for KMS.
46  *
47  * @dev: drm dev pointer
48  *
49  * This is the main unload function for KMS (all asics).
50  * It calls radeon_modeset_fini() to tear down the
51  * displays, and radeon_device_fini() to tear down
52  * the rest of the device (CP, writeback, etc.).
53  * Returns 0 on success.
54  */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Load never attached a radeon_device: nothing to tear down. */
	if (rdev == NULL)
		return;

	/* rmmio == NULL means init failed before the MMIO mapping was
	 * established, so the sub-systems below were never brought up;
	 * skip straight to freeing the device structure. */
	if (rdev->rmmio == NULL)
		goto done_free;

#ifdef PM_TODO
	if (radeon_is_px(dev)) {
		/* Wake the GPU and block runtime PM while unloading. */
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}
#endif

	/* Tear down in reverse order of load: ACPI hooks, then the
	 * display side, then the core device (CP, writeback, etc.). */
	radeon_acpi_fini(rdev);
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	/* XXX pending drm update, after this accessing pdev is illegal! */
	drm_fini_pdev(&dev->pdev);
	kfree(rdev);
	dev->dev_private = NULL;
}
82 
83 /**
84  * radeon_driver_load_kms - Main load function for KMS.
85  *
86  * @dev: drm dev pointer
87  * @flags: device flags
88  *
89  * This is the main load function for KMS (all asics).
90  * It calls radeon_device_init() to set up the non-display
91  * parts of the chip (asic init, CP, writeback, etc.), and
92  * radeon_modeset_init() to set up the display parts
93  * (crtcs, encoders, hotplug detect, etc.).
94  * Returns 0 on success, error on failure.
95  */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	/* Honor radeon.si_support=0: refuse SI-family parts so another
	 * driver (amdgpu) can claim them. */
	if (!radeon_si_support) {
		switch (flags & RADEON_FAMILY_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support disabled by module param\n");
			return -ENODEV;
		}
	}
	/* Same for radeon.cik_support=0 and CIK-family parts. */
	if (!radeon_cik_support) {
		switch (flags & RADEON_FAMILY_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support disabled by module param\n");
			return -ENODEV;
		}
	}

#ifdef CONFIG_DRM_AMDGPU_CIK
	/* When amdgpu was built with CIK support it is the preferred
	 * owner of these chips; tell the user how to override.
	 * NOTE(review): this repeats the !radeon_cik_support check above,
	 * so with the default parameters only one message can trigger. */
	if (!radeon_cik_support) {
		switch (flags & RADEON_FAMILY_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by amdgpu.\n");
			dev_info(dev->dev,
		"Use radeon.cik_support=1 amdgpu.cik_support=0 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		DRM_INFO("RADEON_IS_AGP\n");
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->dev->bsddev)) {
		DRM_INFO("RADEON_IS_PCIE\n");
		flags |= RADEON_IS_PCIE;
	} else {
		DRM_INFO("RADEON_IS_PCI\n");
		flags |= RADEON_IS_PCI;
	}

#ifdef PM_TODO
	/* Runtime PM (PX dGPU power-off) applies only to discrete ATPX
	 * laptops; not yet ported to DragonFly. */
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(rdev->pdev))
		flags |= RADEON_IS_PX;
#endif

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

#ifdef PM_TODO
	if (radeon_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}
#endif

out:
	/* Any fatal error unwinds through the common unload path. */
	if (r)
		radeon_driver_unload_kms(dev);


	return r;
}
218 
219 /**
220  * radeon_set_filp_rights - Set filp right.
221  *
222  * @dev: drm dev pointer
223  * @owner: drm file
224  * @applier: drm file
225  * @value: value
226  *
227  * Sets the filp rights for the device (all asics).
228  */
radeon_set_filp_rights(struct drm_device * dev,struct drm_file ** owner,struct drm_file * applier,uint32_t * value)229 static void radeon_set_filp_rights(struct drm_device *dev,
230 				   struct drm_file **owner,
231 				   struct drm_file *applier,
232 				   uint32_t *value)
233 {
234 	struct radeon_device *rdev = dev->dev_private;
235 
236 	mutex_lock(&rdev->gem.mutex);
237 	if (*value == 1) {
238 		/* wants rights */
239 		if (!*owner)
240 			*owner = applier;
241 	} else if (*value == 0) {
242 		/* revokes rights */
243 		if (*owner == applier)
244 			*owner = NULL;
245 	}
246 	*value = *owner == applier ? 1 : 0;
247 	mutex_unlock(&rdev->gem.mutex);
248 }
249 
250 /*
251  * Userspace get information ioctl
252  */
253 /**
254  * radeon_info_ioctl - answer a device specific request.
255  *
256  * @rdev: radeon device pointer
257  * @data: request object
258  * @filp: drm filp
259  *
260  * This function is used to pass device specific parameters to the userspace
261  * drivers.  Examples include: pci device id, pipeline parms, tiling params,
262  * etc. (all asics).
263  * Returns 0 on success, -EINVAL on failure.
264  */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	/* "value" points at the data to copy back to userspace.  By default
	 * it is the 32-bit value_tmp; some requests redirect it to the
	 * 64-bit value64 scratch or to an in-kernel array, adjusting
	 * value_size to match before the final copy_to_user. */
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	/* info->value carries a userspace pointer encoded in a u64. */
	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		/* Input: drm crtc object id; output: radeon crtc index. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		/* Hawaii uses 2/3 to distinguish old vs new firmware;
		 * other asics just report the boolean accel state. */
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		/* 64-bit result: repoint value/value_size at value64. */
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		/* Input selects the ring class to query. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		/* Array result: point value at the in-kernel table. */
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		/* NOTE(review): on pre-SI families *value is left at its
		 * uninitialized value_tmp while still returning success —
		 * matches upstream, but worth confirming. */
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->vram_usage);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->gtt_usage);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			*value = 1;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		/* Input: register offset; output: register contents, but
		 * only for registers on the driver's allow-list. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	/* Single exit path: copy whatever value/value_size now describe. */
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
644 
645 
646 /*
647  * Outdated mess for old drm with Xorg being in charge (void function now).
648  */
649 /**
650  * radeon_driver_lastclose_kms - drm callback for last close
651  *
652  * @dev: drm dev pointer
653  *
654  * Switch vga_switcheroo state after last close (all asics).
655  */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Put the fbdev console mode back on screen, then let
	 * vga_switcheroo perform any switch that was deferred while
	 * a client still had the device open. */
	radeon_fbdev_restore_mode(rdev);
	vga_switcheroo_process_delayed_switch();
}
663 
664 /**
665  * radeon_driver_open_kms - drm callback for open
666  *
667  * @dev: drm dev pointer
668  * @file_priv: drm file
669  *
670  * On device open, init vm on cayman+ (all asics).
671  * Returns 0 on success, error on failure.
672  */
radeon_driver_open_kms(struct drm_device * dev,struct drm_file * file_priv)673 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
674 {
675 	struct radeon_device *rdev = dev->dev_private;
676 	int r;
677 
678 	file_priv->driver_priv = NULL;
679 
680 	r = pm_runtime_get_sync(dev->dev);
681 	if (r < 0)
682 		return r;
683 
684 	/* new gpu have virtual address space support */
685 	if (rdev->family >= CHIP_CAYMAN) {
686 		struct radeon_fpriv *fpriv;
687 		struct radeon_vm *vm;
688 
689 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
690 		if (unlikely(!fpriv)) {
691 			r = -ENOMEM;
692 			goto out_suspend;
693 		}
694 
695 		if (rdev->accel_working) {
696 			vm = &fpriv->vm;
697 			r = radeon_vm_init(rdev, vm);
698 			if (r) {
699 				kfree(fpriv);
700 				goto out_suspend;
701 			}
702 
703 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
704 			if (r) {
705 				radeon_vm_fini(rdev, vm);
706 				kfree(fpriv);
707 				goto out_suspend;
708 			}
709 
710 			/* map the ib pool buffer read only into
711 			 * virtual address space */
712 			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
713 							rdev->ring_tmp_bo.bo);
714 			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
715 						  RADEON_VA_IB_OFFSET,
716 						  RADEON_VM_PAGE_READABLE |
717 						  RADEON_VM_PAGE_SNOOPED);
718 			if (r) {
719 				radeon_vm_fini(rdev, vm);
720 				kfree(fpriv);
721 				goto out_suspend;
722 			}
723 		}
724 		file_priv->driver_priv = fpriv;
725 	}
726 
727 out_suspend:
728 	pm_runtime_mark_last_busy(dev->dev);
729 	pm_runtime_put_autosuspend(dev->dev);
730 	return r;
731 }
732 
733 /**
734  * radeon_driver_postclose_kms - drm callback for post close
735  *
736  * @dev: drm dev pointer
737  * @file_priv: drm file
738  *
739  * On device close, tear down hyperz and cmask filps on r1xx-r5xx
740  * (all asics).  And tear down vm on cayman+ (all asics).
741  */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Keep the GPU awake while tearing down this client's state. */
	pm_runtime_get_sync(dev->dev);

	/* Drop hyper-z / cmask ownership if this filp held it. */
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	/* Release any UVD/VCE session handles the client left open. */
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			/* Unmap the ib pool buffer from this VM before
			 * tearing the VM down; reserve failure just skips
			 * the unmap (best effort). */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
781 
782 /*
783  * VBlank related functions.
784  */
785 /**
786  * radeon_get_vblank_counter_kms - get frame count
787  *
788  * @dev: drm dev pointer
789  * @pipe: crtc to get the frame count from
790  *
791  * Gets the frame count on the requested crtc (all asics).
792  * Returns frame count on success, -EINVAL on failure.
793  */
/* Forward declaration to satisfy -Wmissing-prototypes. */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		}
		else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	}
	else {
	    /* Fallback to use value as is. */
	    count = radeon_get_vblank_counter(rdev, pipe);
	    DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
854 
855 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
856 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
857 
858 /**
859  * radeon_enable_vblank_kms - enable vblank interrupt
860  *
861  * @dev: drm dev pointer
862  * @crtc: crtc to enable vblank interrupt for
863  *
864  * Enable the interrupt on the requested crtc (all asics).
865  * Returns 0 on success, -EINVAL on failure.
866  */
radeon_enable_vblank_kms(struct drm_device * dev,int crtc)867 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
868 {
869 	struct radeon_device *rdev = dev->dev_private;
870 	unsigned long irqflags;
871 	int r;
872 
873 	if (crtc < 0 || crtc >= rdev->num_crtc) {
874 		DRM_ERROR("Invalid crtc %d\n", crtc);
875 		return -EINVAL;
876 	}
877 
878 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
879 	rdev->irq.crtc_vblank_int[crtc] = true;
880 	r = radeon_irq_set(rdev);
881 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
882 	return r;
883 }
884 
885 /**
886  * radeon_disable_vblank_kms - disable vblank interrupt
887  *
888  * @dev: drm dev pointer
889  * @crtc: crtc to disable vblank interrupt for
890  *
891  * Disable the interrupt on the requested crtc (all asics).
892  */
radeon_disable_vblank_kms(struct drm_device * dev,int crtc)893 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
894 {
895 	struct radeon_device *rdev = dev->dev_private;
896 	unsigned long irqflags;
897 
898 	if (crtc < 0 || crtc >= rdev->num_crtc) {
899 		DRM_ERROR("Invalid crtc %d\n", crtc);
900 		return;
901 	}
902 
903 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
904 	rdev->irq.crtc_vblank_int[crtc] = false;
905 	radeon_irq_set(rdev);
906 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
907 }
908 
/* Ioctl dispatch table.  The legacy pre-KMS (UMS) entries are kept only
 * so their ioctl numbers stay allocated; they all reject with
 * drm_invalid_op.  The live KMS entries follow the "KMS" marker. */
const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
#if 0
	/* userptr support not wired up on this platform */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
#endif
};
/* Element count of the table above, referenced by the drm driver struct. */
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
957