1 /* $NetBSD: radeon_kms.c,v 1.5 2021/12/18 23:45:43 riastradh Exp $ */
2
3 /*
4 * Copyright 2008 Advanced Micro Devices, Inc.
5 * Copyright 2008 Red Hat Inc.
6 * Copyright 2009 Jerome Glisse.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors: Dave Airlie
27 * Alex Deucher
28 * Jerome Glisse
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: radeon_kms.c,v 1.5 2021/12/18 23:45:43 riastradh Exp $");
33
34 #include <linux/pci.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/slab.h>
37 #include <linux/uaccess.h>
38 #include <linux/vga_switcheroo.h>
39
40 #include <drm/drm_fb_helper.h>
41 #include <drm/drm_file.h>
42 #include <drm/drm_ioctl.h>
43 #include <drm/radeon_drm.h>
44
45 #include "radeon.h"
46 #include "radeon_asic.h"
47
#if defined(CONFIG_VGA_SWITCHEROO)
/* Provided by the ATPX handler when VGA switcheroo support is built in. */
bool radeon_has_atpx(void);
#else
/* Without VGA switcheroo there is no ATPX method; report it as absent. */
static inline bool radeon_has_atpx(void) { return false; }
#endif
53
54 /**
55 * radeon_driver_unload_kms - Main unload function for KMS.
56 *
57 * @dev: drm dev pointer
58 *
59 * This is the main unload function for KMS (all asics).
60 * It calls radeon_modeset_fini() to tear down the
61 * displays, and radeon_device_fini() to tear down
62 * the rest of the device (CP, writeback, etc.).
63 * Returns 0 on success.
64 */
radeon_driver_unload_kms(struct drm_device * dev)65 void radeon_driver_unload_kms(struct drm_device *dev)
66 {
67 struct radeon_device *rdev = dev->dev_private;
68
69 if (rdev == NULL)
70 return;
71
72 #ifdef __NetBSD__
73 /* XXX ugh */
74 if (rdev->rmmio_size)
75 goto done_free;
76 #else
77 if (rdev->rmmio == NULL)
78 goto done_free;
79 #endif
80
81 if (radeon_is_px(dev)) {
82 pm_runtime_get_sync(dev->dev);
83 pm_runtime_forbid(dev->dev);
84 }
85
86 radeon_acpi_fini(rdev);
87
88 radeon_modeset_fini(rdev);
89 radeon_device_fini(rdev);
90
91 done_free:
92 kfree(rdev);
93 dev->dev_private = NULL;
94 }
95
96 /**
97 * radeon_driver_load_kms - Main load function for KMS.
98 *
99 * @dev: drm dev pointer
100 * @flags: device flags
101 *
102 * This is the main load function for KMS (all asics).
103 * It calls radeon_device_init() to set up the non-display
104 * parts of the chip (asic init, CP, writeback, etc.), and
105 * radeon_modeset_init() to set up the display parts
106 * (crtcs, encoders, hotplug detect, etc.).
107 * Returns 0 on success, error on failure.
108 */
radeon_driver_load_kms(struct drm_device * dev,unsigned long flags)109 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
110 {
111 struct radeon_device *rdev;
112 int r, acpi_status;
113
114 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
115 if (rdev == NULL) {
116 return -ENOMEM;
117 }
118 dev->dev_private = (void *)rdev;
119
120 /* update BUS flag */
121 if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
122 flags |= RADEON_IS_AGP;
123 } else if (pci_is_pcie(dev->pdev)) {
124 flags |= RADEON_IS_PCIE;
125 } else {
126 flags |= RADEON_IS_PCI;
127 }
128
129 if ((radeon_runtime_pm != 0) &&
130 radeon_has_atpx() &&
131 ((flags & RADEON_IS_IGP) == 0) &&
132 !pci_is_thunderbolt_attached(dev->pdev))
133 flags |= RADEON_IS_PX;
134
135 /* radeon_device_init should report only fatal error
136 * like memory allocation failure or iomapping failure,
137 * or memory manager initialization failure, it must
138 * properly initialize the GPU MC controller and permit
139 * VRAM allocation
140 */
141 r = radeon_device_init(rdev, dev, dev->pdev, flags);
142 if (r) {
143 dev_err(dev->dev, "Fatal error during GPU init\n");
144 goto out;
145 }
146
147 /* Again modeset_init should fail only on fatal error
148 * otherwise it should provide enough functionalities
149 * for shadowfb to run
150 */
151 r = radeon_modeset_init(rdev);
152 if (r)
153 dev_err(dev->dev, "Fatal error during modeset init\n");
154
155 /* Call ACPI methods: require modeset init
156 * but failure is not fatal
157 */
158 if (!r) {
159 acpi_status = radeon_acpi_init(rdev);
160 if (acpi_status)
161 dev_dbg(dev->dev,
162 "Error during ACPI methods call\n");
163 }
164
165 if (radeon_is_px(dev)) {
166 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
167 pm_runtime_use_autosuspend(dev->dev);
168 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
169 pm_runtime_set_active(dev->dev);
170 pm_runtime_allow(dev->dev);
171 pm_runtime_mark_last_busy(dev->dev);
172 pm_runtime_put_autosuspend(dev->dev);
173 }
174
175 out:
176 if (r)
177 radeon_driver_unload_kms(dev);
178
179
180 return r;
181 }
182
183 /**
184 * radeon_set_filp_rights - Set filp right.
185 *
186 * @dev: drm dev pointer
187 * @owner: drm file
188 * @applier: drm file
189 * @value: value
190 *
191 * Sets the filp rights for the device (all asics).
192 */
radeon_set_filp_rights(struct drm_device * dev,struct drm_file ** owner,struct drm_file * applier,uint32_t * value)193 static void radeon_set_filp_rights(struct drm_device *dev,
194 struct drm_file **owner,
195 struct drm_file *applier,
196 uint32_t *value)
197 {
198 struct radeon_device *rdev = dev->dev_private;
199
200 mutex_lock(&rdev->gem.mutex);
201 if (*value == 1) {
202 /* wants rights */
203 if (!*owner)
204 *owner = applier;
205 } else if (*value == 0) {
206 /* revokes rights */
207 if (*owner == applier)
208 *owner = NULL;
209 }
210 *value = *owner == applier ? 1 : 0;
211 mutex_unlock(&rdev->gem.mutex);
212 }
213
214 /*
215 * Userspace get information ioctl
216 */
217 /**
218 * radeon_info_ioctl - answer a device specific request.
219 *
220 * @rdev: radeon device pointer
221 * @data: request object
222 * @filp: drm filp
223 *
224 * This function is used to pass device specific parameters to the userspace
225 * drivers. Examples include: pci device id, pipeline parms, tiling params,
226 * etc. (all asics).
227 * Returns 0 on success, -EINVAL on failure.
228 */
radeon_info_ioctl(struct drm_device * dev,void * data,struct drm_file * filp)229 static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
230 {
231 struct radeon_device *rdev = dev->dev_private;
232 struct drm_radeon_info *info = data;
233 struct radeon_mode_info *minfo = &rdev->mode_info;
234 uint32_t *value, value_tmp, *value_ptr, value_size;
235 uint64_t value64;
236 struct drm_crtc *crtc;
237 int i, found;
238
239 value_ptr = (uint32_t *)((unsigned long)info->value);
240 value = &value_tmp;
241 value_size = sizeof(uint32_t);
242
243 switch (info->request) {
244 case RADEON_INFO_DEVICE_ID:
245 *value = dev->pdev->device;
246 break;
247 case RADEON_INFO_NUM_GB_PIPES:
248 *value = rdev->num_gb_pipes;
249 break;
250 case RADEON_INFO_NUM_Z_PIPES:
251 *value = rdev->num_z_pipes;
252 break;
253 case RADEON_INFO_ACCEL_WORKING:
254 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
255 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
256 *value = false;
257 else
258 *value = rdev->accel_working;
259 break;
260 case RADEON_INFO_CRTC_FROM_ID:
261 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
262 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
263 return -EFAULT;
264 }
265 for (i = 0, found = 0; i < rdev->num_crtc; i++) {
266 crtc = (struct drm_crtc *)minfo->crtcs[i];
267 if (crtc && crtc->base.id == *value) {
268 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
269 *value = radeon_crtc->crtc_id;
270 found = 1;
271 break;
272 }
273 }
274 if (!found) {
275 DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
276 return -EINVAL;
277 }
278 break;
279 case RADEON_INFO_ACCEL_WORKING2:
280 if (rdev->family == CHIP_HAWAII) {
281 if (rdev->accel_working) {
282 if (rdev->new_fw)
283 *value = 3;
284 else
285 *value = 2;
286 } else {
287 *value = 0;
288 }
289 } else {
290 *value = rdev->accel_working;
291 }
292 break;
293 case RADEON_INFO_TILING_CONFIG:
294 if (rdev->family >= CHIP_BONAIRE)
295 *value = rdev->config.cik.tile_config;
296 else if (rdev->family >= CHIP_TAHITI)
297 *value = rdev->config.si.tile_config;
298 else if (rdev->family >= CHIP_CAYMAN)
299 *value = rdev->config.cayman.tile_config;
300 else if (rdev->family >= CHIP_CEDAR)
301 *value = rdev->config.evergreen.tile_config;
302 else if (rdev->family >= CHIP_RV770)
303 *value = rdev->config.rv770.tile_config;
304 else if (rdev->family >= CHIP_R600)
305 *value = rdev->config.r600.tile_config;
306 else {
307 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
308 return -EINVAL;
309 }
310 break;
311 case RADEON_INFO_WANT_HYPERZ:
312 /* The "value" here is both an input and output parameter.
313 * If the input value is 1, filp requests hyper-z access.
314 * If the input value is 0, filp revokes its hyper-z access.
315 *
316 * When returning, the value is 1 if filp owns hyper-z access,
317 * 0 otherwise. */
318 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
319 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
320 return -EFAULT;
321 }
322 if (*value >= 2) {
323 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
324 return -EINVAL;
325 }
326 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
327 break;
328 case RADEON_INFO_WANT_CMASK:
329 /* The same logic as Hyper-Z. */
330 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
331 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
332 return -EFAULT;
333 }
334 if (*value >= 2) {
335 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
336 return -EINVAL;
337 }
338 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
339 break;
340 case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
341 /* return clock value in KHz */
342 if (rdev->asic->get_xclk)
343 *value = radeon_get_xclk(rdev) * 10;
344 else
345 *value = rdev->clock.spll.reference_freq * 10;
346 break;
347 case RADEON_INFO_NUM_BACKENDS:
348 if (rdev->family >= CHIP_BONAIRE)
349 *value = rdev->config.cik.max_backends_per_se *
350 rdev->config.cik.max_shader_engines;
351 else if (rdev->family >= CHIP_TAHITI)
352 *value = rdev->config.si.max_backends_per_se *
353 rdev->config.si.max_shader_engines;
354 else if (rdev->family >= CHIP_CAYMAN)
355 *value = rdev->config.cayman.max_backends_per_se *
356 rdev->config.cayman.max_shader_engines;
357 else if (rdev->family >= CHIP_CEDAR)
358 *value = rdev->config.evergreen.max_backends;
359 else if (rdev->family >= CHIP_RV770)
360 *value = rdev->config.rv770.max_backends;
361 else if (rdev->family >= CHIP_R600)
362 *value = rdev->config.r600.max_backends;
363 else {
364 return -EINVAL;
365 }
366 break;
367 case RADEON_INFO_NUM_TILE_PIPES:
368 if (rdev->family >= CHIP_BONAIRE)
369 *value = rdev->config.cik.max_tile_pipes;
370 else if (rdev->family >= CHIP_TAHITI)
371 *value = rdev->config.si.max_tile_pipes;
372 else if (rdev->family >= CHIP_CAYMAN)
373 *value = rdev->config.cayman.max_tile_pipes;
374 else if (rdev->family >= CHIP_CEDAR)
375 *value = rdev->config.evergreen.max_tile_pipes;
376 else if (rdev->family >= CHIP_RV770)
377 *value = rdev->config.rv770.max_tile_pipes;
378 else if (rdev->family >= CHIP_R600)
379 *value = rdev->config.r600.max_tile_pipes;
380 else {
381 return -EINVAL;
382 }
383 break;
384 case RADEON_INFO_FUSION_GART_WORKING:
385 *value = 1;
386 break;
387 case RADEON_INFO_BACKEND_MAP:
388 if (rdev->family >= CHIP_BONAIRE)
389 *value = rdev->config.cik.backend_map;
390 else if (rdev->family >= CHIP_TAHITI)
391 *value = rdev->config.si.backend_map;
392 else if (rdev->family >= CHIP_CAYMAN)
393 *value = rdev->config.cayman.backend_map;
394 else if (rdev->family >= CHIP_CEDAR)
395 *value = rdev->config.evergreen.backend_map;
396 else if (rdev->family >= CHIP_RV770)
397 *value = rdev->config.rv770.backend_map;
398 else if (rdev->family >= CHIP_R600)
399 *value = rdev->config.r600.backend_map;
400 else {
401 return -EINVAL;
402 }
403 break;
404 case RADEON_INFO_VA_START:
405 /* this is where we report if vm is supported or not */
406 if (rdev->family < CHIP_CAYMAN)
407 return -EINVAL;
408 *value = RADEON_VA_RESERVED_SIZE;
409 break;
410 case RADEON_INFO_IB_VM_MAX_SIZE:
411 /* this is where we report if vm is supported or not */
412 if (rdev->family < CHIP_CAYMAN)
413 return -EINVAL;
414 *value = RADEON_IB_VM_MAX_SIZE;
415 break;
416 case RADEON_INFO_MAX_PIPES:
417 if (rdev->family >= CHIP_BONAIRE)
418 *value = rdev->config.cik.max_cu_per_sh;
419 else if (rdev->family >= CHIP_TAHITI)
420 *value = rdev->config.si.max_cu_per_sh;
421 else if (rdev->family >= CHIP_CAYMAN)
422 *value = rdev->config.cayman.max_pipes_per_simd;
423 else if (rdev->family >= CHIP_CEDAR)
424 *value = rdev->config.evergreen.max_pipes;
425 else if (rdev->family >= CHIP_RV770)
426 *value = rdev->config.rv770.max_pipes;
427 else if (rdev->family >= CHIP_R600)
428 *value = rdev->config.r600.max_pipes;
429 else {
430 return -EINVAL;
431 }
432 break;
433 case RADEON_INFO_TIMESTAMP:
434 if (rdev->family < CHIP_R600) {
435 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
436 return -EINVAL;
437 }
438 value = (uint32_t*)&value64;
439 value_size = sizeof(uint64_t);
440 value64 = radeon_get_gpu_clock_counter(rdev);
441 break;
442 case RADEON_INFO_MAX_SE:
443 if (rdev->family >= CHIP_BONAIRE)
444 *value = rdev->config.cik.max_shader_engines;
445 else if (rdev->family >= CHIP_TAHITI)
446 *value = rdev->config.si.max_shader_engines;
447 else if (rdev->family >= CHIP_CAYMAN)
448 *value = rdev->config.cayman.max_shader_engines;
449 else if (rdev->family >= CHIP_CEDAR)
450 *value = rdev->config.evergreen.num_ses;
451 else
452 *value = 1;
453 break;
454 case RADEON_INFO_MAX_SH_PER_SE:
455 if (rdev->family >= CHIP_BONAIRE)
456 *value = rdev->config.cik.max_sh_per_se;
457 else if (rdev->family >= CHIP_TAHITI)
458 *value = rdev->config.si.max_sh_per_se;
459 else
460 return -EINVAL;
461 break;
462 case RADEON_INFO_FASTFB_WORKING:
463 *value = rdev->fastfb_working;
464 break;
465 case RADEON_INFO_RING_WORKING:
466 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
467 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
468 return -EFAULT;
469 }
470 switch (*value) {
471 case RADEON_CS_RING_GFX:
472 case RADEON_CS_RING_COMPUTE:
473 *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
474 break;
475 case RADEON_CS_RING_DMA:
476 *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
477 *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
478 break;
479 case RADEON_CS_RING_UVD:
480 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
481 break;
482 case RADEON_CS_RING_VCE:
483 *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
484 break;
485 default:
486 return -EINVAL;
487 }
488 break;
489 case RADEON_INFO_SI_TILE_MODE_ARRAY:
490 if (rdev->family >= CHIP_BONAIRE) {
491 value = rdev->config.cik.tile_mode_array;
492 value_size = sizeof(uint32_t)*32;
493 } else if (rdev->family >= CHIP_TAHITI) {
494 value = rdev->config.si.tile_mode_array;
495 value_size = sizeof(uint32_t)*32;
496 } else {
497 DRM_DEBUG_KMS("tile mode array is si+ only!\n");
498 return -EINVAL;
499 }
500 break;
501 case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
502 if (rdev->family >= CHIP_BONAIRE) {
503 value = rdev->config.cik.macrotile_mode_array;
504 value_size = sizeof(uint32_t)*16;
505 } else {
506 DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
507 return -EINVAL;
508 }
509 break;
510 case RADEON_INFO_SI_CP_DMA_COMPUTE:
511 *value = 1;
512 break;
513 case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
514 if (rdev->family >= CHIP_BONAIRE) {
515 *value = rdev->config.cik.backend_enable_mask;
516 } else if (rdev->family >= CHIP_TAHITI) {
517 *value = rdev->config.si.backend_enable_mask;
518 } else {
519 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
520 }
521 break;
522 case RADEON_INFO_MAX_SCLK:
523 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
524 rdev->pm.dpm_enabled)
525 *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
526 else
527 *value = rdev->pm.default_sclk * 10;
528 break;
529 case RADEON_INFO_VCE_FW_VERSION:
530 *value = rdev->vce.fw_version;
531 break;
532 case RADEON_INFO_VCE_FB_VERSION:
533 *value = rdev->vce.fb_version;
534 break;
535 case RADEON_INFO_NUM_BYTES_MOVED:
536 value = (uint32_t*)&value64;
537 value_size = sizeof(uint64_t);
538 value64 = atomic64_read(&rdev->num_bytes_moved);
539 break;
540 case RADEON_INFO_VRAM_USAGE:
541 value = (uint32_t*)&value64;
542 value_size = sizeof(uint64_t);
543 value64 = atomic64_read(&rdev->vram_usage);
544 break;
545 case RADEON_INFO_GTT_USAGE:
546 value = (uint32_t*)&value64;
547 value_size = sizeof(uint64_t);
548 value64 = atomic64_read(&rdev->gtt_usage);
549 break;
550 case RADEON_INFO_ACTIVE_CU_COUNT:
551 if (rdev->family >= CHIP_BONAIRE)
552 *value = rdev->config.cik.active_cus;
553 else if (rdev->family >= CHIP_TAHITI)
554 *value = rdev->config.si.active_cus;
555 else if (rdev->family >= CHIP_CAYMAN)
556 *value = rdev->config.cayman.active_simds;
557 else if (rdev->family >= CHIP_CEDAR)
558 *value = rdev->config.evergreen.active_simds;
559 else if (rdev->family >= CHIP_RV770)
560 *value = rdev->config.rv770.active_simds;
561 else if (rdev->family >= CHIP_R600)
562 *value = rdev->config.r600.active_simds;
563 else
564 *value = 1;
565 break;
566 case RADEON_INFO_CURRENT_GPU_TEMP:
567 /* get temperature in millidegrees C */
568 if (rdev->asic->pm.get_temperature)
569 *value = radeon_get_temperature(rdev);
570 else
571 *value = 0;
572 break;
573 case RADEON_INFO_CURRENT_GPU_SCLK:
574 /* get sclk in Mhz */
575 if (rdev->pm.dpm_enabled)
576 *value = radeon_dpm_get_current_sclk(rdev) / 100;
577 else
578 *value = rdev->pm.current_sclk / 100;
579 break;
580 case RADEON_INFO_CURRENT_GPU_MCLK:
581 /* get mclk in Mhz */
582 if (rdev->pm.dpm_enabled)
583 *value = radeon_dpm_get_current_mclk(rdev) / 100;
584 else
585 *value = rdev->pm.current_mclk / 100;
586 break;
587 case RADEON_INFO_READ_REG:
588 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
589 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
590 return -EFAULT;
591 }
592 if (radeon_get_allowed_info_register(rdev, *value, value))
593 return -EINVAL;
594 break;
595 case RADEON_INFO_VA_UNMAP_WORKING:
596 *value = true;
597 break;
598 case RADEON_INFO_GPU_RESET_COUNTER:
599 *value = atomic_read(&rdev->gpu_reset_counter);
600 break;
601 default:
602 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
603 return -EINVAL;
604 }
605 if (copy_to_user(value_ptr, (char*)value, value_size)) {
606 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
607 return -EFAULT;
608 }
609 return 0;
610 }
611

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
#ifndef __NetBSD__ /* XXX radeon vga */
	/* Restore the fbdev console and let switcheroo complete any
	 * GPU switch that was deferred while clients were open. */
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
#endif
}
630
631 /**
632 * radeon_driver_open_kms - drm callback for open
633 *
634 * @dev: drm dev pointer
635 * @file_priv: drm file
636 *
637 * On device open, init vm on cayman+ (all asics).
638 * Returns 0 on success, error on failure.
639 */
radeon_driver_open_kms(struct drm_device * dev,struct drm_file * file_priv)640 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
641 {
642 struct radeon_device *rdev = dev->dev_private;
643 int r;
644
645 file_priv->driver_priv = NULL;
646
647 r = pm_runtime_get_sync(dev->dev);
648 if (r < 0)
649 return r;
650
651 /* new gpu have virtual address space support */
652 if (rdev->family >= CHIP_CAYMAN) {
653 struct radeon_fpriv *fpriv;
654 struct radeon_vm *vm;
655
656 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
657 if (unlikely(!fpriv)) {
658 r = -ENOMEM;
659 goto out_suspend;
660 }
661
662 if (rdev->accel_working) {
663 vm = &fpriv->vm;
664 r = radeon_vm_init(rdev, vm);
665 if (r) {
666 kfree(fpriv);
667 goto out_suspend;
668 }
669
670 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
671 if (r) {
672 radeon_vm_fini(rdev, vm);
673 kfree(fpriv);
674 goto out_suspend;
675 }
676
677 /* map the ib pool buffer read only into
678 * virtual address space */
679 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
680 rdev->ring_tmp_bo.bo);
681 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
682 RADEON_VA_IB_OFFSET,
683 RADEON_VM_PAGE_READABLE |
684 RADEON_VM_PAGE_SNOOPED);
685 if (r) {
686 radeon_vm_fini(rdev, vm);
687 kfree(fpriv);
688 goto out_suspend;
689 }
690 }
691 file_priv->driver_priv = fpriv;
692 }
693
694 out_suspend:
695 pm_runtime_mark_last_busy(dev->dev);
696 pm_runtime_put_autosuspend(dev->dev);
697 return r;
698 }
699
/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics). And tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Keep the device powered while per-file state is torn down. */
	pm_runtime_get_sync(dev->dev);

	/* Drop any hyper-z/cmask ownership this file still holds. */
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	/* Release any UVD/VCE session handles opened by this file. */
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			/* Unmap the IB pool buffer from this VM before
			 * tearing the VM itself down; reserve failure just
			 * skips the unmap (fini cleans up regardless). */
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
748
/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		/* Only trust vpos if the scanout position query was both
		 * valid and accurate; otherwise return the raw count. */
		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		}
		else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	}
	else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
820
821 /**
822 * radeon_enable_vblank_kms - enable vblank interrupt
823 *
824 * @dev: drm dev pointer
825 * @crtc: crtc to enable vblank interrupt for
826 *
827 * Enable the interrupt on the requested crtc (all asics).
828 * Returns 0 on success, -EINVAL on failure.
829 */
radeon_enable_vblank_kms(struct drm_device * dev,int crtc)830 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
831 {
832 struct radeon_device *rdev = dev->dev_private;
833 unsigned long irqflags;
834 int r;
835
836 if (crtc < 0 || crtc >= rdev->num_crtc) {
837 DRM_ERROR("Invalid crtc %d\n", crtc);
838 return -EINVAL;
839 }
840
841 spin_lock_irqsave(&rdev->irq.lock, irqflags);
842 rdev->irq.crtc_vblank_int[crtc] = true;
843 r = radeon_irq_set(rdev);
844 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
845 return r;
846 }
847
848 /**
849 * radeon_disable_vblank_kms - disable vblank interrupt
850 *
851 * @dev: drm dev pointer
852 * @crtc: crtc to disable vblank interrupt for
853 *
854 * Disable the interrupt on the requested crtc (all asics).
855 */
radeon_disable_vblank_kms(struct drm_device * dev,int crtc)856 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
857 {
858 struct radeon_device *rdev = dev->dev_private;
859 unsigned long irqflags;
860
861 if (crtc < 0 || crtc >= rdev->num_crtc) {
862 DRM_ERROR("Invalid crtc %d\n", crtc);
863 return;
864 }
865
866 spin_lock_irqsave(&rdev->irq.lock, irqflags);
867 rdev->irq.crtc_vblank_int[crtc] = false;
868 radeon_irq_set(rdev);
869 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
870 }
871
/*
 * Radeon ioctl table.  The pre-KMS (UMS) entries are retired: they all
 * dispatch to drm_invalid_op so legacy userspace gets a clean error
 * instead of a missing-ioctl one.  The KMS entries below do the real work.
 */
const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
/* Number of entries in the table above, exported for the drm driver struct. */
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
918