xref: /dragonfly/sys/dev/drm/radeon/radeon_kms.c (revision b29f78b5)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  *
28  * $FreeBSD: head/sys/dev/drm2/radeon/radeon_kms.c 254885 2013-08-25 19:37:15Z dumbbell $
29  */
30 
31 #include <drm/drmP.h>
32 #include "radeon.h"
33 #include <uapi_drm/radeon_drm.h>
34 #include "radeon_asic.h"
35 #include "radeon_kms.h"
36 
37 /**
38  * radeon_driver_unload_kms - Main unload function for KMS.
39  *
40  * @dev: drm dev pointer
41  *
42  * This is the main unload function for KMS (all asics).
43  * It calls radeon_modeset_fini() to tear down the
44  * displays, and radeon_device_fini() to tear down
45  * the rest of the device (CP, writeback, etc.).
46  * Returns 0 on success.
47  */
48 int radeon_driver_unload_kms(struct drm_device *dev)
49 {
50 	struct radeon_device *rdev = dev->dev_private;
51 
52 	if (rdev == NULL)
53 		return 0;
54 	radeon_acpi_fini(rdev);
55 	radeon_modeset_fini(rdev);
56 	radeon_device_fini(rdev);
57 	drm_free(rdev, M_DRM);
58 	dev->dev_private = NULL;
59 	return 0;
60 }
61 
62 /**
63  * radeon_driver_load_kms - Main load function for KMS.
64  *
65  * @dev: drm dev pointer
66  * @flags: device flags
67  *
68  * This is the main load function for KMS (all asics).
69  * It calls radeon_device_init() to set up the non-display
70  * parts of the chip (asic init, CP, writeback, etc.), and
71  * radeon_modeset_init() to set up the display parts
72  * (crtcs, encoders, hotplug detect, etc.).
73  * Returns 0 on success, error on failure.
74  */
75 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
76 {
77 	struct radeon_device *rdev;
78 	int r, acpi_status;
79 
80 	rdev = kmalloc(sizeof(struct radeon_device), M_DRM,
81 		       M_ZERO | M_WAITOK);
82 	if (rdev == NULL) {
83 		return -ENOMEM;
84 	}
85 	dev->dev_private = (void *)rdev;
86 
87 	/* update BUS flag */
88 	if (drm_device_is_agp(dev)) {
89 		DRM_INFO("RADEON_IS_AGP\n");
90 		flags |= RADEON_IS_AGP;
91 	} else if (drm_device_is_pcie(dev)) {
92 		DRM_INFO("RADEON_IS_PCIE\n");
93 		flags |= RADEON_IS_PCIE;
94 	} else {
95 		DRM_INFO("RADEON_IS_PCI\n");
96 		flags |= RADEON_IS_PCI;
97 	}
98 
99 	/* radeon_device_init() should report only fatal errors,
100 	 * such as a memory allocation failure, an iomapping failure,
101 	 * or a memory manager initialization failure; it must
102 	 * properly initialize the GPU MC controller and permit
103 	 * VRAM allocation.
104 	 */
105 	r = radeon_device_init(rdev, dev, flags);
106 	if (r) {
107 		dev_err(dev->dev, "Fatal error during GPU init\n");
108 		goto out;
109 	}
110 
111 	/* Again, modeset_init should fail only on a fatal error;
112 	 * otherwise it should provide enough functionality
113 	 * for shadowfb to run.
114 	 */
115 	r = radeon_modeset_init(rdev);
116 	if (r)
117 		dev_err(dev->dev, "Fatal error during modeset init\n");
118 
119 	/* Call ACPI methods: these require modeset init,
120 	 * but failure is not fatal.
121 	 */
122 	if (!r) {
123 		acpi_status = radeon_acpi_init(rdev);
124 		if (acpi_status)
125 			dev_dbg(dev->dev,
126 				"Error during ACPI methods call\n");
127 	}
128 
129 out:
130 	if (r)
131 		radeon_driver_unload_kms(dev);
132 	return r;
133 }
134 
135 /**
136  * radeon_set_filp_rights - Set filp rights.
137  *
138  * @dev: drm dev pointer
139  * @owner: pointer to the drm file that currently owns the rights
140  * @applier: drm file requesting or revoking the rights
141  * @value: 1 to request the rights, 0 to revoke; set to the result
142  *
143  * Sets the filp rights for the device (all asics).
144  */
145 static void radeon_set_filp_rights(struct drm_device *dev,
146 				   struct drm_file **owner,
147 				   struct drm_file *applier,
148 				   uint32_t *value)
149 {
150 	DRM_LOCK(dev);
151 	if (*value == 1) {
152 		/* wants rights */
153 		if (!*owner)
154 			*owner = applier;
155 	} else if (*value == 0) {
156 		/* revokes rights */
157 		if (*owner == applier)
158 			*owner = NULL;
159 	}
160 	*value = *owner == applier ? 1 : 0;
161 	DRM_UNLOCK(dev);
162 }
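
/*
 * Worked example of the handshake above (an illustrative sketch, not part
 * of the driver): two clients contending for the same right.
 *
 *	client A passes value = 1 while *owner == NULL -> A becomes owner, value = 1
 *	client B passes value = 1 while *owner == A    -> owner unchanged,  value = 0
 *	client A passes value = 0                      -> owner cleared,    value = 0
 *	client B passes value = 1                      -> B becomes owner,  value = 1
 */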
163 
164 /*
165  * Userspace get information ioctl
166  */
167 /**
168  * radeon_info_ioctl - answer a device specific request.
169  *
170  * @rdev: radeon device pointer
171  * @data: request object
172  * @filp: drm filp
173  *
174  * This function is used to pass device specific parameters to the userspace
175  * drivers.  Examples include: pci device id, pipeline params, tiling params,
176  * etc. (all asics).
177  * Returns 0 on success, -EINVAL on failure.
178  */
179 static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
180 {
181 	struct radeon_device *rdev = dev->dev_private;
182 	struct drm_radeon_info *info = data;
183 	struct radeon_mode_info *minfo = &rdev->mode_info;
184 	uint32_t value, *value_ptr;
185 	uint64_t value64, *value_ptr64;
186 	struct drm_crtc *crtc;
187 	int i, found;
188 
189 	/* TIMESTAMP is a 64-bit value, needs special handling. */
190 	if (info->request == RADEON_INFO_TIMESTAMP) {
191 		if (rdev->family >= CHIP_R600) {
192 			value_ptr64 = (uint64_t*)((unsigned long)info->value);
193 			value64 = radeon_get_gpu_clock_counter(rdev);
194 
195 			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
196 				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
197 				return -EFAULT;
198 			}
199 			return 0;
200 		} else {
201 			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
202 			return -EINVAL;
203 		}
204 	}
205 
206 	value_ptr = (uint32_t *)((unsigned long)info->value);
207 	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
208 		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
209 		return -EFAULT;
210 	}
211 
212 	switch (info->request) {
213 	case RADEON_INFO_DEVICE_ID:
214 		value = dev->pci_device;
215 		break;
216 	case RADEON_INFO_NUM_GB_PIPES:
217 		value = rdev->num_gb_pipes;
218 		break;
219 	case RADEON_INFO_NUM_Z_PIPES:
220 		value = rdev->num_z_pipes;
221 		break;
222 	case RADEON_INFO_ACCEL_WORKING:
223 		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
224 		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
225 			value = false;
226 		else
227 			value = rdev->accel_working;
228 		break;
229 	case RADEON_INFO_CRTC_FROM_ID:
230 		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
231 			crtc = (struct drm_crtc *)minfo->crtcs[i];
232 			if (crtc && crtc->base.id == value) {
233 				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
234 				value = radeon_crtc->crtc_id;
235 				found = 1;
236 				break;
237 			}
238 		}
239 		if (!found) {
240 			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
241 			return -EINVAL;
242 		}
243 		break;
244 	case RADEON_INFO_ACCEL_WORKING2:
245 		value = rdev->accel_working;
246 		break;
247 	case RADEON_INFO_TILING_CONFIG:
248 		if (rdev->family >= CHIP_TAHITI)
249 			value = rdev->config.si.tile_config;
250 		else if (rdev->family >= CHIP_CAYMAN)
251 			value = rdev->config.cayman.tile_config;
252 		else if (rdev->family >= CHIP_CEDAR)
253 			value = rdev->config.evergreen.tile_config;
254 		else if (rdev->family >= CHIP_RV770)
255 			value = rdev->config.rv770.tile_config;
256 		else if (rdev->family >= CHIP_R600)
257 			value = rdev->config.r600.tile_config;
258 		else {
259 			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
260 			return -EINVAL;
261 		}
262 		break;
263 	case RADEON_INFO_WANT_HYPERZ:
264 		/* The "value" here is both an input and output parameter.
265 		 * If the input value is 1, filp requests hyper-z access.
266 		 * If the input value is 0, filp revokes its hyper-z access.
267 		 *
268 		 * When returning, the value is 1 if filp owns hyper-z access,
269 		 * 0 otherwise. */
270 		if (value >= 2) {
271 			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
272 			return -EINVAL;
273 		}
274 		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
275 		break;
276 	case RADEON_INFO_WANT_CMASK:
277 		/* The same logic as Hyper-Z. */
278 		if (value >= 2) {
279 			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
280 			return -EINVAL;
281 		}
282 		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
283 		break;
284 	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
285 		/* return clock value in KHz */
286 		if (rdev->asic->get_xclk)
287 			value = radeon_get_xclk(rdev) * 10;
288 		else
289 			value = rdev->clock.spll.reference_freq * 10;
290 		break;
291 	case RADEON_INFO_NUM_BACKENDS:
292 		if (rdev->family >= CHIP_TAHITI)
293 			value = rdev->config.si.max_backends_per_se *
294 				rdev->config.si.max_shader_engines;
295 		else if (rdev->family >= CHIP_CAYMAN)
296 			value = rdev->config.cayman.max_backends_per_se *
297 				rdev->config.cayman.max_shader_engines;
298 		else if (rdev->family >= CHIP_CEDAR)
299 			value = rdev->config.evergreen.max_backends;
300 		else if (rdev->family >= CHIP_RV770)
301 			value = rdev->config.rv770.max_backends;
302 		else if (rdev->family >= CHIP_R600)
303 			value = rdev->config.r600.max_backends;
304 		else {
305 			return -EINVAL;
306 		}
307 		break;
308 	case RADEON_INFO_NUM_TILE_PIPES:
309 		if (rdev->family >= CHIP_TAHITI)
310 			value = rdev->config.si.max_tile_pipes;
311 		else if (rdev->family >= CHIP_CAYMAN)
312 			value = rdev->config.cayman.max_tile_pipes;
313 		else if (rdev->family >= CHIP_CEDAR)
314 			value = rdev->config.evergreen.max_tile_pipes;
315 		else if (rdev->family >= CHIP_RV770)
316 			value = rdev->config.rv770.max_tile_pipes;
317 		else if (rdev->family >= CHIP_R600)
318 			value = rdev->config.r600.max_tile_pipes;
319 		else {
320 			return -EINVAL;
321 		}
322 		break;
323 	case RADEON_INFO_FUSION_GART_WORKING:
324 		value = 1;
325 		break;
326 	case RADEON_INFO_BACKEND_MAP:
327 		if (rdev->family >= CHIP_TAHITI)
328 			value = rdev->config.si.backend_map;
329 		else if (rdev->family >= CHIP_CAYMAN)
330 			value = rdev->config.cayman.backend_map;
331 		else if (rdev->family >= CHIP_CEDAR)
332 			value = rdev->config.evergreen.backend_map;
333 		else if (rdev->family >= CHIP_RV770)
334 			value = rdev->config.rv770.backend_map;
335 		else if (rdev->family >= CHIP_R600)
336 			value = rdev->config.r600.backend_map;
337 		else {
338 			return -EINVAL;
339 		}
340 		break;
341 	case RADEON_INFO_VA_START:
342 		/* this is where we report if vm is supported or not */
343 		if (rdev->family < CHIP_CAYMAN)
344 			return -EINVAL;
345 		value = RADEON_VA_RESERVED_SIZE;
346 		break;
347 	case RADEON_INFO_IB_VM_MAX_SIZE:
348 		/* this is where we report if vm is supported or not */
349 		if (rdev->family < CHIP_CAYMAN)
350 			return -EINVAL;
351 		value = RADEON_IB_VM_MAX_SIZE;
352 		break;
353 	case RADEON_INFO_MAX_PIPES:
354 		if (rdev->family >= CHIP_TAHITI)
355 			value = rdev->config.si.max_cu_per_sh;
356 		else if (rdev->family >= CHIP_CAYMAN)
357 			value = rdev->config.cayman.max_pipes_per_simd;
358 		else if (rdev->family >= CHIP_CEDAR)
359 			value = rdev->config.evergreen.max_pipes;
360 		else if (rdev->family >= CHIP_RV770)
361 			value = rdev->config.rv770.max_pipes;
362 		else if (rdev->family >= CHIP_R600)
363 			value = rdev->config.r600.max_pipes;
364 		else {
365 			return -EINVAL;
366 		}
367 		break;
368 	case RADEON_INFO_MAX_SE:
369 		if (rdev->family >= CHIP_TAHITI)
370 			value = rdev->config.si.max_shader_engines;
371 		else if (rdev->family >= CHIP_CAYMAN)
372 			value = rdev->config.cayman.max_shader_engines;
373 		else if (rdev->family >= CHIP_CEDAR)
374 			value = rdev->config.evergreen.num_ses;
375 		else
376 			value = 1;
377 		break;
378 	case RADEON_INFO_MAX_SH_PER_SE:
379 		if (rdev->family >= CHIP_TAHITI)
380 			value = rdev->config.si.max_sh_per_se;
381 		else
382 			return -EINVAL;
383 		break;
384 	default:
385 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
386 		return -EINVAL;
387 	}
388 	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
389 		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
390 		return -EFAULT;
391 	}
392 	return 0;
393 }
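
/*
 * Hedged userspace sketch (not part of this file) of driving the ioctl
 * above through libdrm; it assumes an open radeon drm node in "fd" and
 * the radeon_drm.h uapi definitions.
 *
 *	uint32_t id = 0;
 *	struct drm_radeon_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.request = RADEON_INFO_DEVICE_ID;
 *	info.value = (uint64_t)(uintptr_t)&id;
 *	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0)
 *		printf("PCI device id: 0x%04x\n", id);
 */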
394 
395 
396 /*
397  * Outdated mess for old drm with Xorg being in charge (void function now).
398  */
399 /**
400  * radeon_driver_firstopen_kms - drm callback for first open
401  *
402  * @dev: drm dev pointer
403  *
404  * Nothing to be done for KMS (all asics).
405  * Returns 0 on success.
406  */
407 int radeon_driver_firstopen_kms(struct drm_device *dev)
408 {
409 	return 0;
410 }
411 
412 /**
413  * radeon_driver_lastclose_kms - drm callback for last close
414  *
415  * @dev: drm dev pointer
416  *
417  * Switch vga switcheroo state after last close (all asics).
418  */
419 void radeon_driver_lastclose_kms(struct drm_device *dev)
420 {
421 #ifdef DUMBBELL_WIP
422 	vga_switcheroo_process_delayed_switch();
423 #endif /* DUMBBELL_WIP */
424 }
425 
426 /**
427  * radeon_driver_open_kms - drm callback for open
428  *
429  * @dev: drm dev pointer
430  * @file_priv: drm file
431  *
432  * On device open, init vm on cayman+ (all asics).
433  * Returns 0 on success, error on failure.
434  */
435 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
436 {
437 	struct radeon_device *rdev = dev->dev_private;
438 
439 	file_priv->driver_priv = NULL;
440 
441 	/* newer GPUs have virtual address space support */
442 	if (rdev->family >= CHIP_CAYMAN) {
443 		struct radeon_fpriv *fpriv;
444 		struct radeon_bo_va *bo_va;
445 		int r;
446 
447 		fpriv = kmalloc(sizeof(*fpriv), M_DRM,
448 				M_ZERO | M_WAITOK);
449 		if (unlikely(!fpriv)) {
450 			return -ENOMEM;
451 		}
452 
453 		radeon_vm_init(rdev, &fpriv->vm);
454 
455 		/* map the ib pool buffer read only into
456 		 * virtual address space */
457 		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
458 					 rdev->ring_tmp_bo.bo);
459 		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
460 					  RADEON_VM_PAGE_READABLE |
461 					  RADEON_VM_PAGE_SNOOPED);
462 		if (r) {
463 			radeon_vm_fini(rdev, &fpriv->vm);
464 			drm_free(fpriv, M_DRM);
465 			return r;
466 		}
467 
468 		file_priv->driver_priv = fpriv;
469 	}
470 	return 0;
471 }
472 
473 /**
474  * radeon_driver_postclose_kms - drm callback for post close
475  *
476  * @dev: drm dev pointer
477  * @file_priv: drm file
478  *
479  * On device post close, tear down vm on cayman+ (all asics).
480  */
481 void radeon_driver_postclose_kms(struct drm_device *dev,
482 				 struct drm_file *file_priv)
483 {
484 	struct radeon_device *rdev = dev->dev_private;
485 
486 	/* newer GPUs have virtual address space support */
487 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
488 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
489 		struct radeon_bo_va *bo_va;
490 		int r;
491 
492 		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
493 		if (!r) {
494 			bo_va = radeon_vm_bo_find(&fpriv->vm,
495 						  rdev->ring_tmp_bo.bo);
496 			if (bo_va)
497 				radeon_vm_bo_rmv(rdev, bo_va);
498 			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
499 		}
500 
501 		radeon_vm_fini(rdev, &fpriv->vm);
502 		drm_free(fpriv, M_DRM);
503 		file_priv->driver_priv = NULL;
504 	}
505 }
506 
507 /**
508  * radeon_driver_preclose_kms - drm callback for pre close
509  *
510  * @dev: drm dev pointer
511  * @file_priv: drm file
512  *
513  * On device pre close, revoke hyperz and cmask filp ownership
514  * for this file (all asics).
515  */
516 void radeon_driver_preclose_kms(struct drm_device *dev,
517 				struct drm_file *file_priv)
518 {
519 	struct radeon_device *rdev = dev->dev_private;
520 	if (rdev->hyperz_filp == file_priv)
521 		rdev->hyperz_filp = NULL;
522 	if (rdev->cmask_filp == file_priv)
523 		rdev->cmask_filp = NULL;
524 }
525 
526 /*
527  * VBlank related functions.
528  */
529 /**
530  * radeon_get_vblank_counter_kms - get frame count
531  *
532  * @dev: drm dev pointer
533  * @crtc: crtc to get the frame count from
534  *
535  * Gets the frame count on the requested crtc (all asics).
536  * Returns frame count on success, -EINVAL on failure.
537  */
538 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
539 {
540 	struct radeon_device *rdev = dev->dev_private;
541 
542 	if (crtc < 0 || crtc >= rdev->num_crtc) {
543 		DRM_ERROR("Invalid crtc %d\n", crtc);
544 		return -EINVAL;
545 	}
546 
547 	return radeon_get_vblank_counter(rdev, crtc);
548 }
549 
550 /**
551  * radeon_enable_vblank_kms - enable vblank interrupt
552  *
553  * @dev: drm dev pointer
554  * @crtc: crtc to enable vblank interrupt for
555  *
556  * Enable the interrupt on the requested crtc (all asics).
557  * Returns 0 on success, -EINVAL on failure.
558  */
559 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
560 {
561 	struct radeon_device *rdev = dev->dev_private;
562 	int r;
563 
564 	if (crtc < 0 || crtc >= rdev->num_crtc) {
565 		DRM_ERROR("Invalid crtc %d\n", crtc);
566 		return -EINVAL;
567 	}
568 
569 	lockmgr(&rdev->irq.lock, LK_EXCLUSIVE);
570 	rdev->irq.crtc_vblank_int[crtc] = true;
571 	r = radeon_irq_set(rdev);
572 	lockmgr(&rdev->irq.lock, LK_RELEASE);
573 	return r;
574 }
575 
576 /**
577  * radeon_disable_vblank_kms - disable vblank interrupt
578  *
579  * @dev: drm dev pointer
580  * @crtc: crtc to disable vblank interrupt for
581  *
582  * Disable the interrupt on the requested crtc (all asics).
583  */
584 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
585 {
586 	struct radeon_device *rdev = dev->dev_private;
587 
588 	if (crtc < 0 || crtc >= rdev->num_crtc) {
589 		DRM_ERROR("Invalid crtc %d\n", crtc);
590 		return;
591 	}
592 
593 	lockmgr(&rdev->irq.lock, LK_EXCLUSIVE);
594 	rdev->irq.crtc_vblank_int[crtc] = false;
595 	radeon_irq_set(rdev);
596 	lockmgr(&rdev->irq.lock, LK_RELEASE);
597 }
598 
599 /**
600  * radeon_get_vblank_timestamp_kms - get vblank timestamp
601  *
602  * @dev: drm dev pointer
603  * @crtc: crtc to get the timestamp for
604  * @max_error: max error
605  * @vblank_time: time value
606  * @flags: flags passed to the driver
607  *
608  * Gets the timestamp on the requested crtc based on the
609  * scanout position.  (all asics).
610  * Returns positive status flags on success, negative error on failure.
611  */
612 int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
613 				    int *max_error,
614 				    struct timeval *vblank_time,
615 				    unsigned flags)
616 {
617 	struct drm_crtc *drmcrtc;
618 	struct radeon_device *rdev = dev->dev_private;
619 
620 	if (crtc < 0 || crtc >= dev->num_crtcs) {
621 		DRM_ERROR("Invalid crtc %d\n", crtc);
622 		return -EINVAL;
623 	}
624 
625 	/* Get associated drm_crtc: */
626 	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
627 
628 	/* Helper routine in DRM core does all the work: */
629 	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
630 						     vblank_time, flags,
631 						     drmcrtc);
632 }
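
/*
 * Hedged userspace sketch (not part of this file): waiting for the next
 * vblank on the first crtc with libdrm, which ends up exercising the
 * enable/disable and counter paths above; assumes an open drm node in "fd".
 *
 *	drmVBlank vbl;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *	if (drmWaitVBlank(fd, &vbl) == 0)
 *		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
 *		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
 */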
633 
634 /*
635  * IOCTL.
636  */
637 int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
638 			 struct drm_file *file_priv)
639 {
640 	/* Not valid in KMS. */
641 	return -EINVAL;
642 }
643 
644 #define KMS_INVALID_IOCTL(name)						\
645 static int								\
646 name(struct drm_device *dev, void *data, struct drm_file *file_priv)	\
647 {									\
648 	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
649 	return -EINVAL;							\
650 }
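
/*
 * For reference, KMS_INVALID_IOCTL(radeon_cp_init_kms) below expands to
 * the following stub:
 *
 *	static int
 *	radeon_cp_init_kms(struct drm_device *dev, void *data,
 *			   struct drm_file *file_priv)
 *	{
 *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
 *		return -EINVAL;
 *	}
 */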
651 
652 /*
653  * All these ioctls are invalid in kms world.
654  */
655 KMS_INVALID_IOCTL(radeon_cp_init_kms)
656 KMS_INVALID_IOCTL(radeon_cp_start_kms)
657 KMS_INVALID_IOCTL(radeon_cp_stop_kms)
658 KMS_INVALID_IOCTL(radeon_cp_reset_kms)
659 KMS_INVALID_IOCTL(radeon_cp_idle_kms)
660 KMS_INVALID_IOCTL(radeon_cp_resume_kms)
661 KMS_INVALID_IOCTL(radeon_engine_reset_kms)
662 KMS_INVALID_IOCTL(radeon_fullscreen_kms)
663 KMS_INVALID_IOCTL(radeon_cp_swap_kms)
664 KMS_INVALID_IOCTL(radeon_cp_clear_kms)
665 KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
666 KMS_INVALID_IOCTL(radeon_cp_indices_kms)
667 KMS_INVALID_IOCTL(radeon_cp_texture_kms)
668 KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
669 KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
670 KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
671 KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
672 KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
673 KMS_INVALID_IOCTL(radeon_cp_flip_kms)
674 KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
675 KMS_INVALID_IOCTL(radeon_mem_free_kms)
676 KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
677 KMS_INVALID_IOCTL(radeon_irq_emit_kms)
678 KMS_INVALID_IOCTL(radeon_irq_wait_kms)
679 KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
680 KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
681 KMS_INVALID_IOCTL(radeon_surface_free_kms)
682 
683 
684 struct drm_ioctl_desc radeon_ioctls_kms[] = {
685 	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
686 	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
687 	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
688 	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
689 	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
690 	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
691 	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
692 	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
693 	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
694 	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
695 	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
696 	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
697 	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
698 	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
699 	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
700 	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
701 	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
702 	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
703 	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
704 	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
705 	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
706 	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
707 	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
708 	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
709 	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
710 	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
711 	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
712 	/* KMS */
713 	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
714 	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
715 	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
716 	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
717 	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
718 	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
719 	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
720 	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
721 	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
722 	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
723 	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
724 	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
725 	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
726 };
727 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
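
/*
 * These two symbols are picked up by the driver registration code in
 * radeon_drv.c (not shown here); roughly, as a hedged sketch:
 *
 *	driver->ioctls = radeon_ioctls_kms;
 *	driver->num_ioctls = radeon_max_kms_ioctl;
 *
 * which lets the drm core dispatch the DRM_RADEON_* ioctls to the table
 * above.
 */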
728