xref: /openbsd/sys/dev/pci/drm/radeon/radeon_kms.c (revision 09467b48)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/pci.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/slab.h>
32 #include <linux/uaccess.h>
33 #include <linux/vga_switcheroo.h>
34 
35 #include <drm/drm_agpsupport.h>
36 #include <drm/drm_fb_helper.h>
37 #include <drm/drm_file.h>
38 #include <drm/drm_ioctl.h>
39 #include <drm/radeon_drm.h>
40 #include <drm/drm_drv.h>
41 #include <drm/drm_pci.h>
42 
43 #include "radeon.h"
44 #include "radeon_asic.h"
45 
46 #if defined(CONFIG_VGA_SWITCHEROO)
47 bool radeon_has_atpx(void);
48 #else
49 static inline bool radeon_has_atpx(void) { return false; }
50 #endif
51 
52 #include "vga.h"
53 
54 #if NVGA > 0
55 #include <dev/ic/mc6845reg.h>
56 #include <dev/ic/pcdisplayvar.h>
57 #include <dev/ic/vgareg.h>
58 #include <dev/ic/vgavar.h>
59 
60 extern int vga_console_attached;
61 #endif
62 
63 #ifdef __amd64__
64 #include "efifb.h"
65 #include <machine/biosvar.h>
66 #endif
67 
68 #if NEFIFB > 0
69 #include <machine/efifbvar.h>
70 #endif
71 
72 int	radeondrm_probe(struct device *, void *, void *);
73 void	radeondrm_attach_kms(struct device *, struct device *, void *);
74 int	radeondrm_detach_kms(struct device *, int);
75 int	radeondrm_activate_kms(struct device *, int);
76 void	radeondrm_attachhook(struct device *);
77 int	radeondrm_forcedetach(struct radeon_device *);
78 
79 bool		radeon_msi_ok(struct radeon_device *);
80 irqreturn_t	radeon_driver_irq_handler_kms(void *);
81 
82 extern const struct pci_device_id radeondrm_pciidlist[];
83 extern struct drm_driver kms_driver;
84 const struct drm_ioctl_desc radeon_ioctls_kms[];
85 extern int radeon_max_kms_ioctl;
86 
87 /*
88  * set if the mountroot hook has a fatal error
89  * such as not being able to find the firmware on newer cards
90  */
91 int radeon_fatal_error;
92 
/*
 * Autoconf glue: the radeondrm softc is the whole struct radeon_device,
 * so autoconf's device allocation doubles as the driver state.
 */
struct cfattach radeondrm_ca = {
        sizeof (struct radeon_device), radeondrm_probe, radeondrm_attach_kms,
        radeondrm_detach_kms, radeondrm_activate_kms
};

struct cfdriver radeondrm_cd = {
	NULL, "radeondrm", DV_DULL
};
101 
102 int
103 radeondrm_probe(struct device *parent, void *match, void *aux)
104 {
105 	if (radeon_fatal_error)
106 		return 0;
107 	if (drm_pciprobe(aux, radeondrm_pciidlist))
108 		return 20;
109 	return 0;
110 }
111 
112 /**
113  * radeon_driver_unload_kms - Main unload function for KMS.
114  *
115  * @dev: drm dev pointer
116  *
117  * This is the main unload function for KMS (all asics).
118  * It calls radeon_modeset_fini() to tear down the
119  * displays, and radeon_device_fini() to tear down
120  * the rest of the device (CP, writeback, etc.).
121  * Returns 0 on success.
122  */
#ifdef __linux__
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;

	/* MMIO never got mapped: nothing to tear down, just free. */
	if (rdev->rmmio == NULL)
		goto done_free;

	/* Keep the device powered while we take it apart. */
	pm_runtime_get_sync(dev->dev);

	radeon_kfd_device_fini(rdev);

	radeon_acpi_fini(rdev);

	/* Displays first, then the rest of the chip (CP, writeback, ...). */
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	if (dev->agp)
		arch_phys_wc_del(dev->agp->agp_mtrr);
	kfree(dev->agp);
	dev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}
#else
/*
 * OpenBSD autoconf detach hook; mirrors radeon_driver_unload_kms()
 * above but operates on the softc directly.
 */
int
radeondrm_detach_kms(struct device *self, int flags)
{
	struct radeon_device *rdev = (struct radeon_device *)self;

	if (rdev == NULL)
		return 0;

	/* Stop taking interrupts before dismantling anything. */
	pci_intr_disestablish(rdev->pc, rdev->irqh);

#ifdef notyet
	pm_runtime_get_sync(dev->dev);

	radeon_kfd_device_fini(rdev);
#endif

	radeon_acpi_fini(rdev);

	/* Same order as the Linux path: modeset, then device core. */
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	/* Detach the child drm(4) device attached in attach_kms. */
	if (rdev->ddev != NULL) {
		config_detach(rdev->ddev->dev, flags);
		rdev->ddev = NULL;
	}

	return 0;
}
#endif
183 
184 void radeondrm_burner(void *, u_int, u_int);
185 int radeondrm_wsioctl(void *, u_long, caddr_t, int, struct proc *);
186 paddr_t radeondrm_wsmmap(void *, off_t, int);
187 int radeondrm_alloc_screen(void *, const struct wsscreen_descr *,
188     void **, int *, int *, uint32_t *);
189 void radeondrm_free_screen(void *, void *);
190 int radeondrm_show_screen(void *, void *, int,
191     void (*)(void *, int, int), void *);
192 void radeondrm_doswitch(void *);
193 void radeondrm_enter_ddb(void *, void *);
194 #ifdef __sparc64__
195 void radeondrm_setcolor(void *, u_int, u_int8_t, u_int8_t, u_int8_t);
196 #endif
197 
/*
 * wsdisplay screen descriptor.  Geometry, text ops and font metrics
 * are zero here; radeondrm_attachhook() fills them in from rasops
 * once the framebuffer is up.
 */
struct wsscreen_descr radeondrm_stdscreen = {
	"std",
	0, 0,	/* ncols, nrows: set in attachhook */
	0,	/* textops: set in attachhook */
	0, 0,	/* fontwidth, fontheight: set in attachhook */
	WSSCREEN_UNDERLINE | WSSCREEN_HILIT |
	WSSCREEN_REVERSE | WSSCREEN_WSCOLORS
};

const struct wsscreen_descr *radeondrm_scrlist[] = {
	&radeondrm_stdscreen,
};

struct wsscreen_list radeondrm_screenlist = {
	nitems(radeondrm_scrlist), radeondrm_scrlist
};

/* wsdisplay entry points; generic text handling is delegated to rasops. */
struct wsdisplay_accessops radeondrm_accessops = {
	.ioctl = radeondrm_wsioctl,
	.mmap = radeondrm_wsmmap,
	.alloc_screen = radeondrm_alloc_screen,
	.free_screen = radeondrm_free_screen,
	.show_screen = radeondrm_show_screen,
	.enter_ddb = radeondrm_enter_ddb,
	.getchar = rasops_getchar,
	.load_font = rasops_load_font,
	.list_font = rasops_list_font,
	.scrollback = rasops_scrollback,
	.burn_screen = radeondrm_burner
};
228 
229 int
230 radeondrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
231 {
232 	struct rasops_info *ri = v;
233 	struct wsdisplay_fbinfo *wdf;
234 
235 	switch (cmd) {
236 	case WSDISPLAYIO_GTYPE:
237 		*(u_int *)data = WSDISPLAY_TYPE_RADEONDRM;
238 		return 0;
239 	case WSDISPLAYIO_GINFO:
240 		wdf = (struct wsdisplay_fbinfo *)data;
241 		wdf->width = ri->ri_width;
242 		wdf->height = ri->ri_height;
243 		wdf->depth = ri->ri_depth;
244 		wdf->cmsize = 0;
245 		return 0;
246 	default:
247 		return -1;
248 	}
249 }
250 
251 paddr_t
252 radeondrm_wsmmap(void *v, off_t off, int prot)
253 {
254 	return (-1);
255 }
256 
257 int
258 radeondrm_alloc_screen(void *v, const struct wsscreen_descr *type,
259     void **cookiep, int *curxp, int *curyp, uint32_t *attrp)
260 {
261 	return rasops_alloc_screen(v, cookiep, curxp, curyp, attrp);
262 }
263 
void
radeondrm_free_screen(void *v, void *cookie)
{
	/*
	 * Delegate screen teardown to rasops.  rasops_free_screen()
	 * returns void, so don't "return" its (non-existent) value: a
	 * return statement with an expression in a void function is a
	 * constraint violation in strict ISO C (C11 6.8.6.4).
	 */
	rasops_free_screen(v, cookie);
}
269 
270 int
271 radeondrm_show_screen(void *v, void *cookie, int waitok,
272     void (*cb)(void *, int, int), void *cbarg)
273 {
274 	struct rasops_info *ri = v;
275 	struct radeon_device *rdev = ri->ri_hw;
276 
277 	if (cookie == ri->ri_active)
278 		return (0);
279 
280 	rdev->switchcb = cb;
281 	rdev->switchcbarg = cbarg;
282 	rdev->switchcookie = cookie;
283 	if (cb) {
284 		task_add(systq, &rdev->switchtask);
285 		return (EAGAIN);
286 	}
287 
288 	radeondrm_doswitch(v);
289 
290 	return (0);
291 }
292 
/*
 * Perform a pending wsdisplay screen switch recorded by
 * radeondrm_show_screen(): activate the new rasops screen, reprogram
 * the CRTC gamma tables / colormap for the text console, restore the
 * fbdev mode, and finally notify wsdisplay through the saved callback.
 * Runs either directly or as a task on systq.
 */
void
radeondrm_doswitch(void *v)
{
	struct rasops_info *ri = v;
	struct radeon_device *rdev = ri->ri_hw;
#ifndef __sparc64__
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	uint16_t *r_base, *g_base, *b_base;
	int i, ret = 0;
#endif

	rasops_show_screen(ri, rdev->switchcookie, 0, NULL, NULL);
#ifdef __sparc64__
	fbwscons_setcolormap(&rdev->sf, radeondrm_setcolor);
#else
	/*
	 * Write into each CRTC's gamma store under the modeset locks.
	 * NOTE(review): only the first entry of each table is written,
	 * using rasops_cmap bytes indexed by the CRTC number -- looks
	 * intentional for the text console but verify against other
	 * rasops_cmap consumers.
	 */
	for (i = 0; i < rdev->num_crtc; i++) {
		struct drm_modeset_acquire_ctx ctx;
		crtc = &rdev->mode_info.crtcs[i]->base;

		/* Gamma store layout: R table, then G, then B. */
		r_base = crtc->gamma_store;
		g_base = r_base + crtc->gamma_size;
		b_base = g_base + crtc->gamma_size;

		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

		*r_base = rasops_cmap[3 * i] << 2;
		*g_base = rasops_cmap[(3 * i) + 1] << 2;
		*b_base = rasops_cmap[(3 * i) + 2] << 2;

		crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
		    crtc->gamma_size, &ctx);

		DRM_MODESET_LOCK_ALL_END(ctx, ret);
	}
#endif
	drm_fb_helper_restore_fbdev_mode_unlocked((void *)rdev->mode_info.rfbdev);

	/* Tell wsdisplay the switch completed. */
	if (rdev->switchcb)
		(rdev->switchcb)(rdev->switchcbarg, 0, 0);
}
334 
335 void
336 radeondrm_enter_ddb(void *v, void *cookie)
337 {
338 	struct rasops_info *ri = v;
339 	struct radeon_device *rdev = ri->ri_hw;
340 	struct drm_fb_helper *fb_helper = (void *)rdev->mode_info.rfbdev;
341 
342 	if (cookie == ri->ri_active)
343 		return;
344 
345 	rasops_show_screen(ri, cookie, 0, NULL, NULL);
346 	drm_fb_helper_debug_enter(fb_helper->fbdev);
347 }
348 
349 #ifdef __sparc64__
/*
 * sparc64 fbwscons colormap hook: program the given 8-bit RGB triplet
 * into the CRTC gamma tables (expanded to the hardware's 10-bit range).
 * NOTE(review): `index' is unused and only the first gamma entry of
 * each CRTC is written, mirroring radeondrm_doswitch() -- verify.
 */
void
radeondrm_setcolor(void *v, u_int index, u_int8_t r, u_int8_t g, u_int8_t b)
{
	struct sunfb *sf = v;
	struct radeon_device *rdev = sf->sf_ro.ri_hw;
	struct drm_device *dev = rdev->ddev;
	uint16_t red, green, blue;
	uint16_t *r_base, *g_base, *b_base;
	struct drm_crtc *crtc;
	int i, ret = 0;

	for (i = 0; i < rdev->num_crtc; i++) {
		struct drm_modeset_acquire_ctx ctx;
		crtc = &rdev->mode_info.crtcs[i]->base;

		/* Widen 8-bit components to 16 bits by replication. */
		red = (r << 8) | r;
		green = (g << 8) | g;
		blue = (b << 8) | b;

		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

		/* Gamma store layout: R table, then G, then B. */
		r_base = crtc->gamma_store;
		g_base = r_base + crtc->gamma_size;
		b_base = g_base + crtc->gamma_size;

		*r_base = red >> 6;
		*g_base = green >> 6;
		*b_base = blue >> 6;

		crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
		    crtc->gamma_size, &ctx);

		DRM_MODESET_LOCK_ALL_END(ctx, ret);
	}
}
385 #endif
386 
387 #ifdef __linux__
388 /**
389  * radeon_driver_load_kms - Main load function for KMS.
390  *
391  * @dev: drm dev pointer
392  * @flags: device flags
393  *
394  * This is the main load function for KMS (all asics).
395  * It calls radeon_device_init() to set up the non-display
396  * parts of the chip (asic init, CP, writeback, etc.), and
397  * radeon_modeset_init() to set up the display parts
398  * (crtcs, encoders, hotplug detect, etc.).
399  * Returns 0 on success, error on failure.
400  */
401 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
402 {
403 	struct radeon_device *rdev;
404 	int r, acpi_status;
405 
406 	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
407 	if (rdev == NULL) {
408 		return -ENOMEM;
409 	}
410 	dev->dev_private = (void *)rdev;
411 
412 	/* update BUS flag */
413 	if (drm_pci_device_is_agp(dev)) {
414 		flags |= RADEON_IS_AGP;
415 	} else if (pci_is_pcie(dev->pdev)) {
416 		flags |= RADEON_IS_PCIE;
417 	} else {
418 		flags |= RADEON_IS_PCI;
419 	}
420 
421 	if ((radeon_runtime_pm != 0) &&
422 	    radeon_has_atpx() &&
423 	    ((flags & RADEON_IS_IGP) == 0) &&
424 	    !pci_is_thunderbolt_attached(dev->pdev))
425 		flags |= RADEON_IS_PX;
426 
427 	/* radeon_device_init should report only fatal error
428 	 * like memory allocation failure or iomapping failure,
429 	 * or memory manager initialization failure, it must
430 	 * properly initialize the GPU MC controller and permit
431 	 * VRAM allocation
432 	 */
433 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
434 	if (r) {
435 		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
436 		goto out;
437 	}
438 
439 	/* Again modeset_init should fail only on fatal error
440 	 * otherwise it should provide enough functionalities
441 	 * for shadowfb to run
442 	 */
443 	r = radeon_modeset_init(rdev);
444 	if (r)
445 		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
446 
447 	/* Call ACPI methods: require modeset init
448 	 * but failure is not fatal
449 	 */
450 	if (!r) {
451 		acpi_status = radeon_acpi_init(rdev);
452 		if (acpi_status)
453 		dev_dbg(&dev->pdev->dev,
454 				"Error during ACPI methods call\n");
455 	}
456 
457 	if (radeon_is_px(dev)) {
458 		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
459 		pm_runtime_use_autosuspend(dev->dev);
460 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
461 		pm_runtime_set_active(dev->dev);
462 		pm_runtime_allow(dev->dev);
463 		pm_runtime_mark_last_busy(dev->dev);
464 		pm_runtime_put_autosuspend(dev->dev);
465 	}
466 
467 out:
468 	if (r)
469 		radeon_driver_unload_kms(dev);
470 
471 
472 	return r;
473 }
474 #endif
475 
476 void
477 radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
478 {
479 	struct radeon_device	*rdev = (struct radeon_device *)self;
480 	struct drm_device	*dev;
481 	struct pci_attach_args	*pa = aux;
482 	const struct pci_device_id *id_entry;
483 	int			 is_agp;
484 	pcireg_t		 type;
485 	int			 i;
486 	uint8_t			 rmmio_bar;
487 	paddr_t			 fb_aper;
488 #if !defined(__sparc64__)
489 	pcireg_t		 addr, mask;
490 	int			 s;
491 #endif
492 
493 #if defined(__sparc64__) || defined(__macppc__)
494 	extern int fbnode;
495 #endif
496 
497 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
498 	    PCI_PRODUCT(pa->pa_id), radeondrm_pciidlist);
499 	rdev->flags = id_entry->driver_data;
500 	rdev->family = rdev->flags & RADEON_FAMILY_MASK;
501 	rdev->pc = pa->pa_pc;
502 	rdev->pa_tag = pa->pa_tag;
503 	rdev->iot = pa->pa_iot;
504 	rdev->memt = pa->pa_memt;
505 	rdev->dmat = pa->pa_dmat;
506 
507 #if defined(__sparc64__) || defined(__macppc__)
508 	if (fbnode == PCITAG_NODE(rdev->pa_tag))
509 		rdev->console = rdev->primary = 1;
510 #else
511 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
512 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
513 	    (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
514 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
515 	    == (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) {
516 		rdev->primary = 1;
517 #if NVGA > 0
518 		rdev->console = vga_is_console(pa->pa_iot, -1);
519 		vga_console_attached = 1;
520 #endif
521 	}
522 
523 #if NEFIFB > 0
524 	if (efifb_is_primary(pa)) {
525 		rdev->primary = 1;
526 		rdev->console = efifb_is_console(pa);
527 		efifb_detach();
528 	}
529 #endif
530 #endif
531 
532 #define RADEON_PCI_MEM		0x10
533 
534 	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM);
535 	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
536 	    pci_mapreg_info(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM,
537 	    type, &rdev->fb_aper_offset, &rdev->fb_aper_size, NULL)) {
538 		printf(": can't get frambuffer info\n");
539 		return;
540 	}
541 #if !defined(__sparc64__)
542 	if (rdev->fb_aper_offset == 0) {
543 		bus_size_t start, end;
544 		bus_addr_t base;
545 
546 		start = max(PCI_MEM_START, pa->pa_memex->ex_start);
547 		end = min(PCI_MEM_END, pa->pa_memex->ex_end);
548 		if (pa->pa_memex == NULL ||
549 		    extent_alloc_subregion(pa->pa_memex, start, end,
550 		    rdev->fb_aper_size, rdev->fb_aper_size, 0, 0, 0, &base)) {
551 			printf(": can't reserve framebuffer space\n");
552 			return;
553 		}
554 		pci_conf_write(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM, base);
555 		if (PCI_MAPREG_MEM_TYPE(type) == PCI_MAPREG_MEM_TYPE_64BIT)
556 			pci_conf_write(pa->pa_pc, pa->pa_tag,
557 			    RADEON_PCI_MEM + 4, (uint64_t)base >> 32);
558 		rdev->fb_aper_offset = base;
559 	}
560 #endif
561 
562 	for (i = PCI_MAPREG_START; i < PCI_MAPREG_END ; i+= 4) {
563 		type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
564 		if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_IO)
565 			continue;
566 		if (pci_mapreg_map(pa, i, type, 0, NULL,
567 		    &rdev->rio_mem, NULL, &rdev->rio_mem_size, 0)) {
568 			printf(": can't map rio space\n");
569 			return;
570 		}
571 
572 		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
573 			i += 4;
574 	}
575 
576 	if (rdev->family >= CHIP_BONAIRE) {
577 		type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x18);
578 		if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
579 		    pci_mapreg_map(pa, 0x18, type, 0, NULL,
580 		    &rdev->doorbell.bsh, &rdev->doorbell.base,
581 		    &rdev->doorbell.size, 0)) {
582 			printf(": can't map doorbell space\n");
583 			return;
584 		}
585 	}
586 
587 	if (rdev->family >= CHIP_BONAIRE)
588 		rmmio_bar = 0x24;
589 	else
590 		rmmio_bar = 0x18;
591 
592 	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rmmio_bar);
593 	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
594 	    pci_mapreg_map(pa, rmmio_bar, type, 0, NULL,
595 	    &rdev->rmmio_bsh, &rdev->rmmio_base, &rdev->rmmio_size, 0)) {
596 		printf(": can't map rmmio space\n");
597 		return;
598 	}
599 
600 #if !defined(__sparc64__)
601 	/*
602 	 * Make sure we have a base address for the ROM such that we
603 	 * can map it later.
604 	 */
605 	s = splhigh();
606 	addr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
607 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
608 	mask = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
609 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, addr);
610 	splx(s);
611 
612 	if (addr == 0 && PCI_ROM_SIZE(mask) != 0 && pa->pa_memex) {
613 		bus_size_t size, start, end;
614 		bus_addr_t base;
615 
616 		size = PCI_ROM_SIZE(mask);
617 		start = max(PCI_MEM_START, pa->pa_memex->ex_start);
618 		end = min(PCI_MEM_END, pa->pa_memex->ex_end);
619 		if (extent_alloc_subregion(pa->pa_memex, start, end, size,
620 		    size, 0, 0, 0, &base) == 0)
621 			pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, base);
622 	}
623 #endif
624 
625 #ifdef notyet
626 	mtx_init(&rdev->swi_lock, IPL_TTY);
627 #endif
628 
629 	/* update BUS flag */
630 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP, NULL, NULL)) {
631 		rdev->flags |= RADEON_IS_AGP;
632 	} else if (pci_get_capability(pa->pa_pc, pa->pa_tag,
633 	    PCI_CAP_PCIEXPRESS, NULL, NULL)) {
634 		rdev->flags |= RADEON_IS_PCIE;
635 	} else {
636 		rdev->flags |= RADEON_IS_PCI;
637 	}
638 
639 	if ((radeon_runtime_pm != 0) &&
640 	    radeon_has_atpx() &&
641 	    ((rdev->flags & RADEON_IS_IGP) == 0))
642 		rdev->flags |= RADEON_IS_PX;
643 
644 	DRM_DEBUG("%s card detected\n",
645 		 ((rdev->flags & RADEON_IS_AGP) ? "AGP" :
646 		 (((rdev->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
647 
648 	is_agp = pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
649 	    NULL, NULL);
650 
651 	printf("\n");
652 
653 	kms_driver.num_ioctls = radeon_max_kms_ioctl;
654 	kms_driver.driver_features |= DRIVER_MODESET;
655 
656 	dev = drm_attach_pci(&kms_driver, pa, is_agp, rdev->primary,
657 	    self, NULL);
658 	rdev->ddev = dev;
659 	rdev->pdev = dev->pdev;
660 
661 	if (!radeon_msi_ok(rdev))
662 		pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
663 
664 	rdev->msi_enabled = 0;
665 	if (pci_intr_map_msi(pa, &rdev->intrh) == 0)
666 		rdev->msi_enabled = 1;
667 	else if (pci_intr_map(pa, &rdev->intrh) != 0) {
668 		printf(": couldn't map interrupt\n");
669 		return;
670 	}
671 	printf("%s: %s\n", rdev->self.dv_xname,
672 	    pci_intr_string(pa->pa_pc, rdev->intrh));
673 
674 	rdev->irqh = pci_intr_establish(pa->pa_pc, rdev->intrh, IPL_TTY,
675 	    radeon_driver_irq_handler_kms, rdev->ddev, rdev->self.dv_xname);
676 	if (rdev->irqh == NULL) {
677 		printf("%s: couldn't establish interrupt\n",
678 		    rdev->self.dv_xname);
679 		return;
680 	}
681 	rdev->pdev->irq = -1;
682 
683 #ifdef __sparc64__
684 {
685 	struct rasops_info *ri;
686 	int node, console;
687 
688 	node = PCITAG_NODE(pa->pa_tag);
689 	console = (fbnode == node);
690 
691 	fb_setsize(&rdev->sf, 8, 1152, 900, node, 0);
692 
693 	/*
694 	 * The firmware sets up the framebuffer such that at starts at
695 	 * an offset from the start of video memory.
696 	 */
697 	rdev->fb_offset =
698 	    bus_space_read_4(rdev->memt, rdev->rmmio_bsh, RADEON_CRTC_OFFSET);
699 	if (bus_space_map(rdev->memt, rdev->fb_aper_offset + rdev->fb_offset,
700 	    rdev->sf.sf_fbsize, BUS_SPACE_MAP_LINEAR, &rdev->memh)) {
701 		printf("%s: can't map video memory\n", rdev->self.dv_xname);
702 		return;
703 	}
704 
705 	ri = &rdev->sf.sf_ro;
706 	ri->ri_bits = bus_space_vaddr(rdev->memt, rdev->memh);
707 	ri->ri_hw = rdev;
708 	ri->ri_updatecursor = NULL;
709 
710 	fbwscons_init(&rdev->sf, RI_VCONS | RI_WRONLY | RI_BSWAP, console);
711 	if (console)
712 		fbwscons_console_init(&rdev->sf, -1);
713 }
714 #endif
715 
716 	fb_aper = bus_space_mmap(rdev->memt, rdev->fb_aper_offset, 0, 0, 0);
717 	if (fb_aper != -1)
718 		rasops_claim_framebuffer(fb_aper, rdev->fb_aper_size, self);
719 
720 	rdev->shutdown = true;
721 	config_mountroot(self, radeondrm_attachhook);
722 }
723 
/*
 * Called after a fatal error in the mountroot hook: detach ourselves
 * and hand the display back to a generic driver (reprobe the PCI
 * device on non-EFI systems, reattach efifb(4) otherwise).
 */
int
radeondrm_forcedetach(struct radeon_device *rdev)
{
	struct pci_softc	*sc = (struct pci_softc *)rdev->self.dv_parent;
	pcitag_t		 tag = rdev->pa_tag;

#if NVGA > 0
	/* Let vga(4) claim the console again. */
	if (rdev->primary)
		vga_console_attached = 0;
#endif

	/* reprobe pci device for non efi systems */
#if NEFIFB > 0
	if (bios_efiinfo == NULL && !efifb_cb_found()) {
#endif
		config_detach(&rdev->self, 0);
		return pci_probe_device(sc, tag, NULL, NULL);
#if NEFIFB > 0
	} else if (rdev->primary) {
		efifb_reattach();
	}
#endif

	return 0;
}
749 
/*
 * Mountroot hook: perform the firmware-dependent part of attachment
 * (GPU core init, modeset init), then attach the wsdisplay console
 * layer and register the drm device.  On fatal error, record it and
 * force-detach so a generic framebuffer driver can take over.
 */
void
radeondrm_attachhook(struct device *self)
{
	struct radeon_device	*rdev = (struct radeon_device *)self;
	int			 r, acpi_status;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	/*
	 * NOTE(review): `dev' is not declared in this function; the
	 * dev_err() and pm_runtime_*() calls below that reference it
	 * can only compile if the drm compat macros ignore their
	 * device argument -- verify against the compat headers.
	 */
	r = radeon_device_init(rdev, rdev->ddev, rdev->ddev->pdev, rdev->flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		/* Remember the failure so radeondrm_probe() refuses. */
		radeon_fatal_error = 1;
		radeondrm_forcedetach(rdev);
		return;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			DRM_DEBUG("Error during ACPI methods call\n");
	}

#ifdef notyet
	radeon_kfd_device_probe(rdev);
	radeon_kfd_device_init(rdev);
#endif

	/* Runtime PM setup for PX (hybrid graphics) systems. */
	if (radeon_is_px(rdev->ddev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

/* Attach the wsdisplay console layer on top of the drm framebuffer. */
{
	struct wsemuldisplaydev_attach_args aa;
	struct rasops_info *ri = &rdev->ro;

	task_set(&rdev->switchtask, radeondrm_doswitch, ri);

	/* No framebuffer memory: no console to attach. */
	if (ri->ri_bits == NULL)
		return;

#ifdef __sparc64__
	fbwscons_setcolormap(&rdev->sf, radeondrm_setcolor);
#endif

#ifndef __sparc64__
	ri->ri_flg = RI_CENTER | RI_VCONS | RI_WRONLY;
	rasops_init(ri, 160, 160);

	ri->ri_hw = rdev;
#else
	/* sparc64 uses the fbwscons-managed rasops instance instead. */
	ri = &rdev->sf.sf_ro;
#endif

	/* Fill in the screen descriptor from the live rasops state. */
	radeondrm_stdscreen.capabilities = ri->ri_caps;
	radeondrm_stdscreen.nrows = ri->ri_rows;
	radeondrm_stdscreen.ncols = ri->ri_cols;
	radeondrm_stdscreen.textops = &ri->ri_ops;
	radeondrm_stdscreen.fontwidth = ri->ri_font->fontwidth;
	radeondrm_stdscreen.fontheight = ri->ri_font->fontheight;

	aa.console = rdev->console;
	aa.primary = rdev->primary;
	aa.scrdata = &radeondrm_screenlist;
	aa.accessops = &radeondrm_accessops;
	aa.accesscookie = ri;
	aa.defaultscreens = 0;

	if (rdev->console) {
		uint32_t defattr;

		ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr);
		wsdisplay_cnattach(&radeondrm_stdscreen, ri->ri_active,
		    ri->ri_ccol, ri->ri_crow, defattr);
	}

	/*
	 * Now that we've taken over the console, disable decoding of
	 * VGA legacy addresses, and opt out of arbitration.
	 */
	radeon_vga_set_state(rdev, false);
	pci_disable_legacy_vga(&rdev->self);

	printf("%s: %dx%d, %dbpp\n", rdev->self.dv_xname,
	    ri->ri_width, ri->ri_height, ri->ri_depth);

	config_found_sm(&rdev->self, &aa, wsemuldisplaydevprint,
	    wsemuldisplaydevsubmatch);

	/*
	 * in linux via radeon_pci_probe -> drm_get_pci_dev -> drm_dev_register
	 */
	drm_dev_register(rdev->ddev, rdev->flags);
}
}
864 
865 int
866 radeondrm_activate_kms(struct device *self, int act)
867 {
868 	struct radeon_device *rdev = (struct radeon_device *)self;
869 	int rv = 0;
870 
871 	if (rdev->ddev == NULL)
872 		return (0);
873 
874 	switch (act) {
875 	case DVACT_QUIESCE:
876 		rv = config_activate_children(self, act);
877 		radeon_suspend_kms(rdev->ddev, true, true, false);
878 		break;
879 	case DVACT_SUSPEND:
880 		break;
881 	case DVACT_RESUME:
882 		break;
883 	case DVACT_WAKEUP:
884 		radeon_resume_kms(rdev->ddev, true, true);
885 		rv = config_activate_children(self, act);
886 		break;
887 	}
888 
889 	return (rv);
890 }
891 
892 
893 /**
894  * radeon_set_filp_rights - Set filp right.
895  *
896  * @dev: drm dev pointer
897  * @owner: drm file
898  * @applier: drm file
899  * @value: value
900  *
901  * Sets the filp rights for the device (all asics).
902  */
903 static void radeon_set_filp_rights(struct drm_device *dev,
904 				   struct drm_file **owner,
905 				   struct drm_file *applier,
906 				   uint32_t *value)
907 {
908 	struct radeon_device *rdev = dev->dev_private;
909 
910 	mutex_lock(&rdev->gem.mutex);
911 	if (*value == 1) {
912 		/* wants rights */
913 		if (!*owner)
914 			*owner = applier;
915 	} else if (*value == 0) {
916 		/* revokes rights */
917 		if (*owner == applier)
918 			*owner = NULL;
919 	}
920 	*value = *owner == applier ? 1 : 0;
921 	mutex_unlock(&rdev->gem.mutex);
922 }
923 
924 /*
925  * Userspace get information ioctl
926  */
927 /**
928  * radeon_info_ioctl - answer a device specific request.
929  *
930  * @rdev: radeon device pointer
931  * @data: request object
932  * @filp: drm filp
933  *
934  * This function is used to pass device specific parameters to the userspace
935  * drivers.  Examples include: pci device id, pipeline parms, tiling params,
936  * etc. (all asics).
937  * Returns 0 on success, -EINVAL on failure.
938  */
939 static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
940 {
941 	struct radeon_device *rdev = dev->dev_private;
942 	struct drm_radeon_info *info = data;
943 	struct radeon_mode_info *minfo = &rdev->mode_info;
944 	uint32_t *value, value_tmp, *value_ptr, value_size;
945 	uint64_t value64;
946 	struct drm_crtc *crtc;
947 	int i, found;
948 
949 	value_ptr = (uint32_t *)((unsigned long)info->value);
950 	value = &value_tmp;
951 	value_size = sizeof(uint32_t);
952 
953 	switch (info->request) {
954 	case RADEON_INFO_DEVICE_ID:
955 		*value = dev->pdev->device;
956 		break;
957 	case RADEON_INFO_NUM_GB_PIPES:
958 		*value = rdev->num_gb_pipes;
959 		break;
960 	case RADEON_INFO_NUM_Z_PIPES:
961 		*value = rdev->num_z_pipes;
962 		break;
963 	case RADEON_INFO_ACCEL_WORKING:
964 		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
965 		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
966 			*value = false;
967 		else
968 			*value = rdev->accel_working;
969 		break;
970 	case RADEON_INFO_CRTC_FROM_ID:
971 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
972 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
973 			return -EFAULT;
974 		}
975 		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
976 			crtc = (struct drm_crtc *)minfo->crtcs[i];
977 			if (crtc && crtc->base.id == *value) {
978 				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
979 				*value = radeon_crtc->crtc_id;
980 				found = 1;
981 				break;
982 			}
983 		}
984 		if (!found) {
985 			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
986 			return -EINVAL;
987 		}
988 		break;
989 	case RADEON_INFO_ACCEL_WORKING2:
990 		if (rdev->family == CHIP_HAWAII) {
991 			if (rdev->accel_working) {
992 				if (rdev->new_fw)
993 					*value = 3;
994 				else
995 					*value = 2;
996 			} else {
997 				*value = 0;
998 			}
999 		} else {
1000 			*value = rdev->accel_working;
1001 		}
1002 		break;
1003 	case RADEON_INFO_TILING_CONFIG:
1004 		if (rdev->family >= CHIP_BONAIRE)
1005 			*value = rdev->config.cik.tile_config;
1006 		else if (rdev->family >= CHIP_TAHITI)
1007 			*value = rdev->config.si.tile_config;
1008 		else if (rdev->family >= CHIP_CAYMAN)
1009 			*value = rdev->config.cayman.tile_config;
1010 		else if (rdev->family >= CHIP_CEDAR)
1011 			*value = rdev->config.evergreen.tile_config;
1012 		else if (rdev->family >= CHIP_RV770)
1013 			*value = rdev->config.rv770.tile_config;
1014 		else if (rdev->family >= CHIP_R600)
1015 			*value = rdev->config.r600.tile_config;
1016 		else {
1017 			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
1018 			return -EINVAL;
1019 		}
1020 		break;
1021 	case RADEON_INFO_WANT_HYPERZ:
1022 		/* The "value" here is both an input and output parameter.
1023 		 * If the input value is 1, filp requests hyper-z access.
1024 		 * If the input value is 0, filp revokes its hyper-z access.
1025 		 *
1026 		 * When returning, the value is 1 if filp owns hyper-z access,
1027 		 * 0 otherwise. */
1028 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
1029 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
1030 			return -EFAULT;
1031 		}
1032 		if (*value >= 2) {
1033 			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
1034 			return -EINVAL;
1035 		}
1036 		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
1037 		break;
1038 	case RADEON_INFO_WANT_CMASK:
1039 		/* The same logic as Hyper-Z. */
1040 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
1041 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
1042 			return -EFAULT;
1043 		}
1044 		if (*value >= 2) {
1045 			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
1046 			return -EINVAL;
1047 		}
1048 		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
1049 		break;
1050 	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
1051 		/* return clock value in KHz */
1052 		if (rdev->asic->get_xclk)
1053 			*value = radeon_get_xclk(rdev) * 10;
1054 		else
1055 			*value = rdev->clock.spll.reference_freq * 10;
1056 		break;
1057 	case RADEON_INFO_NUM_BACKENDS:
1058 		if (rdev->family >= CHIP_BONAIRE)
1059 			*value = rdev->config.cik.max_backends_per_se *
1060 				rdev->config.cik.max_shader_engines;
1061 		else if (rdev->family >= CHIP_TAHITI)
1062 			*value = rdev->config.si.max_backends_per_se *
1063 				rdev->config.si.max_shader_engines;
1064 		else if (rdev->family >= CHIP_CAYMAN)
1065 			*value = rdev->config.cayman.max_backends_per_se *
1066 				rdev->config.cayman.max_shader_engines;
1067 		else if (rdev->family >= CHIP_CEDAR)
1068 			*value = rdev->config.evergreen.max_backends;
1069 		else if (rdev->family >= CHIP_RV770)
1070 			*value = rdev->config.rv770.max_backends;
1071 		else if (rdev->family >= CHIP_R600)
1072 			*value = rdev->config.r600.max_backends;
1073 		else {
1074 			return -EINVAL;
1075 		}
1076 		break;
1077 	case RADEON_INFO_NUM_TILE_PIPES:
1078 		if (rdev->family >= CHIP_BONAIRE)
1079 			*value = rdev->config.cik.max_tile_pipes;
1080 		else if (rdev->family >= CHIP_TAHITI)
1081 			*value = rdev->config.si.max_tile_pipes;
1082 		else if (rdev->family >= CHIP_CAYMAN)
1083 			*value = rdev->config.cayman.max_tile_pipes;
1084 		else if (rdev->family >= CHIP_CEDAR)
1085 			*value = rdev->config.evergreen.max_tile_pipes;
1086 		else if (rdev->family >= CHIP_RV770)
1087 			*value = rdev->config.rv770.max_tile_pipes;
1088 		else if (rdev->family >= CHIP_R600)
1089 			*value = rdev->config.r600.max_tile_pipes;
1090 		else {
1091 			return -EINVAL;
1092 		}
1093 		break;
1094 	case RADEON_INFO_FUSION_GART_WORKING:
1095 		*value = 1;
1096 		break;
1097 	case RADEON_INFO_BACKEND_MAP:
1098 		if (rdev->family >= CHIP_BONAIRE)
1099 			*value = rdev->config.cik.backend_map;
1100 		else if (rdev->family >= CHIP_TAHITI)
1101 			*value = rdev->config.si.backend_map;
1102 		else if (rdev->family >= CHIP_CAYMAN)
1103 			*value = rdev->config.cayman.backend_map;
1104 		else if (rdev->family >= CHIP_CEDAR)
1105 			*value = rdev->config.evergreen.backend_map;
1106 		else if (rdev->family >= CHIP_RV770)
1107 			*value = rdev->config.rv770.backend_map;
1108 		else if (rdev->family >= CHIP_R600)
1109 			*value = rdev->config.r600.backend_map;
1110 		else {
1111 			return -EINVAL;
1112 		}
1113 		break;
1114 	case RADEON_INFO_VA_START:
1115 		/* this is where we report if vm is supported or not */
1116 		if (rdev->family < CHIP_CAYMAN)
1117 			return -EINVAL;
1118 		*value = RADEON_VA_RESERVED_SIZE;
1119 		break;
1120 	case RADEON_INFO_IB_VM_MAX_SIZE:
1121 		/* this is where we report if vm is supported or not */
1122 		if (rdev->family < CHIP_CAYMAN)
1123 			return -EINVAL;
1124 		*value = RADEON_IB_VM_MAX_SIZE;
1125 		break;
1126 	case RADEON_INFO_MAX_PIPES:
1127 		if (rdev->family >= CHIP_BONAIRE)
1128 			*value = rdev->config.cik.max_cu_per_sh;
1129 		else if (rdev->family >= CHIP_TAHITI)
1130 			*value = rdev->config.si.max_cu_per_sh;
1131 		else if (rdev->family >= CHIP_CAYMAN)
1132 			*value = rdev->config.cayman.max_pipes_per_simd;
1133 		else if (rdev->family >= CHIP_CEDAR)
1134 			*value = rdev->config.evergreen.max_pipes;
1135 		else if (rdev->family >= CHIP_RV770)
1136 			*value = rdev->config.rv770.max_pipes;
1137 		else if (rdev->family >= CHIP_R600)
1138 			*value = rdev->config.r600.max_pipes;
1139 		else {
1140 			return -EINVAL;
1141 		}
1142 		break;
1143 	case RADEON_INFO_TIMESTAMP:
1144 		if (rdev->family < CHIP_R600) {
1145 			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
1146 			return -EINVAL;
1147 		}
1148 		value = (uint32_t*)&value64;
1149 		value_size = sizeof(uint64_t);
1150 		value64 = radeon_get_gpu_clock_counter(rdev);
1151 		break;
1152 	case RADEON_INFO_MAX_SE:
1153 		if (rdev->family >= CHIP_BONAIRE)
1154 			*value = rdev->config.cik.max_shader_engines;
1155 		else if (rdev->family >= CHIP_TAHITI)
1156 			*value = rdev->config.si.max_shader_engines;
1157 		else if (rdev->family >= CHIP_CAYMAN)
1158 			*value = rdev->config.cayman.max_shader_engines;
1159 		else if (rdev->family >= CHIP_CEDAR)
1160 			*value = rdev->config.evergreen.num_ses;
1161 		else
1162 			*value = 1;
1163 		break;
1164 	case RADEON_INFO_MAX_SH_PER_SE:
1165 		if (rdev->family >= CHIP_BONAIRE)
1166 			*value = rdev->config.cik.max_sh_per_se;
1167 		else if (rdev->family >= CHIP_TAHITI)
1168 			*value = rdev->config.si.max_sh_per_se;
1169 		else
1170 			return -EINVAL;
1171 		break;
1172 	case RADEON_INFO_FASTFB_WORKING:
1173 		*value = rdev->fastfb_working;
1174 		break;
1175 	case RADEON_INFO_RING_WORKING:
1176 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
1177 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
1178 			return -EFAULT;
1179 		}
1180 		switch (*value) {
1181 		case RADEON_CS_RING_GFX:
1182 		case RADEON_CS_RING_COMPUTE:
1183 			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
1184 			break;
1185 		case RADEON_CS_RING_DMA:
1186 			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
1187 			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
1188 			break;
1189 		case RADEON_CS_RING_UVD:
1190 			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
1191 			break;
1192 		case RADEON_CS_RING_VCE:
1193 			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
1194 			break;
1195 		default:
1196 			return -EINVAL;
1197 		}
1198 		break;
1199 	case RADEON_INFO_SI_TILE_MODE_ARRAY:
1200 		if (rdev->family >= CHIP_BONAIRE) {
1201 			value = rdev->config.cik.tile_mode_array;
1202 			value_size = sizeof(uint32_t)*32;
1203 		} else if (rdev->family >= CHIP_TAHITI) {
1204 			value = rdev->config.si.tile_mode_array;
1205 			value_size = sizeof(uint32_t)*32;
1206 		} else {
1207 			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
1208 			return -EINVAL;
1209 		}
1210 		break;
1211 	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
1212 		if (rdev->family >= CHIP_BONAIRE) {
1213 			value = rdev->config.cik.macrotile_mode_array;
1214 			value_size = sizeof(uint32_t)*16;
1215 		} else {
1216 			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
1217 			return -EINVAL;
1218 		}
1219 		break;
1220 	case RADEON_INFO_SI_CP_DMA_COMPUTE:
1221 		*value = 1;
1222 		break;
1223 	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
1224 		if (rdev->family >= CHIP_BONAIRE) {
1225 			*value = rdev->config.cik.backend_enable_mask;
1226 		} else if (rdev->family >= CHIP_TAHITI) {
1227 			*value = rdev->config.si.backend_enable_mask;
1228 		} else {
1229 			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
1230 		}
1231 		break;
1232 	case RADEON_INFO_MAX_SCLK:
1233 		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
1234 		    rdev->pm.dpm_enabled)
1235 			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
1236 		else
1237 			*value = rdev->pm.default_sclk * 10;
1238 		break;
1239 	case RADEON_INFO_VCE_FW_VERSION:
1240 		*value = rdev->vce.fw_version;
1241 		break;
1242 	case RADEON_INFO_VCE_FB_VERSION:
1243 		*value = rdev->vce.fb_version;
1244 		break;
1245 	case RADEON_INFO_NUM_BYTES_MOVED:
1246 		value = (uint32_t*)&value64;
1247 		value_size = sizeof(uint64_t);
1248 		value64 = atomic64_read(&rdev->num_bytes_moved);
1249 		break;
1250 	case RADEON_INFO_VRAM_USAGE:
1251 		value = (uint32_t*)&value64;
1252 		value_size = sizeof(uint64_t);
1253 		value64 = atomic64_read(&rdev->vram_usage);
1254 		break;
1255 	case RADEON_INFO_GTT_USAGE:
1256 		value = (uint32_t*)&value64;
1257 		value_size = sizeof(uint64_t);
1258 		value64 = atomic64_read(&rdev->gtt_usage);
1259 		break;
1260 	case RADEON_INFO_ACTIVE_CU_COUNT:
1261 		if (rdev->family >= CHIP_BONAIRE)
1262 			*value = rdev->config.cik.active_cus;
1263 		else if (rdev->family >= CHIP_TAHITI)
1264 			*value = rdev->config.si.active_cus;
1265 		else if (rdev->family >= CHIP_CAYMAN)
1266 			*value = rdev->config.cayman.active_simds;
1267 		else if (rdev->family >= CHIP_CEDAR)
1268 			*value = rdev->config.evergreen.active_simds;
1269 		else if (rdev->family >= CHIP_RV770)
1270 			*value = rdev->config.rv770.active_simds;
1271 		else if (rdev->family >= CHIP_R600)
1272 			*value = rdev->config.r600.active_simds;
1273 		else
1274 			*value = 1;
1275 		break;
1276 	case RADEON_INFO_CURRENT_GPU_TEMP:
1277 		/* get temperature in millidegrees C */
1278 		if (rdev->asic->pm.get_temperature)
1279 			*value = radeon_get_temperature(rdev);
1280 		else
1281 			*value = 0;
1282 		break;
1283 	case RADEON_INFO_CURRENT_GPU_SCLK:
1284 		/* get sclk in Mhz */
1285 		if (rdev->pm.dpm_enabled)
1286 			*value = radeon_dpm_get_current_sclk(rdev) / 100;
1287 		else
1288 			*value = rdev->pm.current_sclk / 100;
1289 		break;
1290 	case RADEON_INFO_CURRENT_GPU_MCLK:
1291 		/* get mclk in Mhz */
1292 		if (rdev->pm.dpm_enabled)
1293 			*value = radeon_dpm_get_current_mclk(rdev) / 100;
1294 		else
1295 			*value = rdev->pm.current_mclk / 100;
1296 		break;
1297 	case RADEON_INFO_READ_REG:
1298 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
1299 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
1300 			return -EFAULT;
1301 		}
1302 		if (radeon_get_allowed_info_register(rdev, *value, value))
1303 			return -EINVAL;
1304 		break;
1305 	case RADEON_INFO_VA_UNMAP_WORKING:
1306 		*value = true;
1307 		break;
1308 	case RADEON_INFO_GPU_RESET_COUNTER:
1309 		*value = atomic_read(&rdev->gpu_reset_counter);
1310 		break;
1311 	default:
1312 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
1313 		return -EINVAL;
1314 	}
1315 	if (copy_to_user(value_ptr, (char*)value, value_size)) {
1316 		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
1317 		return -EFAULT;
1318 	}
1319 	return 0;
1320 }
1321 
1322 
1323 /*
1324  * Outdated mess for old drm with Xorg being in charge (void function now).
1325  */
1326 /**
1327  * radeon_driver_lastclose_kms - drm callback for last close
1328  *
1329  * @dev: drm dev pointer
1330  *
1331  * Switch vga_switcheroo state after last close (all asics).
1332  */
1333 void radeon_driver_lastclose_kms(struct drm_device *dev)
1334 {
1335 #ifdef __sparc64__
1336 	struct radeon_device *rdev = dev->dev_private;
1337 	fbwscons_setcolormap(&rdev->sf, radeondrm_setcolor);
1338 #endif
1339 	drm_fb_helper_lastclose(dev);
1340 	vga_switcheroo_process_delayed_switch();
1341 }
1342 
1343 /**
1344  * radeon_driver_open_kms - drm callback for open
1345  *
1346  * @dev: drm dev pointer
1347  * @file_priv: drm file
1348  *
1349  * On device open, init vm on cayman+ (all asics).
1350  * Returns 0 on success, error on failure.
1351  */
1352 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
1353 {
1354 	struct radeon_device *rdev = dev->dev_private;
1355 	int r;
1356 
1357 	file_priv->driver_priv = NULL;
1358 
1359 	r = pm_runtime_get_sync(dev->dev);
1360 	if (r < 0)
1361 		return r;
1362 
1363 	/* new gpu have virtual address space support */
1364 	if (rdev->family >= CHIP_CAYMAN) {
1365 		struct radeon_fpriv *fpriv;
1366 		struct radeon_vm *vm;
1367 
1368 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
1369 		if (unlikely(!fpriv)) {
1370 			r = -ENOMEM;
1371 			goto out_suspend;
1372 		}
1373 
1374 		if (rdev->accel_working) {
1375 			vm = &fpriv->vm;
1376 			r = radeon_vm_init(rdev, vm);
1377 			if (r) {
1378 				kfree(fpriv);
1379 				goto out_suspend;
1380 			}
1381 
1382 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
1383 			if (r) {
1384 				radeon_vm_fini(rdev, vm);
1385 				kfree(fpriv);
1386 				goto out_suspend;
1387 			}
1388 
1389 			/* map the ib pool buffer read only into
1390 			 * virtual address space */
1391 			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
1392 							rdev->ring_tmp_bo.bo);
1393 			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
1394 						  RADEON_VA_IB_OFFSET,
1395 						  RADEON_VM_PAGE_READABLE |
1396 						  RADEON_VM_PAGE_SNOOPED);
1397 			if (r) {
1398 				radeon_vm_fini(rdev, vm);
1399 				kfree(fpriv);
1400 				goto out_suspend;
1401 			}
1402 		}
1403 		file_priv->driver_priv = fpriv;
1404 	}
1405 
1406 out_suspend:
1407 	pm_runtime_mark_last_busy(dev->dev);
1408 	pm_runtime_put_autosuspend(dev->dev);
1409 	return r;
1410 }
1411 
1412 /**
1413  * radeon_driver_postclose_kms - drm callback for post close
1414  *
1415  * @dev: drm dev pointer
1416  * @file_priv: drm file
1417  *
1418  * On device close, tear down hyperz and cmask filps on r1xx-r5xx
1419  * (all asics).  And tear down vm on cayman+ (all asics).
1420  */
1421 void radeon_driver_postclose_kms(struct drm_device *dev,
1422 				 struct drm_file *file_priv)
1423 {
1424 	struct radeon_device *rdev = dev->dev_private;
1425 
1426 	pm_runtime_get_sync(dev->dev);
1427 
1428 	mutex_lock(&rdev->gem.mutex);
1429 	if (rdev->hyperz_filp == file_priv)
1430 		rdev->hyperz_filp = NULL;
1431 	if (rdev->cmask_filp == file_priv)
1432 		rdev->cmask_filp = NULL;
1433 	mutex_unlock(&rdev->gem.mutex);
1434 
1435 	radeon_uvd_free_handles(rdev, file_priv);
1436 	radeon_vce_free_handles(rdev, file_priv);
1437 
1438 	/* new gpu have virtual address space support */
1439 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
1440 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
1441 		struct radeon_vm *vm = &fpriv->vm;
1442 		int r;
1443 
1444 		if (rdev->accel_working) {
1445 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
1446 			if (!r) {
1447 				if (vm->ib_bo_va)
1448 					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
1449 				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
1450 			}
1451 			radeon_vm_fini(rdev, vm);
1452 		}
1453 
1454 		kfree(fpriv);
1455 		file_priv->driver_priv = NULL;
1456 	}
1457 	pm_runtime_mark_last_busy(dev->dev);
1458 	pm_runtime_put_autosuspend(dev->dev);
1459 }
1460 
1461 /*
1462  * VBlank related functions.
1463  */
1464 /**
1465  * radeon_get_vblank_counter_kms - get frame count
1466  *
1467  * @crtc: crtc to get the frame count from
1468  *
1469  * Gets the frame count on the requested crtc (all asics).
1470  * Returns frame count on success, -EINVAL on failure.
1471  */
1472 u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
1473 {
1474 	struct drm_device *dev = crtc->dev;
1475 	unsigned int pipe = crtc->index;
1476 	int vpos, hpos, stat;
1477 	u32 count;
1478 	struct radeon_device *rdev = dev->dev_private;
1479 
1480 	if (pipe >= rdev->num_crtc) {
1481 		DRM_ERROR("Invalid crtc %u\n", pipe);
1482 		return -EINVAL;
1483 	}
1484 
1485 	/* The hw increments its frame counter at start of vsync, not at start
1486 	 * of vblank, as is required by DRM core vblank counter handling.
1487 	 * Cook the hw count here to make it appear to the caller as if it
1488 	 * incremented at start of vblank. We measure distance to start of
1489 	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
1490 	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
1491 	 * result by 1 to give the proper appearance to caller.
1492 	 */
1493 	if (rdev->mode_info.crtcs[pipe]) {
1494 		/* Repeat readout if needed to provide stable result if
1495 		 * we cross start of vsync during the queries.
1496 		 */
1497 		do {
1498 			count = radeon_get_vblank_counter(rdev, pipe);
1499 			/* Ask radeon_get_crtc_scanoutpos to return vpos as
1500 			 * distance to start of vblank, instead of regular
1501 			 * vertical scanout pos.
1502 			 */
1503 			stat = radeon_get_crtc_scanoutpos(
1504 				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
1505 				&vpos, &hpos, NULL, NULL,
1506 				&rdev->mode_info.crtcs[pipe]->base.hwmode);
1507 		} while (count != radeon_get_vblank_counter(rdev, pipe));
1508 
1509 		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
1510 		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
1511 			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
1512 		}
1513 		else {
1514 			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
1515 				      pipe, vpos);
1516 
1517 			/* Bump counter if we are at >= leading edge of vblank,
1518 			 * but before vsync where vpos would turn negative and
1519 			 * the hw counter really increments.
1520 			 */
1521 			if (vpos >= 0)
1522 				count++;
1523 		}
1524 	}
1525 	else {
1526 	    /* Fallback to use value as is. */
1527 	    count = radeon_get_vblank_counter(rdev, pipe);
1528 	    DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
1529 	}
1530 
1531 	return count;
1532 }
1533 
1534 /**
1535  * radeon_enable_vblank_kms - enable vblank interrupt
1536  *
1537  * @crtc: crtc to enable vblank interrupt for
1538  *
1539  * Enable the interrupt on the requested crtc (all asics).
1540  * Returns 0 on success, -EINVAL on failure.
1541  */
1542 int radeon_enable_vblank_kms(struct drm_crtc *crtc)
1543 {
1544 	struct drm_device *dev = crtc->dev;
1545 	unsigned int pipe = crtc->index;
1546 	struct radeon_device *rdev = dev->dev_private;
1547 	unsigned long irqflags;
1548 	int r;
1549 
1550 	if (pipe < 0 || pipe >= rdev->num_crtc) {
1551 		DRM_ERROR("Invalid crtc %d\n", pipe);
1552 		return -EINVAL;
1553 	}
1554 
1555 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
1556 	rdev->irq.crtc_vblank_int[pipe] = true;
1557 	r = radeon_irq_set(rdev);
1558 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
1559 	return r;
1560 }
1561 
1562 /**
1563  * radeon_disable_vblank_kms - disable vblank interrupt
1564  *
1565  * @crtc: crtc to disable vblank interrupt for
1566  *
1567  * Disable the interrupt on the requested crtc (all asics).
1568  */
1569 void radeon_disable_vblank_kms(struct drm_crtc *crtc)
1570 {
1571 	struct drm_device *dev = crtc->dev;
1572 	unsigned int pipe = crtc->index;
1573 	struct radeon_device *rdev = dev->dev_private;
1574 	unsigned long irqflags;
1575 
1576 	if (pipe < 0 || pipe >= rdev->num_crtc) {
1577 		DRM_ERROR("Invalid crtc %d\n", pipe);
1578 		return;
1579 	}
1580 
1581 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
1582 	rdev->irq.crtc_vblank_int[pipe] = false;
1583 	radeon_irq_set(rdev);
1584 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
1585 }
1586 
/* Ioctl dispatch table for the radeon KMS driver.  The first group is the
 * legacy pre-KMS (UMS) interface: every entry points at drm_invalid_op so
 * old userspace gets a clean error instead of driver behavior. */
const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
/* Number of entries in the table above, exported for the drm_driver setup. */
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
1633