1 /* $NetBSD: radeon_device.c,v 1.14 2021/12/19 12:02:20 riastradh Exp $ */
2
3 /*
4 * Copyright 2008 Advanced Micro Devices, Inc.
5 * Copyright 2008 Red Hat Inc.
6 * Copyright 2009 Jerome Glisse.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors: Dave Airlie
27 * Alex Deucher
28 * Jerome Glisse
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.14 2021/12/19 12:02:20 riastradh Exp $");
33
34 #include <linux/console.h>
35 #include <linux/efi.h>
36 #include <linux/pci.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/slab.h>
39 #include <linux/vga_switcheroo.h>
40 #include <linux/vgaarb.h>
41
42 #include <drm/drm_cache.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_debugfs.h>
45 #include <drm/drm_device.h>
46 #include <drm/drm_file.h>
47 #include <drm/drm_probe_helper.h>
48 #include <drm/radeon_drm.h>
49
50 #include "radeon_reg.h"
51 #include "radeon.h"
52 #include "atom.h"
53
54 #include <linux/nbsd-namespace.h>
55
56 static const char radeon_family_name[][16] = {
57 "R100",
58 "RV100",
59 "RS100",
60 "RV200",
61 "RS200",
62 "R200",
63 "RV250",
64 "RS300",
65 "RV280",
66 "R300",
67 "R350",
68 "RV350",
69 "RV380",
70 "R420",
71 "R423",
72 "RV410",
73 "RS400",
74 "RS480",
75 "RS600",
76 "RS690",
77 "RS740",
78 "RV515",
79 "R520",
80 "RV530",
81 "RV560",
82 "RV570",
83 "R580",
84 "R600",
85 "RV610",
86 "RV630",
87 "RV670",
88 "RV620",
89 "RV635",
90 "RS780",
91 "RS880",
92 "RV770",
93 "RV730",
94 "RV710",
95 "RV740",
96 "CEDAR",
97 "REDWOOD",
98 "JUNIPER",
99 "CYPRESS",
100 "HEMLOCK",
101 "PALM",
102 "SUMO",
103 "SUMO2",
104 "BARTS",
105 "TURKS",
106 "CAICOS",
107 "CAYMAN",
108 "ARUBA",
109 "TAHITI",
110 "PITCAIRN",
111 "VERDE",
112 "OLAND",
113 "HAINAN",
114 "BONAIRE",
115 "KAVERI",
116 "KABINI",
117 "HAWAII",
118 "MULLINS",
119 "LAST",
120 };
121
122 #if defined(CONFIG_VGA_SWITCHEROO)
123 bool radeon_has_atpx_dgpu_power_cntl(void);
124 bool radeon_is_atpx_hybrid(void);
125 #else
126 static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
127 static inline bool radeon_is_atpx_hybrid(void) { return false; }
128 #endif
129
130 #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
131
132 struct radeon_px_quirk {
133 u32 chip_vendor;
134 u32 chip_device;
135 u32 subsys_vendor;
136 u32 subsys_device;
137 u32 px_quirk_flags;
138 };
139
140 static struct radeon_px_quirk radeon_px_quirk_list[] = {
141 /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
142 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
143 */
144 { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
145 /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
146 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
147 */
148 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
149 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
150 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
151 */
152 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
153 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
154 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
155 */
156 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
157 /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
158 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
159 */
160 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
161 { 0, 0, 0, 0, 0 },
162 };
163
164 bool radeon_is_px(struct drm_device *dev)
165 {
166 struct radeon_device *rdev = dev->dev_private;
167
168 if (rdev->flags & RADEON_IS_PX)
169 return true;
170 return false;
171 }
172
173 static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
174 {
175 struct radeon_px_quirk *p = radeon_px_quirk_list;
176
177 /* Apply PX quirks */
178 while (p && p->chip_device != 0) {
179 if (rdev->pdev->vendor == p->chip_vendor &&
180 rdev->pdev->device == p->chip_device &&
181 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
182 rdev->pdev->subsystem_device == p->subsys_device) {
183 rdev->px_quirk_flags = p->px_quirk_flags;
184 break;
185 }
186 ++p;
187 }
188
189 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
190 rdev->flags &= ~RADEON_IS_PX;
191
192 /* disable PX if the system doesn't support dGPU power control or hybrid gfx */
193 if (!radeon_is_atpx_hybrid() &&
194 !radeon_has_atpx_dgpu_power_cntl())
195 rdev->flags &= ~RADEON_IS_PX;
196 }
197
198 /**
199 * radeon_program_register_sequence - program an array of registers.
200 *
201 * @rdev: radeon_device pointer
202 * @registers: pointer to the register array
203 * @array_size: size of the register array
204 *
205 * Programs an array of registers with AND and OR masks.
206 * This is a helper for setting golden registers.
207 */
208 void radeon_program_register_sequence(struct radeon_device *rdev,
209 const u32 *registers,
210 const u32 array_size)
211 {
212 u32 tmp, reg, and_mask, or_mask;
213 int i;
214
215 if (array_size % 3)
216 return;
217
218 for (i = 0; i < array_size; i += 3) {
219 reg = registers[i + 0];
220 and_mask = registers[i + 1];
221 or_mask = registers[i + 2];
222
223 if (and_mask == 0xffffffff) {
224 tmp = or_mask;
225 } else {
226 tmp = RREG32(reg);
227 tmp &= ~and_mask;
228 tmp |= or_mask;
229 }
230 WREG32(reg, tmp);
231 }
232 }
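/*
 * Illustrative sketch of a caller (hypothetical register offsets and
 * values, not from a real golden-register table): entries are
 * {reg, and_mask, or_mask} triples, so array_size must be a multiple of 3.
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0x00010000, 0x00010000,	// read-modify-write
 *		0x3f90, 0xffffffff, 0x00000100,	// and_mask of all ones: plain overwrite
 *	};
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 (u32)ARRAY_SIZE(example_golden_registers));
 */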
233
234 void radeon_pci_config_reset(struct radeon_device *rdev)
235 {
236 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
237 }
238
239 /**
240 * radeon_surface_init - Clear GPU surface registers.
241 *
242 * @rdev: radeon_device pointer
243 *
244 * Clear GPU surface registers (r1xx-r5xx).
245 */
246 void radeon_surface_init(struct radeon_device *rdev)
247 {
248 /* FIXME: check this out */
249 if (rdev->family < CHIP_R600) {
250 int i;
251
252 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
253 if (rdev->surface_regs[i].bo)
254 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
255 else
256 radeon_clear_surface_reg(rdev, i);
257 }
258 /* enable surfaces */
259 WREG32(RADEON_SURFACE_CNTL, 0);
260 }
261 }
262
263 /*
264 * GPU scratch registers helpers function.
265 */
266 /**
267 * radeon_scratch_init - Init scratch register driver information.
268 *
269 * @rdev: radeon_device pointer
270 *
271 * Init CP scratch register driver information (r1xx-r5xx)
272 */
273 void radeon_scratch_init(struct radeon_device *rdev)
274 {
275 int i;
276
277 /* FIXME: check this out */
278 if (rdev->family < CHIP_R300) {
279 rdev->scratch.num_reg = 5;
280 } else {
281 rdev->scratch.num_reg = 7;
282 }
283 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
284 for (i = 0; i < rdev->scratch.num_reg; i++) {
285 rdev->scratch.free[i] = true;
286 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
287 }
288 }
289
290 /**
291 * radeon_scratch_get - Allocate a scratch register
292 *
293 * @rdev: radeon_device pointer
294 * @reg: scratch register mmio offset
295 *
296 * Allocate a CP scratch register for use by the driver (all asics).
297 * Returns 0 on success or -EINVAL on failure.
298 */
299 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
300 {
301 int i;
302
303 for (i = 0; i < rdev->scratch.num_reg; i++) {
304 if (rdev->scratch.free[i]) {
305 rdev->scratch.free[i] = false;
306 *reg = rdev->scratch.reg[i];
307 return 0;
308 }
309 }
310 return -EINVAL;
311 }
312
313 /**
314 * radeon_scratch_free - Free a scratch register
315 *
316 * @rdev: radeon_device pointer
317 * @reg: scratch register mmio offset
318 *
319 * Free a CP scratch register allocated for use by the driver (all asics)
320 */
321 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
322 {
323 int i;
324
325 for (i = 0; i < rdev->scratch.num_reg; i++) {
326 if (rdev->scratch.reg[i] == reg) {
327 rdev->scratch.free[i] = true;
328 return;
329 }
330 }
331 }
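/*
 * Typical usage sketch (error handling elided; this mirrors the pattern the
 * ring tests use, where the CP is asked to write a magic value such as
 * 0xCAFEDEAD into the scratch register and the CPU polls for it):
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		// ... submit a packet that writes scratch, poll RREG32(scratch) ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */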
332
333 /*
334 * GPU doorbell aperture helpers function.
335 */
336 /**
337 * radeon_doorbell_init - Init doorbell driver information.
338 *
339 * @rdev: radeon_device pointer
340 *
341 * Init doorbell driver information (CIK)
342 * Returns 0 on success, error on failure.
343 */
344 static int radeon_doorbell_init(struct radeon_device *rdev)
345 {
346 #ifdef __NetBSD__
347 int r;
348 #endif
349
350 /* doorbell bar mapping */
351 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
352 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
353
354 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
355 if (rdev->doorbell.num_doorbells == 0)
356 return -EINVAL;
357
358 #ifdef __NetBSD__
359 /* XXX errno NetBSD->Linux */
360 rdev->doorbell.bst = rdev->pdev->pd_pa.pa_memt;
361 r = -bus_space_map(rdev->doorbell.bst, rdev->doorbell.base,
362 (rdev->doorbell.num_doorbells * sizeof(uint32_t)),
363 0, &rdev->doorbell.bsh);
364 if (r)
365 return r;
366 #else
367 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
368 if (rdev->doorbell.ptr == NULL) {
369 return -ENOMEM;
370 }
371 #endif
372 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
373 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
374
375 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
376
377 return 0;
378 }
379
380 /**
381 * radeon_doorbell_fini - Tear down doorbell driver information.
382 *
383 * @rdev: radeon_device pointer
384 *
385 * Tear down doorbell driver information (CIK)
386 */
387 static void radeon_doorbell_fini(struct radeon_device *rdev)
388 {
389 #ifdef __NetBSD__
390 bus_space_unmap(rdev->doorbell.bst, rdev->doorbell.bsh,
391 (rdev->doorbell.num_doorbells * sizeof(uint32_t)));
392 #else
393 iounmap(rdev->doorbell.ptr);
394 rdev->doorbell.ptr = NULL;
395 #endif
396 }
397
398 /**
399 * radeon_doorbell_get - Allocate a doorbell entry
400 *
401 * @rdev: radeon_device pointer
402 * @doorbell: doorbell index
403 *
404 * Allocate a doorbell for use by the driver (all asics).
405 * Returns 0 on success or -EINVAL on failure.
406 */
407 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
408 {
409 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
410 if (offset < rdev->doorbell.num_doorbells) {
411 __set_bit(offset, rdev->doorbell.used);
412 *doorbell = offset;
413 return 0;
414 } else {
415 return -EINVAL;
416 }
417 }
418
419 /**
420 * radeon_doorbell_free - Free a doorbell entry
421 *
422 * @rdev: radeon_device pointer
423 * @doorbell: doorbell index
424 *
425 * Free a doorbell allocated for use by the driver (all asics)
426 */
427 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
428 {
429 if (doorbell < rdev->doorbell.num_doorbells)
430 __clear_bit(doorbell, rdev->doorbell.used);
431 }
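/*
 * Usage sketch (illustrative): doorbell indices are handed out from the
 * used bitmap and must be returned when the consumer (e.g. a compute ring)
 * is torn down:
 *
 *	u32 index;
 *
 *	if (radeon_doorbell_get(rdev, &index) == 0) {
 *		// ... program the ring to use doorbell "index" ...
 *		radeon_doorbell_free(rdev, index);
 *	}
 */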
432
433 /*
434 * radeon_wb_*()
435 * Writeback is the method by which the GPU updates special pages
436 * in memory with the status of certain GPU events (fences, ring pointers,
437 * etc.).
438 */
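/*
 * As an illustrative example of the scheme, a ring with writeback enabled
 * reads its read pointer from the shared page instead of doing a (much
 * slower) MMIO register read, along the lines of:
 *
 *	rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
 */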
439
440 /**
441 * radeon_wb_disable - Disable Writeback
442 *
443 * @rdev: radeon_device pointer
444 *
445 * Disables Writeback (all asics). Used for suspend.
446 */
447 void radeon_wb_disable(struct radeon_device *rdev)
448 {
449 rdev->wb.enabled = false;
450 }
451
452 /**
453 * radeon_wb_fini - Disable Writeback and free memory
454 *
455 * @rdev: radeon_device pointer
456 *
457 * Disables Writeback and frees the Writeback memory (all asics).
458 * Used at driver shutdown.
459 */
460 void radeon_wb_fini(struct radeon_device *rdev)
461 {
462 radeon_wb_disable(rdev);
463 if (rdev->wb.wb_obj) {
464 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
465 radeon_bo_kunmap(rdev->wb.wb_obj);
466 radeon_bo_unpin(rdev->wb.wb_obj);
467 radeon_bo_unreserve(rdev->wb.wb_obj);
468 }
469 radeon_bo_unref(&rdev->wb.wb_obj);
470 rdev->wb.wb = NULL;
471 rdev->wb.wb_obj = NULL;
472 }
473 }
474
475 /**
476 * radeon_wb_init - Init Writeback driver info and allocate memory
477 *
478 * @rdev: radeon_device pointer
479 *
480 * Initializes Writeback and allocates the Writeback memory (all asics).
481 * Used at driver startup.
482 * Returns 0 on success or a negative error code on failure.
483 */
484 int radeon_wb_init(struct radeon_device *rdev)
485 {
486 int r;
487
488 if (rdev->wb.wb_obj == NULL) {
489 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
490 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
491 &rdev->wb.wb_obj);
492 if (r) {
493 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
494 return r;
495 }
496 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
497 if (unlikely(r != 0)) {
498 radeon_wb_fini(rdev);
499 return r;
500 }
501 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
502 &rdev->wb.gpu_addr);
503 if (r) {
504 radeon_bo_unreserve(rdev->wb.wb_obj);
505 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
506 radeon_wb_fini(rdev);
507 return r;
508 }
509 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)__UNVOLATILE(&rdev->wb.wb));
510 radeon_bo_unreserve(rdev->wb.wb_obj);
511 if (r) {
512 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
513 radeon_wb_fini(rdev);
514 return r;
515 }
516 }
517
518 /* clear wb memory */
519 memset(__UNVOLATILE(rdev->wb.wb), 0, RADEON_GPU_PAGE_SIZE);
520 /* disable event_write fences */
521 rdev->wb.use_event = false;
522 /* disabled via module param */
523 if (radeon_no_wb == 1) {
524 rdev->wb.enabled = false;
525 } else {
526 if (rdev->flags & RADEON_IS_AGP) {
527 /* often unreliable on AGP */
528 rdev->wb.enabled = false;
529 } else if (rdev->family < CHIP_R300) {
530 /* often unreliable on pre-r300 */
531 rdev->wb.enabled = false;
532 } else {
533 rdev->wb.enabled = true;
534 /* event_write fences are only available on r600+ */
535 if (rdev->family >= CHIP_R600) {
536 rdev->wb.use_event = true;
537 }
538 }
539 }
540 /* always use writeback/events on NI, APUs */
541 if (rdev->family >= CHIP_PALM) {
542 rdev->wb.enabled = true;
543 rdev->wb.use_event = true;
544 }
545
546 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
547
548 return 0;
549 }
550
551 /**
552 * radeon_vram_location - try to find VRAM location
553 * @rdev: radeon device structure holding all necessary information
554 * @mc: memory controller structure holding memory information
555 * @base: base address at which to put VRAM
556 *
557 * Function will try to place VRAM at the base address provided
558 * as a parameter (which is so far either the PCI aperture address or,
559 * for IGP, the TOM base address).
560 *
561 * If there is not enough space to fit the invisible VRAM in the 32-bit
562 * address space, then we limit the VRAM size to the aperture.
563 *
564 * If we are using AGP and if the AGP aperture doesn't allow us to have
565 * room for all the VRAM, then we restrict the VRAM to the PCI aperture
566 * size and print a warning.
567 *
568 * This function never fails; the worst case is limiting VRAM.
569 *
570 * Note: GTT start, end, and size should be initialized before calling this
571 * function on AGP platforms.
572 *
573 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
574 * this shouldn't be a problem as we are using the PCI aperture as a reference.
575 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
576 * not IGP.
577 *
578 * Note: we use mc_vram_size as on some boards we need to program the mc to
579 * cover the whole aperture even if the VRAM size is smaller than the
580 * aperture size (Novell bug 204882 along with lots of Ubuntu ones).
581 *
582 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
583 * we are not in the case where real_vram_size is smaller than mc_vram_size
584 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
585 * of Ubuntu ones).
586 *
587 * Note: the IGP TOM addr should be the same as the aperture addr; we don't
588 * explicitly check for that, though.
589 *
590 * FIXME: when reducing VRAM size, align the new size on a power of 2.
591 */
592 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
593 {
594 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
595
596 mc->vram_start = base;
597 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
598 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
599 mc->real_vram_size = mc->aper_size;
600 mc->mc_vram_size = mc->aper_size;
601 }
602 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
603 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
604 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
605 mc->real_vram_size = mc->aper_size;
606 mc->mc_vram_size = mc->aper_size;
607 }
608 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
609 if (limit && limit < mc->real_vram_size)
610 mc->real_vram_size = limit;
611 dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64" (%"PRIu64"M used)\n",
612 mc->mc_vram_size >> 20, mc->vram_start,
613 mc->vram_end, mc->real_vram_size >> 20);
614 }
615
616 /**
617 * radeon_gtt_location - try to find GTT location
618 * @rdev: radeon device structure holding all necessary information
619 * @mc: memory controller structure holding memory information
620 *
621 * Function will try to place GTT before or after VRAM.
622 *
623 * If the GTT size is bigger than the space left, then we adjust the GTT size.
624 * Thus this function will never fail.
625 *
626 * FIXME: when reducing GTT size, align the new size on a power of 2.
627 */
628 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
629 {
630 u64 size_af, size_bf;
631
632 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
633 size_bf = mc->vram_start & ~mc->gtt_base_align;
634 if (size_bf > size_af) {
635 if (mc->gtt_size > size_bf) {
636 dev_warn(rdev->dev, "limiting GTT\n");
637 mc->gtt_size = size_bf;
638 }
639 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
640 } else {
641 if (mc->gtt_size > size_af) {
642 dev_warn(rdev->dev, "limiting GTT\n");
643 mc->gtt_size = size_af;
644 }
645 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
646 }
647 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
648 dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64"\n",
649 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
650 }
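/*
 * Worked example with illustrative numbers: VRAM at 0x0 with
 * mc_vram_size = 256M (vram_end = 0x0FFFFFFF), a 40-bit mc_mask and
 * gtt_base_align = 0.  Then size_bf = 0 and size_af is everything above
 * VRAM, so a 1G GTT is placed after VRAM: gtt_start = 0x10000000,
 * gtt_end = 0x4FFFFFFF.
 */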
651
652 /*
653 * GPU helpers function.
654 */
655
656 /**
657 * radeon_device_is_virtual - check if we are running in a virtual environment
658 *
659 * Check if the asic has been passed through to a VM (all asics).
660 * Used at driver startup.
661 * Returns true if virtual or false if not.
662 */
663 bool radeon_device_is_virtual(void)
664 {
665 #ifdef CONFIG_X86
666 #ifdef __NetBSD__ /* XXX virtualization */
667 return false;
668 #else
669 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
670 #endif
671 #else
672 return false;
673 #endif
674 }
675
676 /**
677 * radeon_card_posted - check if the hw has already been initialized
678 *
679 * @rdev: radeon_device pointer
680 *
681 * Check if the asic has been initialized (all asics).
682 * Used at driver startup.
683 * Returns true if initialized or false if not.
684 */
685 bool radeon_card_posted(struct radeon_device *rdev)
686 {
687 uint32_t reg;
688
689 /* for pass through, always force asic_init for CI */
690 if (rdev->family >= CHIP_BONAIRE &&
691 radeon_device_is_virtual())
692 return false;
693
694 #ifndef __NetBSD__ /* XXX radeon efi */
695 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
696 if (efi_enabled(EFI_BOOT) &&
697 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
698 (rdev->family < CHIP_R600))
699 return false;
700 #endif
701
702 if (ASIC_IS_NODCE(rdev))
703 goto check_memsize;
704
705 /* first check CRTCs */
706 if (ASIC_IS_DCE4(rdev)) {
707 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
708 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
709 if (rdev->num_crtc >= 4) {
710 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
711 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
712 }
713 if (rdev->num_crtc >= 6) {
714 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
715 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
716 }
717 if (reg & EVERGREEN_CRTC_MASTER_EN)
718 return true;
719 } else if (ASIC_IS_AVIVO(rdev)) {
720 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
721 RREG32(AVIVO_D2CRTC_CONTROL);
722 if (reg & AVIVO_CRTC_EN) {
723 return true;
724 }
725 } else {
726 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
727 RREG32(RADEON_CRTC2_GEN_CNTL);
728 if (reg & RADEON_CRTC_EN) {
729 return true;
730 }
731 }
732
733 check_memsize:
734 /* then check MEM_SIZE, in case the crtcs are off */
735 if (rdev->family >= CHIP_R600)
736 reg = RREG32(R600_CONFIG_MEMSIZE);
737 else
738 reg = RREG32(RADEON_CONFIG_MEMSIZE);
739
740 if (reg)
741 return true;
742
743 return false;
744
745 }
746
747 /**
748 * radeon_update_bandwidth_info - update display bandwidth params
749 *
750 * @rdev: radeon_device pointer
751 *
752 * Used when sclk/mclk are switched or display modes are set.
753 * The params are used to calculate display watermarks (all asics).
754 */
755 void radeon_update_bandwidth_info(struct radeon_device *rdev)
756 {
757 fixed20_12 a;
758 u32 sclk = rdev->pm.current_sclk;
759 u32 mclk = rdev->pm.current_mclk;
760
761 /* sclk/mclk in MHz */
762 a.full = dfixed_const(100);
763 rdev->pm.sclk.full = dfixed_const(sclk);
764 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
765 rdev->pm.mclk.full = dfixed_const(mclk);
766 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
767
768 if (rdev->flags & RADEON_IS_IGP) {
769 a.full = dfixed_const(16);
770 /* core_bandwidth = sclk(MHz) * 16 */
771 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
772 }
773 }
774
775 /**
776 * radeon_boot_test_post_card - check and possibly initialize the hw
777 *
778 * @rdev: radeon_device pointer
779 *
780 * Check if the asic is initialized and if not, attempt to initialize
781 * it (all asics).
782 * Returns true if initialized or false if not.
783 */
784 bool radeon_boot_test_post_card(struct radeon_device *rdev)
785 {
786 if (radeon_card_posted(rdev))
787 return true;
788
789 if (rdev->bios) {
790 DRM_INFO("GPU not posted. posting now...\n");
791 if (rdev->is_atom_bios)
792 atom_asic_init(rdev->mode_info.atom_context);
793 else
794 radeon_combios_asic_init(rdev->ddev);
795 return true;
796 } else {
797 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
798 return false;
799 }
800 }
801
802 /**
803 * radeon_dummy_page_init - init dummy page used by the driver
804 *
805 * @rdev: radeon_device pointer
806 *
807 * Allocate the dummy page used by the driver (all asics).
808 * This dummy page is used by the driver as a filler for gart entries
809 * when pages are taken out of the GART.
810 * Returns 0 on success, -ENOMEM on failure.
811 */
812 int radeon_dummy_page_init(struct radeon_device *rdev)
813 {
814 #ifdef __NetBSD__
815 int rsegs;
816 int error;
817
818 /* XXX Can this be called more than once?? */
819 if (rdev->dummy_page.rdp_map != NULL)
820 return 0;
821
822 error = bus_dmamem_alloc(rdev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
823 &rdev->dummy_page.rdp_seg, 1, &rsegs, BUS_DMA_WAITOK);
824 if (error)
825 goto fail0;
826 KASSERT(rsegs == 1);
827 error = bus_dmamap_create(rdev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
828 BUS_DMA_WAITOK, &rdev->dummy_page.rdp_map);
829 if (error)
830 goto fail1;
831 error = bus_dmamem_map(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1,
832 PAGE_SIZE, &rdev->dummy_page.rdp_addr,
833 BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
834 if (error)
835 goto fail2;
836 error = bus_dmamap_load(rdev->ddev->dmat, rdev->dummy_page.rdp_map,
837 rdev->dummy_page.rdp_addr, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
838 if (error)
839 goto fail3;
840
841 memset(rdev->dummy_page.rdp_addr, 0, PAGE_SIZE);
842 bus_dmamap_sync(rdev->ddev->dmat, rdev->dummy_page.rdp_map, 0,
843 PAGE_SIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
844
845 /* Success! */
846 rdev->dummy_page.addr = rdev->dummy_page.rdp_map->dm_segs[0].ds_addr;
847 rdev->dummy_page.entry = radeon_gart_get_page_entry(
848 rdev->dummy_page.addr, RADEON_GART_PAGE_DUMMY);
849 return 0;
850
851 fail4: __unused
852 bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
853 fail3: bus_dmamem_unmap(rdev->ddev->dmat, rdev->dummy_page.rdp_addr,
854 PAGE_SIZE);
855 fail2: bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
856 fail1: bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
857 fail0: KASSERT(error);
858 rdev->dummy_page.rdp_map = NULL;
859 /* XXX errno NetBSD->Linux */
860 return -error;
861 #else
862 if (rdev->dummy_page.page)
863 return 0;
864 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
865 if (rdev->dummy_page.page == NULL)
866 return -ENOMEM;
867 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
868 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
869 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
870 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
871 __free_page(rdev->dummy_page.page);
872 rdev->dummy_page.page = NULL;
873 return -ENOMEM;
874 }
875 rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
876 RADEON_GART_PAGE_DUMMY);
877 return 0;
878 #endif
879 }
880
881 /**
882 * radeon_dummy_page_fini - free dummy page used by the driver
883 *
884 * @rdev: radeon_device pointer
885 *
886 * Frees the dummy page used by the driver (all asics).
887 */
888 void radeon_dummy_page_fini(struct radeon_device *rdev)
889 {
890 #ifdef __NetBSD__
891
892 if (rdev->dummy_page.rdp_map == NULL)
893 return;
894 bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
895 bus_dmamem_unmap(rdev->ddev->dmat, rdev->dummy_page.rdp_addr,
896 PAGE_SIZE);
897 bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
898 bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
899 rdev->dummy_page.rdp_map = NULL;
900 #else
901 if (rdev->dummy_page.page == NULL)
902 return;
903 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
904 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
905 __free_page(rdev->dummy_page.page);
906 rdev->dummy_page.page = NULL;
907 #endif
908 }
909
910
911 /* ATOM accessor methods */
912 /*
913 * ATOM is an interpreted byte code stored in tables in the vbios. The
914 * driver registers callbacks to access registers and the interpreter
915 * in the driver parses the tables and executes them to program specific
916 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
917 * atombios.h, and atom.c
918 */
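/*
 * Sketch of the control flow: the interpreter never touches hardware
 * directly; an opcode that accesses a register ends up calling through
 * struct card_info, e.g.
 *
 *	val = info->reg_read(info, idx);
 *
 * where info is filled in by radeon_atombios_init() below.  Note that the
 * accessors multiply by 4: ATOM indexes registers as dwords, while
 * RREG32()/WREG32() take byte offsets.
 */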
919
920 /**
921 * cail_pll_read - read PLL register
922 *
923 * @info: atom card_info pointer
924 * @reg: PLL register offset
925 *
926 * Provides a PLL register accessor for the atom interpreter (r4xx+).
927 * Returns the value of the PLL register.
928 */
929 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
930 {
931 struct radeon_device *rdev = info->dev->dev_private;
932 uint32_t r;
933
934 r = rdev->pll_rreg(rdev, reg);
935 return r;
936 }
937
938 /**
939 * cail_pll_write - write PLL register
940 *
941 * @info: atom card_info pointer
942 * @reg: PLL register offset
943 * @val: value to write to the pll register
944 *
945 * Provides a PLL register accessor for the atom interpreter (r4xx+).
946 */
947 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
948 {
949 struct radeon_device *rdev = info->dev->dev_private;
950
951 rdev->pll_wreg(rdev, reg, val);
952 }
953
954 /**
955 * cail_mc_read - read MC (Memory Controller) register
956 *
957 * @info: atom card_info pointer
958 * @reg: MC register offset
959 *
960 * Provides an MC register accessor for the atom interpreter (r4xx+).
961 * Returns the value of the MC register.
962 */
963 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
964 {
965 struct radeon_device *rdev = info->dev->dev_private;
966 uint32_t r;
967
968 r = rdev->mc_rreg(rdev, reg);
969 return r;
970 }
971
972 /**
973 * cail_mc_write - write MC (Memory Controller) register
974 *
975 * @info: atom card_info pointer
976 * @reg: MC register offset
977 * @val: value to write to the MC register
978 *
979 * Provides an MC register accessor for the atom interpreter (r4xx+).
980 */
981 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
982 {
983 struct radeon_device *rdev = info->dev->dev_private;
984
985 rdev->mc_wreg(rdev, reg, val);
986 }
987
988 /**
989 * cail_reg_write - write MMIO register
990 *
991 * @info: atom card_info pointer
992 * @reg: MMIO register offset
993 * @val: value to write to the MMIO register
994 *
995 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
996 */
997 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
998 {
999 struct radeon_device *rdev = info->dev->dev_private;
1000
1001 WREG32(reg*4, val);
1002 }
1003
1004 /**
1005 * cail_reg_read - read MMIO register
1006 *
1007 * @info: atom card_info pointer
1008 * @reg: MMIO register offset
1009 *
1010 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
1011 * Returns the value of the MMIO register.
1012 */
1013 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
1014 {
1015 struct radeon_device *rdev = info->dev->dev_private;
1016 uint32_t r;
1017
1018 r = RREG32(reg*4);
1019 return r;
1020 }
1021
1022 /**
1023 * cail_ioreg_write - write IO register
1024 *
1025 * @info: atom card_info pointer
1026 * @reg: IO register offset
1027 * @val: value to write to the IO register
1028 *
1029 * Provides an IO register accessor for the atom interpreter (r4xx+).
1030 */
1031 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
1032 {
1033 struct radeon_device *rdev = info->dev->dev_private;
1034
1035 WREG32_IO(reg*4, val);
1036 }
1037
1038 /**
1039 * cail_ioreg_read - read IO register
1040 *
1041 * @info: atom card_info pointer
1042 * @reg: IO register offset
1043 *
1044 * Provides an IO register accessor for the atom interpreter (r4xx+).
1045 * Returns the value of the IO register.
1046 */
1047 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
1048 {
1049 struct radeon_device *rdev = info->dev->dev_private;
1050 uint32_t r;
1051
1052 r = RREG32_IO(reg*4);
1053 return r;
1054 }
1055
1056 /**
1057 * radeon_atombios_init - init the driver info and callbacks for atombios
1058 *
1059 * @rdev: radeon_device pointer
1060 *
1061 * Initializes the driver info and register access callbacks for the
1062 * ATOM interpreter (r4xx+).
1063 * Returns 0 on success, -ENOMEM on failure.
1064 * Called at driver startup.
1065 */
1066 int radeon_atombios_init(struct radeon_device *rdev)
1067 {
1068 struct card_info *atom_card_info =
1069 kzalloc(sizeof(struct card_info), GFP_KERNEL);
1070
1071 if (!atom_card_info)
1072 return -ENOMEM;
1073
1074 rdev->mode_info.atom_card_info = atom_card_info;
1075 atom_card_info->dev = rdev->ddev;
1076 atom_card_info->reg_read = cail_reg_read;
1077 atom_card_info->reg_write = cail_reg_write;
1078 /* needed for iio ops */
1079 #ifdef __NetBSD__
1080 if (rdev->rio_mem_size)
1081 #else
1082 if (rdev->rio_mem)
1083 #endif
1084 {
1085 atom_card_info->ioreg_read = cail_ioreg_read;
1086 atom_card_info->ioreg_write = cail_ioreg_write;
1087 } else {
1088 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1089 atom_card_info->ioreg_read = cail_reg_read;
1090 atom_card_info->ioreg_write = cail_reg_write;
1091 }
1092 atom_card_info->mc_read = cail_mc_read;
1093 atom_card_info->mc_write = cail_mc_write;
1094 atom_card_info->pll_read = cail_pll_read;
1095 atom_card_info->pll_write = cail_pll_write;
1096
1097 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
1098 if (!rdev->mode_info.atom_context) {
1099 radeon_atombios_fini(rdev);
1100 return -ENOMEM;
1101 }
1102
1103 mutex_init(&rdev->mode_info.atom_context->mutex);
1104 mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
1105 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1106 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
1107 return 0;
1108 }
1109
1110 /**
1111 * radeon_atombios_fini - free the driver info and callbacks for atombios
1112 *
1113 * @rdev: radeon_device pointer
1114 *
1115 * Frees the driver info and register access callbacks for the ATOM
1116 * interpreter (r4xx+).
1117 * Called at driver shutdown.
1118 */
1119 void radeon_atombios_fini(struct radeon_device *rdev)
1120 {
1121 if (rdev->mode_info.atom_context) {
1122 mutex_destroy(&rdev->mode_info.atom_context->scratch_mutex);
1123 mutex_destroy(&rdev->mode_info.atom_context->mutex);
1124 kfree(rdev->mode_info.atom_context->scratch);
1125 }
1126 kfree(rdev->mode_info.atom_context);
1127 rdev->mode_info.atom_context = NULL;
1128 kfree(rdev->mode_info.atom_card_info);
1129 rdev->mode_info.atom_card_info = NULL;
1130 }
1131
1132 /* COMBIOS */
1133 /*
1134 * COMBIOS is the bios format prior to ATOM. It provides
1135 * command tables similar to ATOM, but doesn't have a unified
1136 * parser. See radeon_combios.c
1137 */
1138
1139 /**
1140 * radeon_combios_init - init the driver info for combios
1141 *
1142 * @rdev: radeon_device pointer
1143 *
1144 * Initializes the driver info for combios (r1xx-r3xx).
1145 * Returns 0 on success.
1146 * Called at driver startup.
1147 */
1148 int radeon_combios_init(struct radeon_device *rdev)
1149 {
1150 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1151 return 0;
1152 }
1153
1154 /**
1155 * radeon_combios_fini - free the driver info for combios
1156 *
1157 * @rdev: radeon_device pointer
1158 *
1159 * Frees the driver info for combios (r1xx-r3xx).
1160 * Called at driver shutdown.
1161 */
1162 void radeon_combios_fini(struct radeon_device *rdev)
1163 {
1164 }
1165
1166 #ifndef __NetBSD__ /* XXX radeon vga */
1167 /* if we get transitioned to only one device, take VGA back */
1168 /**
1169 * radeon_vga_set_decode - enable/disable vga decode
1170 *
1171 * @cookie: radeon_device pointer
1172 * @state: enable/disable vga decode
1173 *
1174 * Enable/disable vga decode (all asics).
1175 * Returns VGA resource flags.
1176 */
1177 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1178 {
1179 struct radeon_device *rdev = cookie;
1180 radeon_vga_set_state(rdev, state);
1181 if (state)
1182 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1183 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1184 else
1185 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1186 }
1187 #endif
1188
1189 /**
1190 * radeon_check_pot_argument - check that argument is a power of two
1191 *
1192 * @arg: value to check
1193 *
1194 * Validates that a certain argument is a power of two (all asics).
1195 * Returns true if argument is valid.
1196 */
1197 static bool radeon_check_pot_argument(int arg)
1198 {
1199 return (arg & (arg - 1)) == 0;
1200 }
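/*
 * (arg & (arg - 1)) clears the lowest set bit, so it is zero exactly when
 * at most one bit is set.  arg == 0 therefore also passes, which is
 * convenient here: 0 means "no limit"/"use default" for the module
 * parameters validated below.
 */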
1201
1202 /**
1203 * Determine a sensible default GART size according to ASIC family.
1204 *
1205 * @family: ASIC family name
1206 */
1207 static int radeon_gart_size_auto(enum radeon_family family)
1208 {
1209 /* default to a larger gart size on newer asics */
1210 if (family >= CHIP_TAHITI)
1211 return 2048;
1212 else if (family >= CHIP_RV770)
1213 return 1024;
1214 else
1215 return 512;
1216 }
1217
1218 /**
1219 * radeon_check_arguments - validate module params
1220 *
1221 * @rdev: radeon_device pointer
1222 *
1223 * Validates certain module parameters and updates
1224 * the associated values used by the driver (all asics).
1225 */
1226 static void radeon_check_arguments(struct radeon_device *rdev)
1227 {
1228 /* vramlimit must be a power of two */
1229 if (!radeon_check_pot_argument(radeon_vram_limit)) {
1230 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1231 radeon_vram_limit);
1232 radeon_vram_limit = 0;
1233 }
1234
1235 if (radeon_gart_size == -1) {
1236 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1237 }
1238 /* gtt size must be a power of two and greater than or equal to 32M */
1239 if (radeon_gart_size < 32) {
1240 dev_warn(rdev->dev, "gart size (%d) too small\n",
1241 radeon_gart_size);
1242 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1243 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1244 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1245 radeon_gart_size);
1246 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1247 }
1248 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1249
1250 /* AGP mode can only be -1, 1, 2, 4, 8 */
1251 switch (radeon_agpmode) {
1252 case -1:
1253 case 0:
1254 case 1:
1255 case 2:
1256 case 4:
1257 case 8:
1258 break;
1259 default:
1260 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1261 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1262 radeon_agpmode = 0;
1263 break;
1264 }
1265
1266 if (!radeon_check_pot_argument(radeon_vm_size)) {
1267 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1268 radeon_vm_size);
1269 radeon_vm_size = 4;
1270 }
1271
1272 if (radeon_vm_size < 1) {
1273 dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
1274 radeon_vm_size);
1275 radeon_vm_size = 4;
1276 }
1277
1278 /*
1279 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1280 */
1281 if (radeon_vm_size > 1024) {
1282 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1283 radeon_vm_size);
1284 radeon_vm_size = 4;
1285 }
1286
1287 /* define the number of bits in the page table versus the page directory;
1288 * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
1289 * page table, and the remaining bits in the page directory */
1290 if (radeon_vm_block_size == -1) {
1291
1292 /* Total bits covered by PD + PTs */
1293 unsigned bits = ilog2(radeon_vm_size) + 18;
1294
1295 /* Make sure the PD is 4K in size up to 8GB address space.
1296 Above that, split equally between PD and PTs */
1297 if (radeon_vm_size <= 8)
1298 radeon_vm_block_size = bits - 9;
1299 else
1300 radeon_vm_block_size = (bits + 3) / 2;
1301
1302 } else if (radeon_vm_block_size < 9) {
1303 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1304 radeon_vm_block_size);
1305 radeon_vm_block_size = 9;
1306 }
1307
1308 if (radeon_vm_block_size > 24 ||
1309 (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1310 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1311 radeon_vm_block_size);
1312 radeon_vm_block_size = 9;
1313 }
1314 }
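/*
 * Worked example for the block-size heuristic above: with the default
 * radeon_vm_size of 8 (GB), bits = ilog2(8) + 18 = 21, and since
 * radeon_vm_size <= 8 we get radeon_vm_block_size = 21 - 9 = 12.  That
 * splits the 21 bits into a 9-bit page directory and 12-bit page tables,
 * i.e. the PD is a single 4K page (512 eight-byte entries), as intended.
 */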
1315
1316 #ifndef __NetBSD__ /* XXX radeon vga */
1317 /**
1318 * radeon_switcheroo_set_state - set switcheroo state
1319 *
1320 * @pdev: pci dev pointer
1321 * @state: vga_switcheroo state
1322 *
1323 * Callback for the switcheroo driver. Suspends or resumes the
1324 * asic before or after it is powered up using ACPI methods.
1325 */
1326 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1327 {
1328 struct drm_device *dev = pci_get_drvdata(pdev);
1329
1330 if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1331 return;
1332
1333 if (state == VGA_SWITCHEROO_ON) {
1334 pr_info("radeon: switched on\n");
1335 /* don't suspend or resume card normally */
1336 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1337
1338 radeon_resume_kms(dev, true, true);
1339
1340 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1341 drm_kms_helper_poll_enable(dev);
1342 } else {
1343 pr_info("radeon: switched off\n");
1344 drm_kms_helper_poll_disable(dev);
1345 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1346 radeon_suspend_kms(dev, true, true, false);
1347 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1348 }
1349 }
1350
1351 /**
1352 * radeon_switcheroo_can_switch - see if switcheroo state can change
1353 *
1354 * @pdev: pci dev pointer
1355 *
1356 * Callback for the switcheroo driver. Checks if the switcheroo
1357 * state can be changed.
1358 * Returns true if the state can be changed, false if not.
1359 */
1360 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1361 {
1362 struct drm_device *dev = pci_get_drvdata(pdev);
1363
1364 /*
1365 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1366 * locking inversion with the driver load path. And the access here is
1367 * completely racy anyway. So don't bother with locking for now.
1368 */
1369 return dev->open_count == 0;
1370 }
1371
1372 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1373 .set_gpu_state = radeon_switcheroo_set_state,
1374 .reprobe = NULL,
1375 .can_switch = radeon_switcheroo_can_switch,
1376 };
1377 #endif
1378
1379 /**
1380 * radeon_device_init - initialize the driver
1381 *
1382 * @rdev: radeon_device pointer
1383 * @ddev: drm dev pointer
1384 * @pdev: pci dev pointer
1385 * @flags: driver flags
1386 *
1387 * Initializes the driver info and hw (all asics).
1388 * Returns 0 for success or an error on failure.
1389 * Called at driver startup.
1390 */
1391 int radeon_device_init(struct radeon_device *rdev,
1392 struct drm_device *ddev,
1393 struct pci_dev *pdev,
1394 uint32_t flags)
1395 {
1396 int r, i;
1397 int dma_bits;
1398 #ifndef __NetBSD__
1399 bool runtime = false;
1400 #endif
1401
1402 rdev->shutdown = false;
1403 rdev->dev = ddev->dev;
1404 rdev->ddev = ddev;
1405 rdev->pdev = pdev;
1406 rdev->flags = flags;
1407 rdev->family = flags & RADEON_FAMILY_MASK;
1408 rdev->is_atom_bios = false;
1409 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1410 rdev->mc.gtt_size = 512 * 1024 * 1024;
1411 rdev->accel_working = false;
1412 /* set up ring ids */
1413 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1414 rdev->ring[i].idx = i;
1415 }
1416 rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
1417
1418 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1419 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1420 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1421
1422 /* mutex initialization is all done here so we
1423 * can call these functions again without locking issues */
1424 mutex_init(&rdev->ring_lock);
1425 mutex_init(&rdev->dc_hw_i2c_mutex);
1426 atomic_set(&rdev->ih.lock, 0);
1427 mutex_init(&rdev->gem.mutex);
1428 mutex_init(&rdev->pm.mutex);
1429 mutex_init(&rdev->gpu_clock_mutex);
1430 mutex_init(&rdev->srbm_mutex);
1431 init_rwsem(&rdev->pm.mclk_lock);
1432 init_rwsem(&rdev->exclusive_lock);
1433 spin_lock_init(&rdev->irq.vblank_lock);
1434 DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
1435 r = radeon_gem_init(rdev);
1436 if (r)
1437 return r;
1438
1439 radeon_check_arguments(rdev);
1440 /* Adjust VM size here.
1441 * Max GPUVM size for cayman+ is 40 bits.
1442 */
1443 rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1444
1445 /* Set asic functions */
1446 r = radeon_asic_init(rdev);
1447 if (r)
1448 return r;
1449
1450 /* all of the newer IGP chips have an internal gart
1451 * However, some rs4xx chips report as AGP, so remove that flag here.
1452 */
1453 if ((rdev->family >= CHIP_RS400) &&
1454 (rdev->flags & RADEON_IS_IGP)) {
1455 rdev->flags &= ~RADEON_IS_AGP;
1456 }
1457
1458 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1459 radeon_agp_disable(rdev);
1460 }
1461
1462 /* Set the internal MC address mask
1463 * This is the max address of the GPU's
1464 * internal address space.
1465 */
1466 if (rdev->family >= CHIP_CAYMAN)
1467 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1468 else if (rdev->family >= CHIP_CEDAR)
1469 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1470 else
1471 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1472
1473 /* set DMA mask.
1474 * PCIE - can handle 40-bits.
1475 * IGP - can handle 40-bits
1476 * AGP - generally dma32 is safest
1477 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1478 */
1479 dma_bits = 40;
1480 if (rdev->flags & RADEON_IS_AGP)
1481 dma_bits = 32;
1482 if ((rdev->flags & RADEON_IS_PCI) &&
1483 (rdev->family <= CHIP_RS740))
1484 dma_bits = 32;
1485 #ifdef CONFIG_PPC64
1486 if (rdev->family == CHIP_CEDAR)
1487 dma_bits = 32;
1488 #endif
1489
1490 #ifdef __NetBSD__
1491 r = drm_limit_dma_space(rdev->ddev, 0, __BITS(dma_bits - 1, 0));
1492 #else
1493 r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
1494 #endif
1495 if (r) {
1496 pr_warn("radeon: No suitable DMA available\n");
1497 return r;
1498 }
1499 rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
1500
1501 /* Registers mapping */
1502 /* TODO: block userspace mapping of io register */
1503 /* XXX Destroy these locks on detach... */
1504 spin_lock_init(&rdev->mmio_idx_lock);
1505 spin_lock_init(&rdev->smc_idx_lock);
1506 spin_lock_init(&rdev->pll_idx_lock);
1507 spin_lock_init(&rdev->mc_idx_lock);
1508 spin_lock_init(&rdev->pcie_idx_lock);
1509 spin_lock_init(&rdev->pciep_idx_lock);
1510 spin_lock_init(&rdev->pif_idx_lock);
1511 spin_lock_init(&rdev->cg_idx_lock);
1512 spin_lock_init(&rdev->uvd_idx_lock);
1513 spin_lock_init(&rdev->rcu_idx_lock);
1514 spin_lock_init(&rdev->didt_idx_lock);
1515 spin_lock_init(&rdev->end_idx_lock);
1516 #ifdef __NetBSD__
1517 {
1518 pcireg_t bar;
1519
1520 if (rdev->family >= CHIP_BONAIRE)
1521 bar = 5;
1522 else
1523 bar = 2;
1524 if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(bar),
1525 pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
1526 rdev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
1527 0,
1528 &rdev->rmmio_bst, &rdev->rmmio_bsh,
1529 &rdev->rmmio_addr, &rdev->rmmio_size))
1530 return -EIO;
1531 }
1532 DRM_INFO("register mmio base: 0x%"PRIxMAX"\n",
1533 (uintmax_t)rdev->rmmio_addr);
1534 DRM_INFO("register mmio size: %"PRIuMAX"\n",
1535 (uintmax_t)rdev->rmmio_size);
1536 #else
1537 if (rdev->family >= CHIP_BONAIRE) {
1538 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1539 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1540 } else {
1541 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1542 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1543 }
1544 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1545 if (rdev->rmmio == NULL)
1546 return -ENOMEM;
1547 #endif
1548
1549 /* doorbell bar mapping */
1550 if (rdev->family >= CHIP_BONAIRE)
1551 radeon_doorbell_init(rdev);
1552
1553 /* io port mapping */
1554 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1555 #ifdef __NetBSD__
1556 if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(i),
1557 PCI_MAPREG_TYPE_IO, 0,
1558 &rdev->rio_mem_bst, &rdev->rio_mem_bsh,
1559 NULL, &rdev->rio_mem_size))
1560 continue;
1561 break;
1562 #else
1563 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1564 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1565 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1566 break;
1567 }
1568 #endif
1569 }
1570 #ifdef __NetBSD__
1571 if (i == DEVICE_COUNT_RESOURCE)
1572 DRM_ERROR("Unable to find PCI I/O BAR\n");
1573 #else
1574 if (rdev->rio_mem == NULL)
1575 DRM_ERROR("Unable to find PCI I/O BAR\n");
1576 #endif
1577
1578 if (rdev->flags & RADEON_IS_PX)
1579 radeon_device_handle_px_quirks(rdev);
1580
1581 #ifndef __NetBSD__ /* XXX radeon vga */
1582 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
1583 /* this will fail for cards that aren't VGA class devices, just
1584 * ignore it */
1585 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1586
1587 if (rdev->flags & RADEON_IS_PX)
1588 runtime = true;
1589 if (!pci_is_thunderbolt_attached(rdev->pdev))
1590 vga_switcheroo_register_client(rdev->pdev,
1591 &radeon_switcheroo_ops, runtime);
1592 if (runtime)
1593 vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
1594 #endif
1595
1596 r = radeon_init(rdev);
1597 if (r)
1598 goto failed;
1599
1600 r = radeon_gem_debugfs_init(rdev);
1601 if (r) {
1602 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1603 }
1604
1605 r = radeon_mst_debugfs_init(rdev);
1606 if (r) {
1607 DRM_ERROR("registering mst debugfs failed (%d).\n", r);
1608 }
1609
1610 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1611 /* Acceleration not working on AGP card, try again
1612 * with fallback to PCI or PCIE GART
1613 */
1614 radeon_asic_reset(rdev);
1615 radeon_fini(rdev);
1616 radeon_agp_disable(rdev);
1617 r = radeon_init(rdev);
1618 if (r)
1619 goto failed;
1620 }
1621
1622 r = radeon_ib_ring_tests(rdev);
1623 if (r)
1624 DRM_ERROR("ib ring test failed (%d).\n", r);
1625
1626 /*
1627 * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
1628 * after the CP ring has chewed through at least one packet. Hence we stop
1629 * and restart DPM here, after radeon_ib_ring_tests().
1630 */
1631 if (rdev->pm.dpm_enabled &&
1632 (rdev->pm.pm_method == PM_METHOD_DPM) &&
1633 (rdev->family == CHIP_TURKS) &&
1634 (rdev->flags & RADEON_IS_MOBILITY)) {
1635 mutex_lock(&rdev->pm.mutex);
1636 radeon_dpm_disable(rdev);
1637 radeon_dpm_enable(rdev);
1638 mutex_unlock(&rdev->pm.mutex);
1639 }
1640
1641 if ((radeon_testing & 1)) {
1642 if (rdev->accel_working)
1643 radeon_test_moves(rdev);
1644 else
1645 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1646 }
1647 if ((radeon_testing & 2)) {
1648 if (rdev->accel_working)
1649 radeon_test_syncing(rdev);
1650 else
1651 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1652 }
1653 if (radeon_benchmarking) {
1654 if (rdev->accel_working)
1655 radeon_benchmark(rdev, radeon_benchmarking);
1656 else
1657 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1658 }
1659 return 0;
1660
1661 failed:
1662 #ifndef __NetBSD__ /* XXX radeon vga */
1663 if (runtime)
1664 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1665 #endif
1666 return r;
1667 }
1668
1669 /**
1670 * radeon_device_fini - tear down the driver
1671 *
1672 * @rdev: radeon_device pointer
1673 *
1674 * Tear down the driver info (all asics).
1675 * Called at driver shutdown.
1676 */
1677 void radeon_device_fini(struct radeon_device *rdev)
1678 {
1679 DRM_INFO("radeon: finishing device.\n");
1680 rdev->shutdown = true;
1681 /* evict vram memory */
1682 radeon_bo_evict_vram(rdev);
1683 radeon_fini(rdev);
1684 #ifndef __NetBSD__
1685 if (!pci_is_thunderbolt_attached(rdev->pdev))
1686 vga_switcheroo_unregister_client(rdev->pdev);
1687 if (rdev->flags & RADEON_IS_PX)
1688 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1689 vga_client_register(rdev->pdev, NULL, NULL, NULL);
1690 #endif
1691 #ifdef __NetBSD__
1692 if (rdev->rio_mem_size)
1693 bus_space_unmap(rdev->rio_mem_bst, rdev->rio_mem_bsh,
1694 rdev->rio_mem_size);
1695 rdev->rio_mem_size = 0;
1696 bus_space_unmap(rdev->rmmio_bst, rdev->rmmio_bsh, rdev->rmmio_size);
1697 #else
1698 if (rdev->rio_mem)
1699 pci_iounmap(rdev->pdev, rdev->rio_mem);
1700 rdev->rio_mem = NULL;
1701 iounmap(rdev->rmmio);
1702 rdev->rmmio = NULL;
1703 #endif
1704 if (rdev->family >= CHIP_BONAIRE)
1705 radeon_doorbell_fini(rdev);
1706
1707 DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
1708 spin_lock_destroy(&rdev->irq.vblank_lock);
1709 destroy_rwsem(&rdev->exclusive_lock);
1710 destroy_rwsem(&rdev->pm.mclk_lock);
1711 mutex_destroy(&rdev->srbm_mutex);
1712 mutex_destroy(&rdev->gpu_clock_mutex);
1713 mutex_destroy(&rdev->pm.mutex);
1714 mutex_destroy(&rdev->gem.mutex);
1715 mutex_destroy(&rdev->dc_hw_i2c_mutex);
1716 mutex_destroy(&rdev->ring_lock);
1717 }
1718
1719
1720 /*
1721 * Suspend & resume.
1722 */
1723 /**
1724 * radeon_suspend_kms - initiate device suspend
1725 *
1726 * @dev: drm dev pointer
1727 * @suspend: suspend state
1728 *
1729 * Puts the hw in the suspend state (all asics).
1730 * Returns 0 for success or an error on failure.
1731 * Called at driver suspend.
1732 */
1733 int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1734 bool fbcon, bool freeze)
1735 {
1736 struct radeon_device *rdev;
1737 struct drm_crtc *crtc;
1738 struct drm_connector *connector;
1739 int i, r;
1740
1741 if (dev == NULL || dev->dev_private == NULL) {
1742 return -ENODEV;
1743 }
1744
1745 rdev = dev->dev_private;
1746
1747 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1748 return 0;
1749
1750 drm_kms_helper_poll_disable(dev);
1751
1752 drm_modeset_lock_all(dev);
1753 /* turn off display hw */
1754 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1755 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1756 }
1757 drm_modeset_unlock_all(dev);
1758
1759 /* unpin the front buffers and cursors */
1760 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1761 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1762 struct drm_framebuffer *fb = crtc->primary->fb;
1763 struct radeon_bo *robj;
1764
1765 if (radeon_crtc->cursor_bo) {
1766 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1767 r = radeon_bo_reserve(robj, false);
1768 if (r == 0) {
1769 radeon_bo_unpin(robj);
1770 radeon_bo_unreserve(robj);
1771 }
1772 }
1773
1774 if (fb == NULL || fb->obj[0] == NULL) {
1775 continue;
1776 }
1777 robj = gem_to_radeon_bo(fb->obj[0]);
1778 /* don't unpin kernel fb objects */
1779 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1780 r = radeon_bo_reserve(robj, false);
1781 if (r == 0) {
1782 radeon_bo_unpin(robj);
1783 radeon_bo_unreserve(robj);
1784 }
1785 }
1786 }
1787 /* evict vram memory */
1788 radeon_bo_evict_vram(rdev);
1789
1790 /* wait for gpu to finish processing current batch */
1791 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1792 r = radeon_fence_wait_empty(rdev, i);
1793 if (r) {
1794 /* delay GPU reset to resume */
1795 radeon_fence_driver_force_completion(rdev, i);
1796 }
1797 }
1798
1799 radeon_save_bios_scratch_regs(rdev);
1800
1801 radeon_suspend(rdev);
1802 radeon_hpd_fini(rdev);
1803 /* evict remaining vram memory
1804 * This second call to evict vram is to evict the gart page table
1805 * using the CPU.
1806 */
1807 radeon_bo_evict_vram(rdev);
1808
1809 radeon_agp_suspend(rdev);
1810
1811 #ifndef __NetBSD__ /* pmf handles this for us. */
1812 pci_save_state(dev->pdev);
1813 if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1814 rdev->asic->asic_reset(rdev, true);
1815 pci_restore_state(dev->pdev);
1816 } else if (suspend) {
1817 /* Shut down the device */
1818 pci_disable_device(dev->pdev);
1819 pci_set_power_state(dev->pdev, PCI_D3hot);
1820 }
1821 #endif
1822
1823 if (fbcon) {
1824 console_lock();
1825 radeon_fbdev_set_suspend(rdev, 1);
1826 console_unlock();
1827 }
1828 return 0;
1829 }
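
#if 0	/* illustrative only -- not built */
/*
 * A minimal sketch of a suspend-to-RAM entry point built on the
 * function above.  On Linux, radeon_pmops_suspend() makes exactly this
 * call; on NetBSD the pmf(9) framework plays that role.  The wrapper
 * name is hypothetical.
 */
static int
example_radeon_pm_suspend(struct drm_device *dev)
{
	/*
	 * suspend=true: power the PCI device down to D3hot;
	 * fbcon=true: suspend the fbdev console too;
	 * freeze=false: plain S3, not hibernation.
	 */
	return radeon_suspend_kms(dev, true, true, false);
}
#endif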
1830
1831 /**
1832 * radeon_resume_kms - initiate device resume
1833 *
1834 * @dev: drm device pointer
1835 *
1836 * Bring the hw back to operating state (all asics).
1837 * Returns 0 for success or an error on failure.
1838 * Called at driver resume.
1839 */
1840 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1841 {
1842 struct drm_connector *connector;
1843 struct radeon_device *rdev = dev->dev_private;
1844 struct drm_crtc *crtc;
1845 int r;
1846
1847 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1848 return 0;
1849
1850 if (fbcon) {
1851 console_lock();
1852 }
1853 #ifndef __NetBSD__ /* pmf handles this for us. */
1854 if (resume) {
1855 pci_set_power_state(dev->pdev, PCI_D0);
1856 pci_restore_state(dev->pdev);
1857 if (pci_enable_device(dev->pdev)) {
1858 if (fbcon)
1859 console_unlock();
1860 return -1;
1861 }
1862 }
1863 #endif
1864 /* resume AGP if in use */
1865 radeon_agp_resume(rdev);
1866 radeon_resume(rdev);
1867
1868 r = radeon_ib_ring_tests(rdev);
1869 if (r)
1870 DRM_ERROR("ib ring test failed (%d).\n", r);
1871
1872 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1873 /* do dpm late init */
1874 r = radeon_pm_late_init(rdev);
1875 if (r) {
1876 rdev->pm.dpm_enabled = false;
1877 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1878 }
1879 } else {
1880 /* resume old pm late */
1881 radeon_pm_resume(rdev);
1882 }
1883
1884 radeon_restore_bios_scratch_regs(rdev);
1885
1886 /* pin cursors */
1887 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1888 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1889
1890 if (radeon_crtc->cursor_bo) {
1891 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1892 r = radeon_bo_reserve(robj, false);
1893 if (r == 0) {
1894 /* Only a 27-bit offset is available for legacy (pre-AVIVO) cursors */
1895 r = radeon_bo_pin_restricted(robj,
1896 RADEON_GEM_DOMAIN_VRAM,
1897 ASIC_IS_AVIVO(rdev) ?
1898 0 : 1 << 27,
1899 &radeon_crtc->cursor_addr);
1900 if (r != 0)
1901 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1902 radeon_bo_unreserve(robj);
1903 }
1904 }
1905 }
1906
1907 /* init dig PHYs, disp eng pll */
1908 if (rdev->is_atom_bios) {
1909 radeon_atom_encoder_init(rdev);
1910 radeon_atom_disp_eng_pll_init(rdev);
1911 /* turn on the BL */
1912 if (rdev->mode_info.bl_encoder) {
1913 u8 bl_level = radeon_get_backlight_level(rdev,
1914 rdev->mode_info.bl_encoder);
1915 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1916 bl_level);
1917 }
1918 }
1919 /* reset hpd state */
1920 radeon_hpd_init(rdev);
1921 /* blat the mode back in */
1922 if (fbcon) {
1923 drm_helper_resume_force_mode(dev);
1924 /* turn on display hw */
1925 drm_modeset_lock_all(dev);
1926 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1927 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1928 }
1929 drm_modeset_unlock_all(dev);
1930 }
1931
1932 drm_kms_helper_poll_enable(dev);
1933
1934 /* set the power state here in case we are a PX system or headless */
1935 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1936 radeon_pm_compute_clocks(rdev);
1937
1938 if (fbcon) {
1939 radeon_fbdev_set_suspend(rdev, 0);
1940 console_unlock();
1941 }
1942
1943 return 0;
1944 }
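
#if 0	/* illustrative only -- not built */
/*
 * The matching resume sketch: restore PCI state, re-enable the device
 * and bring the hw back up.  radeon_pmops_resume() is the real Linux
 * caller; the wrapper name is hypothetical.
 */
static int
example_radeon_pm_resume(struct drm_device *dev)
{
	/* resume=true: restore PCI power/state; fbcon=true: wake fbcon. */
	return radeon_resume_kms(dev, true, true);
}
#endif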
1945
1946 /**
1947 * radeon_gpu_reset - reset the asic
1948 *
1949 * @rdev: radeon device pointer
1950 *
1951 * Attempt to reset the GPU if it has hung (all asics).
1952 * Returns 0 for success or an error on failure.
1953 */
1954 int radeon_gpu_reset(struct radeon_device *rdev)
1955 {
1956 unsigned ring_sizes[RADEON_NUM_RINGS];
1957 uint32_t *ring_data[RADEON_NUM_RINGS];
1958
1959 bool saved = false;
1960
1961 int i, r;
1962 int resched;
1963
1964 down_write(&rdev->exclusive_lock);
1965
1966 if (!rdev->needs_reset) {
1967 up_write(&rdev->exclusive_lock);
1968 return 0;
1969 }
1970
1971 atomic_inc(&rdev->gpu_reset_counter);
1972
1973 radeon_save_bios_scratch_regs(rdev);
1974 /* block TTM */
1975 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1976 radeon_suspend(rdev);
1977 radeon_hpd_fini(rdev);
1978
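/*
 * Save the still-unprocessed commands of every ring so they can be
 * replayed once the asic is back up; whether anything was saved also
 * decides below if a failed IB test is worth an -EAGAIN retry.
 */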
1979 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1980 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1981 &ring_data[i]);
1982 if (ring_sizes[i]) {
1983 saved = true;
1984 dev_info(rdev->dev, "Saved %d dwords of commands "
1985 "on ring %d.\n", ring_sizes[i], i);
1986 }
1987 }
1988
1989 r = radeon_asic_reset(rdev);
1990 if (!r) {
1991 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1992 radeon_resume(rdev);
1993 }
1994
1995 radeon_restore_bios_scratch_regs(rdev);
1996
1997 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1998 if (!r && ring_data[i]) {
1999 radeon_ring_restore(rdev, &rdev->ring[i],
2000 ring_sizes[i], ring_data[i]);
2001 } else {
2002 radeon_fence_driver_force_completion(rdev, i);
2003 kfree(ring_data[i]);
2004 }
2005 }
2006
2007 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2008 /* do dpm late init */
2009 r = radeon_pm_late_init(rdev);
2010 if (r) {
2011 rdev->pm.dpm_enabled = false;
2012 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
2013 }
2014 } else {
2015 /* resume old pm late */
2016 radeon_pm_resume(rdev);
2017 }
2018
2019 /* init dig PHYs, disp eng pll */
2020 if (rdev->is_atom_bios) {
2021 radeon_atom_encoder_init(rdev);
2022 radeon_atom_disp_eng_pll_init(rdev);
2023 /* turn on the BL */
2024 if (rdev->mode_info.bl_encoder) {
2025 u8 bl_level = radeon_get_backlight_level(rdev,
2026 rdev->mode_info.bl_encoder);
2027 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
2028 bl_level);
2029 }
2030 }
2031 /* reset hpd state */
2032 radeon_hpd_init(rdev);
2033
2034 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
2035
2036 rdev->in_reset = true;
2037 rdev->needs_reset = false;
2038
2039 downgrade_write(&rdev->exclusive_lock);
2040
2041 drm_helper_resume_force_mode(rdev->ddev);
2042
2043 /* set the power state here in case we are a PX system or headless */
2044 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
2045 radeon_pm_compute_clocks(rdev);
2046
2047 if (!r) {
2048 r = radeon_ib_ring_tests(rdev);
2049 if (r && saved)
2050 r = -EAGAIN;
2051 } else {
2052 /* bad news, how do we tell userspace? */
2053 dev_info(rdev->dev, "GPU reset failed\n");
2054 }
2055
2056 rdev->needs_reset = r == -EAGAIN;
2057 rdev->in_reset = false;
2058
2059 up_read(&rdev->exclusive_lock);
2060 return r;
2061 }
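
#if 0	/* illustrative only -- not built */
/*
 * A sketch of the usual reset trigger: lockup detection (see
 * radeon_fence.c) marks the device as needing reset and then calls in
 * here, where the flag is re-checked under exclusive_lock.  The helper
 * name is hypothetical.
 */
static void
example_radeon_handle_lockup(struct radeon_device *rdev)
{
	rdev->needs_reset = true;
	if (radeon_gpu_reset(rdev))
		dev_err(rdev->dev, "GPU reset failed, device is wedged\n");
}
#endif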
2062
2063
2064 /*
2065 * Debugfs
2066 */
2067 int radeon_debugfs_add_files(struct radeon_device *rdev,
2068 struct drm_info_list *files,
2069 unsigned nfiles)
2070 {
2071 unsigned i;
2072
2073 for (i = 0; i < rdev->debugfs_count; i++) {
2074 if (rdev->debugfs[i].files == files) {
2075 /* Already registered */
2076 return 0;
2077 }
2078 }
2079
2080 i = rdev->debugfs_count + 1;
2081 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
2082 DRM_ERROR("Reached maximum number of debugfs components.\n");
2083 DRM_ERROR("Report so we increase "
2084 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
2085 return -EINVAL;
2086 }
2087 rdev->debugfs[rdev->debugfs_count].files = files;
2088 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
2089 rdev->debugfs_count = i;
2090 #if defined(CONFIG_DEBUG_FS)
2091 drm_debugfs_create_files(files, nfiles,
2092 rdev->ddev->primary->debugfs_root,
2093 rdev->ddev->primary);
2094 #endif
2095 return 0;
2096 }
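
#if 0	/* illustrative only -- not built */
/*
 * A minimal sketch of registering a debugfs entry through the helper
 * above.  The show callback and file list are hypothetical; real users
 * such as the ring and fence debugfs code follow the same pattern.
 */
static int
example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct radeon_device *rdev = node->minor->dev->dev_private;

	seq_printf(m, "gpu reset count: %d\n",
	    atomic_read(&rdev->gpu_reset_counter));
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{ "example_info", example_debugfs_show, 0, NULL },
};

static int
example_debugfs_init(struct radeon_device *rdev)
{
	return radeon_debugfs_add_files(rdev, example_debugfs_list, 1);
}
#endif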
2097