/*	$NetBSD: radeon_rv515.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_rv515.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");

#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "atom.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "rv515_reg_safe.h"
#include "rv515d.h"

/* This file gathers functions specific to rv515. */
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);

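/*
 * Register offsets added to the D1 CRTC register addresses to reach the
 * matching D2 registers (index 0 = CRTC1, index 1 = CRTC2).
 */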
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

void rv515_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (rv515_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}
	if (rv515_debugfs_ga_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for GA !\n");
	}
}

void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  ISYNC_ANY2D_IDLE3D |
			  ISYNC_ANY3D_IDLE2D |
			  ISYNC_WAIT_IDLEGUI |
			  ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << MS_X0_SHIFT) |
			   (6 << MS_Y0_SHIFT) |
			   (6 << MS_X1_SHIFT) |
			   (6 << MS_Y1_SHIFT) |
			   (6 << MS_X2_SHIFT) |
			   (6 << MS_Y2_SHIFT) |
			   (6 << MSBD0_Y_SHIFT) |
			   (6 << MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << MS_X3_SHIFT) |
			   (6 << MS_Y3_SHIFT) |
			   (6 << MS_X4_SHIFT) |
			   (6 << MS_Y4_SHIFT) |
			   (6 << MS_X5_SHIFT) |
			   (6 << MS_Y5_SHIFT) |
			   (6 << MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
	radeon_ring_write(ring, PACKET0(0x20C8, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);
}

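/*
 * Poll MC_STATUS until the memory controller reports idle.  Returns 0 on
 * success or -1 if it does not go idle within rdev->usec_timeout.
 */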
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(MC_STATUS);
		if (tmp & MC_STATUS_IDLE) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void rv515_vga_render_disable(struct radeon_device *rdev)
{
	WREG32(R_000300_VGA_RENDER_CONTROL,
	       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}

static void rv515_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while resetting GPU. Bad things might happen.\n");
	}
	rv515_vga_render_disable(rdev);
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while resetting GPU. Bad things might happen.\n");
	}
	if (rv515_mc_wait_for_idle(rdev)) {
		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
	}
}

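/*
 * Derive the VRAM bus width from the memory channel field of RV515_MC_CNTL
 * (field value 0 selects a 64-bit interface, 1 selects 128-bit); rv515 VRAM
 * is always treated as DDR.
 */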
static void rv515_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0:
		rdev->mc.vram_width = 64;
		break;
	case 1:
		rdev->mc.vram_width = 128;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
}

static void rv515_mc_init(struct radeon_device *rdev)
{

	rv515_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	radeon_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

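/*
 * Indirect MC register access: the register index is written to MC_IND_INDEX
 * (with the access-enable bits in the upper half) and the value is
 * transferred through MC_IND_DATA, serialized by mc_idx_lock.
 */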
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
	r = RREG32(MC_IND_DATA);
	WREG32(MC_IND_INDEX, 0);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);

	return r;
}

void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
	WREG32(MC_IND_DATA, (v));
	WREG32(MC_IND_INDEX, 0);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

#if defined(CONFIG_DEBUG_FS)
static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(SU_REG_DEST);
	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
	tmp = RREG32(GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(0x2140);
	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
	radeon_asic_reset(rdev);
	tmp = RREG32(0x425C);
	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv515_pipes_info_list[] = {
	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
};

static struct drm_info_list rv515_ga_info_list[] = {
	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
};
#endif

static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
#else
	return 0;
#endif
}

static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
#else
	return 0;
#endif
}

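/*
 * Save VGA render/HDP state, blank every enabled CRTC (waiting out a vblank
 * and one full frame), black out the MC on r6xx+ parts, and lock the
 * double-buffered surface registers so the framebuffer can be relocated
 * safely.
 */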
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
				radeon_wait_for_vblank(rdev, i);
				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~AVIVO_CRTC_EN;
			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	if (rdev->family >= CHIP_R600) {
		if (rdev->family >= CHIP_RV770)
			blackout = RREG32(R700_MC_CITF_CNTL);
		else
			blackout = RREG32(R600_CITF_CNTL);
		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
			/* Block CPU access */
			WREG32(R600_BIF_FB_EN, 0);
			/* blackout the MC */
			blackout |= R600_BLACKOUT_MASK;
			if (rdev->family >= CHIP_RV770)
				WREG32(R700_MC_CITF_CNTL, blackout);
			else
				WREG32(R600_CITF_CNTL, blackout);
		}
	}
	/* wait for the MC to settle */
	udelay(100);

	/* lock double buffered regs */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
			if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
				tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (!(tmp & 1)) {
				tmp |= 1;
				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
		}
	}
}

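/*
 * Re-point the CRTC and VGA surfaces at the new VRAM start, unlock the
 * double-buffered registers, lift the MC blackout on r6xx+ parts and
 * re-enable memory requests for the CRTCs blanked in rv515_mc_stop().
 */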
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->family >= CHIP_RV770) {
			if (i == 0) {
				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
			} else {
				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
			}
		}
		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 3) {
				tmp &= ~0x7;
				tmp |= 0x3;
				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
				tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < rdev->usec_timeout; j++) {
				tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	if (rdev->family >= CHIP_R600) {
		/* unblackout the MC */
		if (rdev->family >= CHIP_RV770)
			tmp = RREG32(R700_MC_CITF_CNTL);
		else
			tmp = RREG32(R600_CITF_CNTL);
		tmp &= ~R600_BLACKOUT_MASK;
		if (rdev->family >= CHIP_RV770)
			WREG32(R700_MC_CITF_CNTL, tmp);
		else
			WREG32(R600_CITF_CNTL, tmp);
		/* allow CPU access */
		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
	}

	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}


static void rv515_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for MC idle */
	if (rv515_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Write VRAM size in case we are limiting it */
	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Program MC, should be a 32-bit limited address space */
	WREG32_MC(R_000001_MC_FB_LOCATION,
		  S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32_MC(R_000002_MC_AGP_LOCATION,
			  S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			  S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32_MC(R_000004_MC_AGP_BASE_2,
			  S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
	} else {
		WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
		WREG32_MC(R_000003_MC_AGP_BASE, 0);
		WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
	}

	rv515_mc_resume(rdev, &save);
}


void rv515_clock_startup(struct radeon_device *rdev)
{
	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_atom_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	WREG32_PLL(R_00000F_CP_DYN_CNTL,
		   RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
	WREG32_PLL(R_000011_E2_DYN_CNTL,
		   RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
	WREG32_PLL(R_000013_IDCT_DYN_CNTL,
		   RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
}


static int rv515_startup(struct radeon_device *rdev)
{
	int r;

	rv515_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rv515_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}


int rv515_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rv515_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}


int rv515_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	return 0;
}


void rv515_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
}

void rv515_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rv370_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rv515_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	rv515_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rv370_pcie_gart_init(rdev);
	if (r)
		return r;
	rv515_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = rv515_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv370_pcie_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}

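/*
 * Load a fixed filter-coefficient table into the AVIVO TV scaler through the
 * per-CRTC index/data register pair at 0x6578/0x657c.
 */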
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
{
	int index_reg = 0x6578 + crtc->crtc_offset;
	int data_reg = 0x657c + crtc->crtc_offset;

	WREG32(0x659C + crtc->crtc_offset, 0x0);
	WREG32(0x6594 + crtc->crtc_offset, 0x705);
	WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
	WREG32(0x65D8 + crtc->crtc_offset, 0x0);
	WREG32(0x65B0 + crtc->crtc_offset, 0x0);
	WREG32(0x65C0 + crtc->crtc_offset, 0x0);
	WREG32(0x65D4 + crtc->crtc_offset, 0x0);
	WREG32(index_reg, 0x0);
	WREG32(data_reg, 0x841880A8);
	WREG32(index_reg, 0x1);
	WREG32(data_reg, 0x84208680);
	WREG32(index_reg, 0x2);
	WREG32(data_reg, 0xBFF880B0);
	WREG32(index_reg, 0x100);
	WREG32(data_reg, 0x83D88088);
	WREG32(index_reg, 0x101);
	WREG32(data_reg, 0x84608680);
	WREG32(index_reg, 0x102);
	WREG32(data_reg, 0xBFF080D0);
	WREG32(index_reg, 0x200);
	WREG32(data_reg, 0x83988068);
	WREG32(index_reg, 0x201);
	WREG32(data_reg, 0x84A08680);
	WREG32(index_reg, 0x202);
	WREG32(data_reg, 0xBFF080F8);
	WREG32(index_reg, 0x300);
	WREG32(data_reg, 0x83588058);
	WREG32(index_reg, 0x301);
	WREG32(data_reg, 0x84E08660);
	WREG32(index_reg, 0x302);
	WREG32(data_reg, 0xBFF88120);
	WREG32(index_reg, 0x400);
	WREG32(data_reg, 0x83188040);
	WREG32(index_reg, 0x401);
	WREG32(data_reg, 0x85008660);
	WREG32(index_reg, 0x402);
	WREG32(data_reg, 0xBFF88150);
	WREG32(index_reg, 0x500);
	WREG32(data_reg, 0x82D88030);
	WREG32(index_reg, 0x501);
	WREG32(data_reg, 0x85408640);
	WREG32(index_reg, 0x502);
	WREG32(data_reg, 0xBFF88180);
	WREG32(index_reg, 0x600);
	WREG32(data_reg, 0x82A08018);
	WREG32(index_reg, 0x601);
	WREG32(data_reg, 0x85808620);
	WREG32(index_reg, 0x602);
	WREG32(data_reg, 0xBFF081B8);
	WREG32(index_reg, 0x700);
	WREG32(data_reg, 0x82608010);
	WREG32(index_reg, 0x701);
	WREG32(data_reg, 0x85A08600);
	WREG32(index_reg, 0x702);
	WREG32(data_reg, 0x800081F0);
	WREG32(index_reg, 0x800);
	WREG32(data_reg, 0x8228BFF8);
	WREG32(index_reg, 0x801);
	WREG32(data_reg, 0x85E085E0);
	WREG32(index_reg, 0x802);
	WREG32(data_reg, 0xBFF88228);
	WREG32(index_reg, 0x10000);
	WREG32(data_reg, 0x82A8BF00);
	WREG32(index_reg, 0x10001);
	WREG32(data_reg, 0x82A08CC0);
	WREG32(index_reg, 0x10002);
	WREG32(data_reg, 0x8008BEF8);
	WREG32(index_reg, 0x10100);
	WREG32(data_reg, 0x81F0BF28);
	WREG32(index_reg, 0x10101);
	WREG32(data_reg, 0x83608CA0);
	WREG32(index_reg, 0x10102);
	WREG32(data_reg, 0x8018BED0);
	WREG32(index_reg, 0x10200);
	WREG32(data_reg, 0x8148BF38);
	WREG32(index_reg, 0x10201);
	WREG32(data_reg, 0x84408C80);
	WREG32(index_reg, 0x10202);
	WREG32(data_reg, 0x8008BEB8);
	WREG32(index_reg, 0x10300);
	WREG32(data_reg, 0x80B0BF78);
	WREG32(index_reg, 0x10301);
	WREG32(data_reg, 0x85008C20);
	WREG32(index_reg, 0x10302);
	WREG32(data_reg, 0x8020BEA0);
	WREG32(index_reg, 0x10400);
	WREG32(data_reg, 0x8028BF90);
	WREG32(index_reg, 0x10401);
	WREG32(data_reg, 0x85E08BC0);
	WREG32(index_reg, 0x10402);
	WREG32(data_reg, 0x8018BE90);
	WREG32(index_reg, 0x10500);
	WREG32(data_reg, 0xBFB8BFB0);
	WREG32(index_reg, 0x10501);
	WREG32(data_reg, 0x86C08B40);
	WREG32(index_reg, 0x10502);
	WREG32(data_reg, 0x8010BE90);
	WREG32(index_reg, 0x10600);
	WREG32(data_reg, 0xBF58BFC8);
	WREG32(index_reg, 0x10601);
	WREG32(data_reg, 0x87A08AA0);
	WREG32(index_reg, 0x10602);
	WREG32(data_reg, 0x8010BE98);
	WREG32(index_reg, 0x10700);
	WREG32(data_reg, 0xBF10BFF0);
	WREG32(index_reg, 0x10701);
	WREG32(data_reg, 0x886089E0);
	WREG32(index_reg, 0x10702);
	WREG32(data_reg, 0x8018BEB0);
	WREG32(index_reg, 0x10800);
	WREG32(data_reg, 0xBED8BFE8);
	WREG32(index_reg, 0x10801);
	WREG32(data_reg, 0x89408940);
	WREG32(index_reg, 0x10802);
	WREG32(data_reg, 0xBFE8BED8);
	WREG32(index_reg, 0x20000);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20001);
	WREG32(data_reg, 0x90008000);
	WREG32(index_reg, 0x20002);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20003);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20100);
	WREG32(data_reg, 0x80108000);
	WREG32(index_reg, 0x20101);
	WREG32(data_reg, 0x8FE0BF70);
	WREG32(index_reg, 0x20102);
	WREG32(data_reg, 0xBFE880C0);
	WREG32(index_reg, 0x20103);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20200);
	WREG32(data_reg, 0x8018BFF8);
	WREG32(index_reg, 0x20201);
	WREG32(data_reg, 0x8F80BF08);
	WREG32(index_reg, 0x20202);
	WREG32(data_reg, 0xBFD081A0);
	WREG32(index_reg, 0x20203);
	WREG32(data_reg, 0xBFF88000);
	WREG32(index_reg, 0x20300);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20301);
	WREG32(data_reg, 0x8EE0BEC0);
	WREG32(index_reg, 0x20302);
	WREG32(data_reg, 0xBFB082A0);
	WREG32(index_reg, 0x20303);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20400);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20401);
	WREG32(data_reg, 0x8E00BEA0);
	WREG32(index_reg, 0x20402);
	WREG32(data_reg, 0xBF8883C0);
	WREG32(index_reg, 0x20403);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x20500);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20501);
	WREG32(data_reg, 0x8D00BE90);
	WREG32(index_reg, 0x20502);
	WREG32(data_reg, 0xBF588500);
	WREG32(index_reg, 0x20503);
	WREG32(data_reg, 0x80008008);
	WREG32(index_reg, 0x20600);
	WREG32(data_reg, 0x80188000);
	WREG32(index_reg, 0x20601);
	WREG32(data_reg, 0x8BC0BE98);
	WREG32(index_reg, 0x20602);
	WREG32(data_reg, 0xBF308660);
	WREG32(index_reg, 0x20603);
	WREG32(data_reg, 0x80008008);
	WREG32(index_reg, 0x20700);
	WREG32(data_reg, 0x80108000);
	WREG32(index_reg, 0x20701);
	WREG32(data_reg, 0x8A80BEB0);
	WREG32(index_reg, 0x20702);
	WREG32(data_reg, 0xBF0087C0);
	WREG32(index_reg, 0x20703);
	WREG32(data_reg, 0x80008008);
	WREG32(index_reg, 0x20800);
	WREG32(data_reg, 0x80108000);
	WREG32(index_reg, 0x20801);
	WREG32(data_reg, 0x8920BED0);
	WREG32(index_reg, 0x20802);
	WREG32(data_reg, 0xBED08920);
	WREG32(index_reg, 0x20803);
	WREG32(data_reg, 0x80008010);
	WREG32(index_reg, 0x30000);
	WREG32(data_reg, 0x90008000);
	WREG32(index_reg, 0x30001);
	WREG32(data_reg, 0x80008000);
	WREG32(index_reg, 0x30100);
	WREG32(data_reg, 0x8FE0BF90);
	WREG32(index_reg, 0x30101);
	WREG32(data_reg, 0xBFF880A0);
	WREG32(index_reg, 0x30200);
	WREG32(data_reg, 0x8F60BF40);
	WREG32(index_reg, 0x30201);
	WREG32(data_reg, 0xBFE88180);
	WREG32(index_reg, 0x30300);
	WREG32(data_reg, 0x8EC0BF00);
	WREG32(index_reg, 0x30301);
	WREG32(data_reg, 0xBFC88280);
	WREG32(index_reg, 0x30400);
	WREG32(data_reg, 0x8DE0BEE0);
	WREG32(index_reg, 0x30401);
	WREG32(data_reg, 0xBFA083A0);
	WREG32(index_reg, 0x30500);
	WREG32(data_reg, 0x8CE0BED0);
	WREG32(index_reg, 0x30501);
	WREG32(data_reg, 0xBF7884E0);
	WREG32(index_reg, 0x30600);
	WREG32(data_reg, 0x8BA0BED8);
	WREG32(index_reg, 0x30601);
	WREG32(data_reg, 0xBF508640);
	WREG32(index_reg, 0x30700);
	WREG32(data_reg, 0x8A60BEE8);
	WREG32(index_reg, 0x30701);
	WREG32(data_reg, 0xBF2087A0);
	WREG32(index_reg, 0x30800);
	WREG32(data_reg, 0x8900BF00);
	WREG32(index_reg, 0x30801);
	WREG32(data_reg, 0xBF008900);
}

struct rv515_watermark {
	u32 lb_request_fifo_depth;
	fixed20_12 num_line_pair;
	fixed20_12 estimated_width;
	fixed20_12 worst_case_latency;
	fixed20_12 consumption_rate;
	fixed20_12 active_time;
	fixed20_12 dbpp;
	fixed20_12 priority_mark_max;
	fixed20_12 priority_mark;
	fixed20_12 sclk;
};

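/*
 * Compute the line-buffer watermark parameters for one CRTC; 'low' selects
 * the low DPM engine clock (sclk) when dynamic power management is active.
 */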
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
					 struct radeon_crtc *crtc,
					 struct rv515_watermark *wm,
					 bool low)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
	fixed20_12 sclk;
	u32 selected_sclk;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set priority mark to maximum */
		wm->lb_request_fifo_depth = 4;
		return;
	}

	/* rv6xx, rv7xx */
	if ((rdev->family >= CHIP_RV610) &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		selected_sclk = radeon_dpm_get_sclk(rdev, low);
	else
		selected_sclk = rdev->pm.current_sclk;

	/* sclk in MHz */
	a.full = dfixed_const(100);
	sclk.full = dfixed_const(selected_sclk);
	sclk.full = dfixed_div(sclk, a);

	if (crtc->vsc.full > dfixed_const(2))
		wm->num_line_pair.full = dfixed_const(2);
	else
		wm->num_line_pair.full = dfixed_const(1);

	b.full = dfixed_const(mode->crtc_hdisplay);
	c.full = dfixed_const(256);
	a.full = dfixed_div(b, c);
	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
	if (a.full < dfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
	}

	/* Determine consumption rate
	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = dfixed_const(mode->clock);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);
	pclk.full = dfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = dfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = dfixed_mul(b, crtc->hsc);
		c.full = dfixed_const(2);
		b.full = dfixed_div(b, c);
		consumption_time.full = dfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = dfixed_const(1);
	wm->consumption_rate.full = dfixed_div(a, consumption_time);


	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  htotal = total number of horizontal pixels
	 *  pclk = pixel clock period(ns)
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = dfixed_mul(a, pclk);

	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = dfixed_mul(line_time, b);
	wm->active_time.full = dfixed_div(wm->active_time, a);

	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(MHz)
	 */
	a.full = dfixed_const(600 * 1000);
	chunk_time.full = dfixed_div(a, sclk);
	read_delay_latency.full = dfixed_const(1000);

	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (dfixed_trunc(wm->num_line_pair) > 1) {
		a.full = dfixed_const(3);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
	}

	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 * LineTime = total time for one line of display
	 * ChunkTime = the time it takes the DCP to send one chunk
	 *             of data to the LB which consists of
	 *             pipeline delay and inter chunk gap
	 */
	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = dfixed_const(2 * 16);

	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = dfixed_const(16);
	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = dfixed_div(estimated_width, consumption_time);
	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = wm->priority_mark_max.full;
	} else {
		a.full = dfixed_const(16);
		wm->priority_mark.full = dfixed_div(estimated_width, a);
		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}

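/*
 * Convert the per-CRTC watermarks into the D1/D2 MODE_PRIORITY count values,
 * forcing priority always-on when the user requests high display priority
 * (disp_priority == 2).
 */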
static void rv515_compute_mode_priority(struct radeon_device *rdev,
					struct rv515_watermark *wm0,
					struct rv515_watermark *wm1,
					struct drm_display_mode *mode0,
					struct drm_display_mode *mode1,
					u32 *d1mode_priority_a_cnt,
					u32 *d2mode_priority_a_cnt)
{
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	*d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
	*d2mode_priority_a_cnt = MODE_PRIORITY_OFF;

	if (mode0 && mode1) {
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		if (dfixed_trunc(wm1->dbpp) > 64)
			b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
		else
			b.full = wm1->num_line_pair.full;
		a.full += b.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm0->worst_case_latency,
					    wm0->consumption_rate);
			priority_mark02.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
					    wm0->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm1->worst_case_latency,
					    wm1->consumption_rate);
			priority_mark12.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
					    wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2) {
			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
		}
	} else if (mode0) {
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm0->worst_case_latency,
					    wm0->consumption_rate);
			priority_mark02.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
					    wm0->consumption_rate);
			b.full = dfixed_const(16);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		if (rdev->disp_priority == 2)
			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
	} else if (mode1) {
		if (dfixed_trunc(wm1->dbpp) > 64)
			a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
		else
			a.full = wm1->num_line_pair.full;
		fill_rate.full = dfixed_div(wm1->sclk, a);
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm1->worst_case_latency,
					    wm1->consumption_rate);
			priority_mark12.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
					    wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2)
			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
	}
}

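/*
 * Program the line-buffer request depth and the A (high sclk) and B (low
 * sclk) display priority watermark sets for both CRTCs.
 */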
void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rv515_watermark wm0_high, wm0_low;
	struct rv515_watermark wm1_high, wm1_low;
	u32 tmp;
	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	rs690_line_buffer_adjust(rdev, mode0, mode1);

	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);

	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false);
	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false);

	tmp = wm0_high.lb_request_fifo_depth;
	tmp |= wm1_high.lb_request_fifo_depth << 16;
	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);

	rv515_compute_mode_priority(rdev,
				    &wm0_high, &wm1_high,
				    mode0, mode1,
				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
	rv515_compute_mode_priority(rdev,
				    &wm0_low, &wm1_low,
				    mode0, mode1,
				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);

	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
}

void rv515_bandwidth_update(struct radeon_device *rdev)
{
	uint32_t tmp;
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;

	if (!rdev->mode_info.mode_config_initialized)
		return;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for display priority
	 * option.
	 */
	if ((rdev->disp_priority == 2) &&
	    (rdev->family == CHIP_RV515)) {
		tmp = RREG32_MC(MC_MISC_LAT_TIMER);
		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
		if (mode1)
			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
		if (mode0)
			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
		WREG32_MC(MC_MISC_LAT_TIMER, tmp);
	}
	rv515_bandwidth_avivo_update(rdev);
}