/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"

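/* Poll the MC system status register until the memory controller
 * reports idle.  Returns 0 once idle, -1 if the usec timeout expires.
 */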
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
		if (G_000090_MC_SYSTEM_IDLE(tmp))
			return 0;
		udelay(1);
	}
	return -1;
}

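/* Program the render pipe configuration (shared with the r420 code)
 * and make sure the memory controller goes idle again afterwards.
 */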
static void rs690_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is this correct? */
	r420_pipes_init(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
};

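/* Read the IGP sideport/system memory clocks and HT link parameters
 * from the ATOM IntegratedSystemInfo table (falling back to safe
 * defaults if the table is missing), then derive the K8, HT and
 * sideport bandwidth figures used by the watermark code below.
 */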
void rs690_pm_info(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *info;
	uint16_t data_offset;
	uint8_t frev, crev;
	fixed20_12 tmp;

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		info = (union igp_info *)((uintptr_t)rdev->mode_info.atom_context->bios + data_offset);

		/* Get various system information from the BIOS */
		switch (crev) {
		case 1:
			tmp.full = dfixed_const(100);
			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
			if (le16_to_cpu(info->info.usK8MemoryClock))
				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
			else if (rdev->clock.default_mclk) {
				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
			} else
				rdev->pm.igp_system_mclk.full = dfixed_const(400);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
			break;
		case 2:
			tmp.full = dfixed_const(100);
			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
			else if (rdev->clock.default_mclk)
				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
			else
				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
			break;
		default:
			/* Assume the slowest possible clocks, i.e. worst case */
			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
			rdev->pm.igp_system_mclk.full = dfixed_const(200);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
			break;
		}
	} else {
		/* Assume the slowest possible clocks, i.e. worst case */
		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
		rdev->pm.igp_system_mclk.full = dfixed_const(200);
		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
	}
	/* Compute various bandwidths */
	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
	tmp.full = dfixed_const(4);
	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
	 *              = ht_clk * ht_width / 5
	 */
	tmp.full = dfixed_const(5);
	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
						rdev->pm.igp_ht_link_width);
	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
	if (rdev->pm.ht_bandwidth.full < rdev->pm.max_bandwidth.full) {
		/* HT link is a limiting factor */
		rdev->pm.max_bandwidth.full = rdev->pm.ht_bandwidth.full;
	}
	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
	 *                    = (sideport_clk * 14) / 10
	 */
	tmp.full = dfixed_const(14);
	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
	tmp.full = dfixed_const(10);
	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}

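/* Determine VRAM size and placement, detect sideport memory, and set
 * up the K8 direct mapping (fastfb) when only UMA memory is in use.
 */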
static void rs690_mc_init(struct radeon_device *rdev)
{
	u64 base;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	rs400_gart_adjust_size(rdev);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
	base = G_000100_MC_FB_START(base) << 16;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	/* Some boards seem to be configured for 128MB of sideport memory,
	 * but really only have 64MB.  Just skip the sideport and use
	 * UMA memory.
	 */
	if (rdev->mc.igp_sideport_enabled &&
	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
		base += 128 * 1024 * 1024;
		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	}

	/* Use K8 direct mapping for fast fb access. */
	rdev->fastfb_working = false;
	h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
	l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
	k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
	if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
	{
		/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
		 * memory is present.
		 */
		if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
			DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
					(unsigned long long)rdev->mc.aper_base, k8_addr);
			rdev->mc.aper_base = (resource_size_t)k8_addr;
			rdev->fastfb_working = true;
		}
	}

	rs690_pm_info(rdev);
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

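/* Pick a line buffer split between the two display controllers based
 * on which CRTCs are enabled and their horizontal resolutions.
 */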
void rs690_line_buffer_adjust(struct radeon_device *rdev,
			      struct drm_display_mode *mode1,
			      struct drm_display_mode *mode2)
{
	u32 tmp;

	/*
	 * Line Buffer Setup
	 * There is a single line buffer shared by both display controllers.
	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning can either be done
	 * manually or via one of four preset allocations specified in bits 1:0:
	 *  0 - line buffer is divided in half and shared between the crtcs
	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 *  2 - D1 gets the whole buffer
	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
	 * allocation mode. In manual allocation mode, D1 always starts at 0,
	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
	 */
	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
	/* auto */
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}

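/* Per-CRTC watermark parameters computed by
 * rs690_crtc_bandwidth_compute() and consumed by
 * rs690_compute_mode_priority().
 */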
struct rs690_watermark {
	u32        lb_request_fifo_depth;
	fixed20_12 num_line_pair;
	fixed20_12 estimated_width;
	fixed20_12 worst_case_latency;
	fixed20_12 consumption_rate;
	fixed20_12 active_time;
	fixed20_12 dbpp;
	fixed20_12 priority_mark_max;
	fixed20_12 priority_mark;
	fixed20_12 sclk;
};

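/* Compute the display watermarks for one CRTC: request FIFO depth,
 * consumption rate, active time, worst case latency and priority
 * marks, based on the current (or DPM-selected) engine clock.
 */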
static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
					 struct radeon_crtc *crtc,
					 struct rs690_watermark *wm,
					 bool low)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
	fixed20_12 sclk, core_bandwidth, max_bandwidth;
	u32 selected_sclk;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
		wm->lb_request_fifo_depth = 4;
		return;
	}

	if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		selected_sclk = radeon_dpm_get_sclk(rdev, low);
	else
		selected_sclk = rdev->pm.current_sclk;

	/* sclk in MHz */
	a.full = dfixed_const(100);
	sclk.full = dfixed_const(selected_sclk);
	sclk.full = dfixed_div(sclk, a);

	/* core_bandwidth = sclk(MHz) * 16 */
	a.full = dfixed_const(16);
	core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);

	if (crtc->vsc.full > dfixed_const(2))
		wm->num_line_pair.full = dfixed_const(2);
	else
		wm->num_line_pair.full = dfixed_const(1);

	b.full = dfixed_const(mode->crtc_hdisplay);
	c.full = dfixed_const(256);
	a.full = dfixed_div(b, c);
	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
	if (a.full < dfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
	}

	/* Determine consumption rate
	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = dfixed_const(mode->clock);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);
	pclk.full = dfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = dfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = dfixed_mul(b, crtc->hsc);
		c.full = dfixed_const(2);
		b.full = dfixed_div(b, c);
		consumption_time.full = dfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = dfixed_const(1);
	wm->consumption_rate.full = dfixed_div(a, consumption_time);

	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  htotal = total number of horizontal pixels
	 *  pclk = pixel clock period(ns)
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = dfixed_mul(a, pclk);

	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = dfixed_mul(line_time, b);
	wm->active_time.full = dfixed_div(wm->active_time, a);

	/* Maximum bandwidth is the minimum bandwidth of all components */
	max_bandwidth = core_bandwidth;
	if (rdev->mc.igp_sideport_enabled) {
		if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
			rdev->pm.sideport_bandwidth.full)
			max_bandwidth = rdev->pm.sideport_bandwidth;
		read_delay_latency.full = dfixed_const(370 * 800);
		a.full = dfixed_const(1000);
		b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
		read_delay_latency.full = dfixed_div(read_delay_latency, b);
		read_delay_latency.full = dfixed_mul(read_delay_latency, a);
	} else {
		if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
			rdev->pm.k8_bandwidth.full)
			max_bandwidth = rdev->pm.k8_bandwidth;
		if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
			rdev->pm.ht_bandwidth.full)
			max_bandwidth = rdev->pm.ht_bandwidth;
		read_delay_latency.full = dfixed_const(5000);
	}

	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
	a.full = dfixed_const(16);
	sclk.full = dfixed_mul(max_bandwidth, a);
	a.full = dfixed_const(1000);
	sclk.full = dfixed_div(a, sclk);
	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(ns)
	 */
	a.full = dfixed_const(256 * 13);
	chunk_time.full = dfixed_mul(sclk, a);
	a.full = dfixed_const(10);
	chunk_time.full = dfixed_div(chunk_time, a);

	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (dfixed_trunc(wm->num_line_pair) > 1) {
		a.full = dfixed_const(3);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		a.full = dfixed_const(2);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	}

	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 * LineTime = total time for one line of display
	 * ChunkTime = the time it takes the DCP to send one chunk
	 *             of data to the LB which consists of
	 *             pipeline delay and inter chunk gap
	 */
	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32 bits (4 bytes) */
	wm->dbpp.full = dfixed_const(4 * 8);

	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = dfixed_const(16);
	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = dfixed_div(estimated_width, consumption_time);
	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = dfixed_const(10);
	} else {
		a.full = dfixed_const(16);
		wm->priority_mark.full = dfixed_div(estimated_width, a);
		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}

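/* Derive the D1/D2 priority mark register values from the per-CRTC
 * watermarks; with no mode set, the priority marks stay forced off.
 */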
static void rs690_compute_mode_priority(struct radeon_device *rdev,
					struct rs690_watermark *wm0,
					struct rs690_watermark *wm1,
					struct drm_display_mode *mode0,
					struct drm_display_mode *mode1,
					u32 *d1mode_priority_a_cnt,
					u32 *d2mode_priority_a_cnt)
{
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	*d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
	*d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);

	if (mode0 && mode1) {
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		if (dfixed_trunc(wm1->dbpp) > 64)
			b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
		else
			b.full = wm1->num_line_pair.full;
		a.full += b.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2) {
			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		}
	} else if (mode0) {
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		if (rdev->disp_priority == 2)
			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
	} else if (mode1) {
		if (dfixed_trunc(wm1->dbpp) > 64)
			a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
		else
			a.full = wm1->num_line_pair.full;
		fill_rate.full = dfixed_div(wm1->sclk, a);
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2)
			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
	}
}

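/* Recompute the watermarks for both CRTCs and reprogram the line
 * buffer split, request FIFO depths and priority mark registers.
 */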
void rs690_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rs690_watermark wm0_high, wm0_low;
	struct rs690_watermark wm1_high, wm1_low;
	u32 tmp;
	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if ((rdev->disp_priority == 2) &&
	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
		tmp &= C_000104_MC_DISP0R_INIT_LAT;
		tmp &= C_000104_MC_DISP1R_INIT_LAT;
		if (mode0)
			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
		if (mode1)
			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
	}
	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
		WREG32(R_006C9C_DCP_CONTROL, 0);
	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
		WREG32(R_006C9C_DCP_CONTROL, 2);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);

	tmp = (wm0_high.lb_request_fifo_depth - 1);
	tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);

	rs690_compute_mode_priority(rdev,
				    &wm0_high, &wm1_high,
				    mode0, mode1,
				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
	rs690_compute_mode_priority(rdev,
				    &wm0_low, &wm1_low,
				    mode0, mode1,
				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);

	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
}

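/* MC registers are reached indirectly through the MC_INDEX/MC_DATA
 * register pair, serialized by mc_idx_lock.
 */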
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
	r = RREG32(R_00007C_MC_DATA);
	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
	spin_unlock(&rdev->mc_idx_lock);
	return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
		S_000078_MC_IND_WR_EN(1));
	WREG32(R_00007C_MC_DATA, v);
	WREG32(R_000078_MC_INDEX, 0x7F);
	spin_unlock(&rdev->mc_idx_lock);
}

static void rs690_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for MC idle */
	if (rs690_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Program MC; should be a 32-bit limited address space */
	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

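/* Bring up the non-display hardware (MC, clocks, GART, writeback,
 * fences, IRQs, CP ring, IB pool, audio); shared by init and resume.
 */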
static int rs690_startup(struct radeon_device *rdev)
{
	int r;

	rs690_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs690_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs690_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is not working */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs690_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int rs690_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}

void rs690_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs690_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* TODO: disabling VGA needs to use the VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs690_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = rs690_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}