/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/delay.h>

#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "reg_helper.h"

#define CTX \
	hubbub1->base.ctx
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define REG(reg)\
	hubbub1->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name
void hubbub1_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	/*
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}

bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t enable = 0;

	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);

	return enable ? true : false;
}

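/*
 * Poll the HUBBUB debug bus (bit 30 of the selected debug word is the
 * arbiter's allow_pstate_change, see the table below) for up to
 * pstate_wait_timeout_us.  If it never asserts, force pstate allow as a
 * workaround and return false.
 */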
bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	/* pstate latency is ~20us so if we wait over 40us and pstate allow
	 * still not asserted, we are probably stuck and going to hang
	 *
	 * TODO: Figure out why pstate allow takes ~100us (up to 200us) on
	 * Linux; the reason is currently unknown
	 */
	const unsigned int pstate_wait_timeout_us = 200;
	const unsigned int pstate_wait_expected_timeout_us = 180;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with reverting the workaround */

	unsigned int debug_data;
	unsigned int i;

	if (forced_pstate_allow) {
		/* pstate allow was forced on during the previous call to
		 * prevent a hang, so disable the force here so we can read
		 * the real status
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* The following table only applies to DCN1 and DCN2;
	 * for newer DCNs, consult with HW IP folks to read the RTL.
	 * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
	 * description
	 * 0:     Pipe0 Plane0 Allow Pstate Change
	 * 1:     Pipe0 Plane1 Allow Pstate Change
	 * 2:     Pipe0 Cursor0 Allow Pstate Change
	 * 3:     Pipe0 Cursor1 Allow Pstate Change
	 * 4:     Pipe1 Plane0 Allow Pstate Change
	 * 5:     Pipe1 Plane1 Allow Pstate Change
	 * 6:     Pipe1 Cursor0 Allow Pstate Change
	 * 7:     Pipe1 Cursor1 Allow Pstate Change
	 * 8:     Pipe2 Plane0 Allow Pstate Change
	 * 9:     Pipe2 Plane1 Allow Pstate Change
	 * 10:    Pipe2 Cursor0 Allow Pstate Change
	 * 11:    Pipe2 Cursor1 Allow Pstate Change
	 * 12:    Pipe3 Plane0 Allow Pstate Change
	 * 13:    Pipe3 Plane1 Allow Pstate Change
	 * 14:    Pipe3 Cursor0 Allow Pstate Change
	 * 15:    Pipe3 Cursor1 Allow Pstate Change
	 * 16:    Pipe4 Plane0 Allow Pstate Change
	 * 17:    Pipe4 Plane1 Allow Pstate Change
	 * 18:    Pipe4 Cursor0 Allow Pstate Change
	 * 19:    Pipe4 Cursor1 Allow Pstate Change
	 * 20:    Pipe5 Plane0 Allow Pstate Change
	 * 21:    Pipe5 Plane1 Allow Pstate Change
	 * 22:    Pipe5 Cursor0 Allow Pstate Change
	 * 23:    Pipe5 Cursor1 Allow Pstate Change
	 * 24:    Pipe6 Plane0 Allow Pstate Change
	 * 25:    Pipe6 Plane1 Allow Pstate Change
	 * 26:    Pipe6 Cursor0 Allow Pstate Change
	 * 27:    Pipe6 Cursor1 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		if (debug_data & (1 << 30)) {

			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}

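/*
 * Convert a watermark in nanoseconds to display refclk cycles:
 * wm_ns * refclk_mhz / 1000 (e.g. 9000 ns at 27 MHz -> 243 cycles),
 * clamped to the register maximum (0x1fffff for DCN1's 21-bit fields).
 */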
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = 0;
	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

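/*
 * Watermark change request workaround: pulse
 * DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST low then high to request that the
 * arbiter take the newly programmed watermark values.
 */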
void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}

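/*
 * Program the DATA and PTE_META urgency watermarks for clock states A-D.
 * Values are only lowered when safe_to_lower is set; otherwise the pending
 * lower value is reported back through the return value (wm_pending).
 */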
bool hubbub1_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark sets A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
		hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
		hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
		hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
		hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
		wm_pending = true;

	return wm_pending;
}

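/*
 * Program the self-refresh (stutter) enter and exit watermarks for clock
 * states A-D, with the same safe_to_lower handling as the urgent watermarks.
 */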
bool hubbub1_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}

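/*
 * Program the DRAM clock change (pstate) watermarks for clock states A-D,
 * with the same safe_to_lower handling as the urgent watermarks.
 */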
bool hubbub1_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}

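/*
 * Program all watermark groups, then set the arbiter saturation level and
 * minimum outstanding requests.  Returns true if any watermark could not be
 * lowered yet because safe_to_lower was false.
 */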
bool hubbub1_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	bool wm_pending = false;
	/*
	 * Need to clamp to the max register value (i.e. no wrap);
	 * for DCN1, all watermark registers are 21 bits wide.
	 */
	if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
	return wm_pending;
}

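/*
 * Program the SDPIF frame buffer and AGP apertures according to the frame
 * buffer mode.  The aperture registers take the byte address shifted right
 * by 22, i.e. 4 MiB granularity.
 */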
void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/*should not come here*/
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0x03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

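/*
 * Invert the current value of DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST
 * (read it, then write back the opposite value).
 */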
void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t watermark_change_req;

	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);

	if (watermark_change_req)
		watermark_change_req = 0;
	else
		watermark_change_req = 1;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}

void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t reset_en = reset ? 1 : 0;

	REG_UPDATE(DCHUBBUB_SOFT_RESET,
			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}

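/*
 * Report whether DCC is supported for the given swizzle mode and element
 * size, and return the DCC segment order seen in each scan direction.
 */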
static bool hubbub1_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

static bool hubbub1_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor to leverage DML directly */
	/* DML: get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

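/*
 * Decide between full 256B and half 128B DCC requests per scan direction:
 * fall back to 128B requests when two full swaths would not fit in the
 * 164KB DCN1.0 detile buffer.
 */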
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

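/*
 * Derive the DCC compression capability for a surface from its pixel
 * format, swizzle segment order and required request size, per section
 * 1.6.2.1 of DCN1_Programming_Guide.docx.
 */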
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dc *dc = hubbub1->base.ctx->dc;

	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	default:
		ASSERT(false);
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}

static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
	.wm_read_state = hubbub1_wm_read_state,
	.program_watermarks = hubbub1_program_watermarks,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};

void hubbub1_construct(struct hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	hubbub1->base.ctx = ctx;

	hubbub1->base.funcs = &hubbub1_funcs;

	hubbub1->regs = hubbub_regs;
	hubbub1->shifts = hubbub_shift;
	hubbub1->masks = hubbub_mask;

	hubbub1->debug_test_index_pstate = 0x7;
	if (ctx->dce_version == DCN_VERSION_1_01)
		hubbub1->debug_test_index_pstate = 0xB;
}