/* xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c (revision 44f57d78) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
3  */
4 
5 #include <linux/delay.h>
6 #include "dpu_hwio.h"
7 #include "dpu_hw_ctl.h"
8 #include "dpu_kms.h"
9 #include "dpu_trace.h"
10 
/*
 * Per-mixer layer staging registers. LM_5's CTL_LAYER slot is discontiguous
 * (0x024) because 0x014-0x020 are occupied by TOP/FLUSH/START below.
 */
#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)             \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)             \
	(0xA0 + (((lm) - LM_0) * 0x004))
/* CTL path top-level control/status registers */
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
/* NOTE(review): apparently unused in this file — possibly kept for reference */
#define   CTL_LAYER_EXTN_OFFSET         0x40

/* Force border color output on a mixer (CTL_LAYER bit 24) */
#define CTL_MIXER_BORDER_OUT            BIT(24)
/* Flush bit covering the CTL block's own registers */
#define CTL_FLUSH_MASK_CTL              BIT(17)

/* Max time to wait for a CTL path sw reset to complete */
#define DPU_REG_RESET_TIMEOUT_US        2000
30 
31 static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
32 		struct dpu_mdss_cfg *m,
33 		void __iomem *addr,
34 		struct dpu_hw_blk_reg_map *b)
35 {
36 	int i;
37 
38 	for (i = 0; i < m->ctl_count; i++) {
39 		if (ctl == m->ctl[i].id) {
40 			b->base_off = addr;
41 			b->blk_off = m->ctl[i].base;
42 			b->length = m->ctl[i].len;
43 			b->hwversion = m->hwversion;
44 			b->log_mask = DPU_DBG_MASK_CTL;
45 			return &m->ctl[i];
46 		}
47 	}
48 	return ERR_PTR(-ENOMEM);
49 }
50 
51 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
52 		enum dpu_lm lm)
53 {
54 	int i;
55 	int stages = -EINVAL;
56 
57 	for (i = 0; i < count; i++) {
58 		if (lm == mixer[i].id) {
59 			stages = mixer[i].sblk->maxblendstages;
60 			break;
61 		}
62 	}
63 
64 	return stages;
65 }
66 
/* Read back the CTL_FLUSH register (pending-in-hardware flush bits). */
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}
73 
/* Kick the CTL path: latch the programmed configuration into hardware. */
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
80 
/* Arm the CTL path so the next configuration is taken at the next kickoff. */
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
87 
/*
 * Drop the software-side pending flush mask. Does not touch hardware;
 * only the cached mask accumulated via update_pending_flush is reset.
 */
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}
94 
/*
 * OR additional flush bits into the cached mask; they are written to
 * hardware later by dpu_hw_ctl_trigger_flush().
 */
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}
102 
103 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
104 {
105 	if (!ctx)
106 		return 0x0;
107 
108 	return ctx->pending_flush_mask;
109 }
110 
/* Write the accumulated pending flush mask to the CTL_FLUSH register. */
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
117 
118 static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
119 	enum dpu_sspp sspp)
120 {
121 	uint32_t flushbits = 0;
122 
123 	switch (sspp) {
124 	case SSPP_VIG0:
125 		flushbits =  BIT(0);
126 		break;
127 	case SSPP_VIG1:
128 		flushbits = BIT(1);
129 		break;
130 	case SSPP_VIG2:
131 		flushbits = BIT(2);
132 		break;
133 	case SSPP_VIG3:
134 		flushbits = BIT(18);
135 		break;
136 	case SSPP_RGB0:
137 		flushbits = BIT(3);
138 		break;
139 	case SSPP_RGB1:
140 		flushbits = BIT(4);
141 		break;
142 	case SSPP_RGB2:
143 		flushbits = BIT(5);
144 		break;
145 	case SSPP_RGB3:
146 		flushbits = BIT(19);
147 		break;
148 	case SSPP_DMA0:
149 		flushbits = BIT(11);
150 		break;
151 	case SSPP_DMA1:
152 		flushbits = BIT(12);
153 		break;
154 	case SSPP_DMA2:
155 		flushbits = BIT(24);
156 		break;
157 	case SSPP_DMA3:
158 		flushbits = BIT(25);
159 		break;
160 	case SSPP_CURSOR0:
161 		flushbits = BIT(22);
162 		break;
163 	case SSPP_CURSOR1:
164 		flushbits = BIT(23);
165 		break;
166 	default:
167 		break;
168 	}
169 
170 	return flushbits;
171 }
172 
173 static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
174 	enum dpu_lm lm)
175 {
176 	uint32_t flushbits = 0;
177 
178 	switch (lm) {
179 	case LM_0:
180 		flushbits = BIT(6);
181 		break;
182 	case LM_1:
183 		flushbits = BIT(7);
184 		break;
185 	case LM_2:
186 		flushbits = BIT(8);
187 		break;
188 	case LM_3:
189 		flushbits = BIT(9);
190 		break;
191 	case LM_4:
192 		flushbits = BIT(10);
193 		break;
194 	case LM_5:
195 		flushbits = BIT(20);
196 		break;
197 	default:
198 		return -EINVAL;
199 	}
200 
201 	flushbits |= CTL_FLUSH_MASK_CTL;
202 
203 	return flushbits;
204 }
205 
206 static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
207 		u32 *flushbits, enum dpu_intf intf)
208 {
209 	switch (intf) {
210 	case INTF_0:
211 		*flushbits |= BIT(31);
212 		break;
213 	case INTF_1:
214 		*flushbits |= BIT(30);
215 		break;
216 	case INTF_2:
217 		*flushbits |= BIT(29);
218 		break;
219 	case INTF_3:
220 		*flushbits |= BIT(28);
221 		break;
222 	default:
223 		return -EINVAL;
224 	}
225 	return 0;
226 }
227 
/*
 * dpu_hw_ctl_poll_reset_status - wait for a CTL sw reset to complete
 * @ctx:        CTL context
 * @timeout_us: maximum time to wait, in microseconds
 *
 * CTL_SW_RESET bit 0 reads 1 while the reset is still in progress and
 * self-clears on completion.
 *
 * Return: 0 when the reset completed, non-zero (the still-set status bit)
 * on timeout.
 */
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}
249 
/*
 * dpu_hw_ctl_reset_control - issue a CTL path sw reset and wait for it
 * @ctx: CTL context
 *
 * Return: 0 on success, -EINVAL if the reset did not complete within
 * DPU_REG_RESET_TIMEOUT_US.
 */
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}
261 
/*
 * dpu_hw_ctl_wait_reset_status - wait for any in-flight CTL reset to finish
 * @ctx: CTL context
 *
 * Cheap fast path: if no reset is pending, return immediately without
 * sleeping; otherwise poll until it clears or times out.
 *
 * Return: 0 when no reset is pending (or it completed), -EINVAL on timeout.
 */
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}
280 
281 static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
282 {
283 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
284 	int i;
285 
286 	for (i = 0; i < ctx->mixer_count; i++) {
287 		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
288 		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
289 		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
290 		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
291 	}
292 }
293 
/*
 * dpu_hw_ctl_setup_blendstage - program which pipes feed each blend stage
 * @ctx:       CTL context
 * @lm:        layer mixer whose staging registers are written
 * @stage_cfg: pipe-per-stage assignment; NULL stages only border color
 *
 * Each pipe's stage number is packed into a per-pipe field spread across
 * CTL_LAYER / _EXT / _EXT2 / _EXT3. The base register holds 3-bit fields
 * (stage + 1), with an overflow bit in EXT for stages >= 7; RECT_1
 * multirect pipes and DMA2/DMA3 use 4-bit fields in EXT2/EXT3 instead.
 */
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	/* source-split capable mixers take multiple pipes per stage */
	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * NOTE(review): this |= widens 'mix'
					 * from 3 to 4 bits and the mutated
					 * value persists for later pipes of
					 * the same stage — looks unintended;
					 * confirm against the register spec.
					 */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					/* NOTE(review): same 'mix' mutation as SSPP_DMA2 above */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
427 
428 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
429 		struct dpu_hw_intf_cfg *cfg)
430 {
431 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
432 	u32 intf_cfg = 0;
433 
434 	intf_cfg |= (cfg->intf & 0xF) << 4;
435 
436 	if (cfg->mode_3d) {
437 		intf_cfg |= BIT(19);
438 		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
439 	}
440 
441 	switch (cfg->intf_mode_sel) {
442 	case DPU_CTL_MODE_SEL_VID:
443 		intf_cfg &= ~BIT(17);
444 		intf_cfg &= ~(0x3 << 15);
445 		break;
446 	case DPU_CTL_MODE_SEL_CMD:
447 		intf_cfg |= BIT(17);
448 		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
449 		break;
450 	default:
451 		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
452 		return;
453 	}
454 
455 	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
456 }
457 
458 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
459 		unsigned long cap)
460 {
461 	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
462 	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
463 	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
464 	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
465 	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
466 	ops->trigger_start = dpu_hw_ctl_trigger_start;
467 	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
468 	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
469 	ops->reset = dpu_hw_ctl_reset_control;
470 	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
471 	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
472 	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
473 	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
474 	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
475 	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
476 };
477 
/* Shared (currently empty) base-block ops passed to dpu_hw_blk_init() */
static struct dpu_hw_blk_ops dpu_hw_ops;
479 
/*
 * dpu_hw_ctl_init - allocate and initialize a CTL block context
 * @idx:  CTL block id
 * @addr: mapped base address of the MDSS register space
 * @m:    hardware catalog
 *
 * Return: new context (caller frees via dpu_hw_ctl_destroy()), or
 * ERR_PTR(-ENOMEM)/ERR_PTR(-EINVAL) on allocation/lookup failure.
 */
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	/* resolve the catalog entry and fill in c->hw's register map */
	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}
508 
509 void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
510 {
511 	if (ctx)
512 		dpu_hw_blk_destroy(&ctx->base);
513 	kfree(ctx);
514 }
515