// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0D0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define  MERGE_3D_IDX   23
#define  INTF_IDX       31
#define CTL_INVALID_BIT                 0xffff

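/*
 * Map each SSPP id to its bit position in CTL_FETCH_PIPE_ACTIVE,
 * assuming the catalog enum order SSPP_NONE, VIG0-3, RGB0-3, DMA0-3,
 * CURSOR0-1: DMA pipes take bits 0-3, VIG pipes bits 16-19, and pipes
 * with no fetch-active control are marked CTL_INVALID_BIT.
 */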
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};

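/*
 * Look up the catalog entry for @ctl and fill in its register block
 * mapping; returns an ERR_PTR when the id is not in the catalog.
 */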
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-EINVAL);
}

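/*
 * Return the maximum number of blend stages supported by mixer @lm,
 * or -EINVAL if the mixer does not belong to this ctl path.
 */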
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

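/* Kick off the committed configuration by writing the CTL_START bit. */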
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

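/* Signal that a new configuration is pending via CTL_PREPARE. */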
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

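/*
 * Flush for "active CTL" (v1) hardware: pending MERGE_3D and INTF bits
 * are written to their dedicated sub-block flush registers before the
 * top-level CTL_FLUSH is kicked.
 */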
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

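/*
 * Translate an SSPP id to its CTL_FLUSH bit. Note that the layout is
 * not contiguous: e.g. VIG3 uses bit 18 while VIG0-2 use bits 0-2.
 */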
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

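/*
 * Mixer flush bits also carry CTL_FLUSH_MASK_CTL (bit 17) so the ctl
 * path itself is flushed together with the layer mixer.
 */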
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

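/* Legacy flush: each INTF has its own bit directly in CTL_FLUSH. */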
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

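/*
 * Active-CTL flush: the per-INTF bit goes into CTL_INTF_FLUSH, and the
 * single INTF_IDX bit in CTL_FLUSH tells trigger_flush_v1 to write
 * that sub-block register.
 */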
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	uint32_t flushbits = 0;

	switch (dspp) {
	case DSPP_0:
		flushbits = BIT(13);
		break;
	case DSPP_1:
		flushbits = BIT(14);
		break;
	case DSPP_2:
		flushbits = BIT(15);
		break;
	case DSPP_3:
		flushbits = BIT(21);
		break;
	default:
		return 0;
	}

	return flushbits;
}

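/*
 * Poll CTL_SW_RESET until the hardware clears the reset bit or
 * @timeout_us expires. Returns 0 on completion, non-zero on timeout.
 */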
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * The MDP takes around 30 us to finish resetting its ctl path;
	 * sleeping 20-50 us between reads means the reset has normally
	 * completed by the first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

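/*
 * If a software reset is still in flight, wait for it to complete
 * before allowing further programming of this ctl path.
 */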
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

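/* Zero out every mixer's stage registers and the fetch-active mask. */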
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

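/*
 * Program the blend-stage registers of mixer @lm. Each stage's source
 * pipe is encoded as a 3-bit field in CTL_LAYER, with an overflow bit
 * in CTL_LAYER_EXT for stages above 7; DMA2/DMA3, the cursors and
 * RECT_1 of multirect pipes use 4-bit fields in the EXT registers
 * instead.
 */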
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1)
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				else
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1)
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				else
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

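/*
 * Active-CTL interface configuration: the operating mode is set in
 * CTL_TOP, while the driven interfaces and 3D merge block are selected
 * through the *_ACTIVE registers.
 */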
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
}

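/* Legacy interface configuration: everything is packed into CTL_TOP. */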
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface mode selection %d\n",
				cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

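/*
 * Convert the bitmap of active pipes into CTL_FETCH_PIPE_ACTIVE bits
 * using fetch_tbl; pipes without a fetch-active bit are skipped.
 */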
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

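/*
 * Populate the ops table; "active CTL" hardware (DPU_CTL_ACTIVE_CFG)
 * uses the v1 flush and interface-configuration handlers.
 */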
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

static struct dpu_hw_blk_ops dpu_hw_ops;

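/**
 * dpu_hw_ctl_init() - Allocate and set up a ctl path driver object.
 * @idx:  ctl path index to initialize
 * @addr: mapped register I/O base address
 * @m:    pointer to the MDSS catalog data
 *
 * Return: pointer to the ctl context, or an ERR_PTR on failure.
 */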
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}

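/* Tear down a ctl context created by dpu_hw_ctl_init(). */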
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}