// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in the MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF		0x0
#define MDP_INTF_0_OFF			0x6A000
#define MDP_INTF_1_OFF			0x6A800
#define MDP_INTF_2_OFF			0x6B000
#define MDP_INTF_3_OFF			0x6B800
#define MDP_INTF_4_OFF			0x6C000
#define MDP_INTF_5_OFF			0x6C800
#define MDP_AD4_0_OFF			0x7C000
#define MDP_AD4_1_OFF			0x7D000
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_0_OFF_REV_7xxx		0x34000
#define MDP_INTF_1_OFF_REV_7xxx		0x35000
#define MDP_INTF_2_OFF_REV_7xxx		0x36000
#define MDP_INTF_3_OFF_REV_7xxx		0x37000
#define MDP_INTF_4_OFF_REV_7xxx		0x38000
#define MDP_INTF_5_OFF_REV_7xxx		0x39000

/**
 * struct dpu_intr_reg - register offsets for one interrupt register set
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - List of DPU interrupt registers
 *
 * When making changes be sure to sync with dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	[MDP_SSPP_TOP0_INTR] = {
		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR_EN,
		MDP_SSPP_TOP0_OFF+INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR2_EN,
		MDP_SSPP_TOP0_OFF+INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF+INTF_INTR_EN,
		MDP_INTF_0_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF+INTF_INTR_EN,
		MDP_INTF_1_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF+INTF_INTR_EN,
		MDP_INTF_2_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF+INTF_INTR_EN,
		MDP_INTF_3_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF+INTF_INTR_EN,
		MDP_INTF_4_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_5_OFF+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF+INTF_INTR_EN,
		MDP_INTF_5_OFF+INTF_INTR_STATUS
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_INTF0_7xxx_INTR] = {
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF1_7xxx_INTR] = {
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF2_7xxx_INTR] = {
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF3_7xxx_INTR] = {
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF4_7xxx_INTR] = {
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF5_7xxx_INTR] = {
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
	},
};

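/*
 * Interrupts are tracked with a flat index: each register set holds 32
 * interrupt bits, so irq_idx / 32 selects the entry in dpu_intr_set and
 * irq_idx % 32 selects the bit within that register.
 */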
#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_irq_callback *cb;

	VERB("irq_idx=%d\n", irq_idx);

	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);

	atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);

	/*
	 * Perform registered function callback
	 */
	list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
		if (cb->func)
			cb->func(cb->arg, irq_idx);
}

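/**
 * dpu_core_irq - top-level DPU interrupt handler
 * @dpu_kms:		Pointer to DPU's KMS structure
 *
 * Walk every interrupt register set present on this hardware, clear the
 * asserted status bits and dispatch the registered callbacks for each
 * pending, enabled interrupt.
 *
 * Return: IRQ_NONE when the interrupt block is not initialized,
 * IRQ_HANDLED otherwise.
 */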
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Dispatch a callback for each asserted, enabled status bit.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the irq_status
			 * bit with the matching mask. Once irq_status is
			 * fully cleared, the search can stop.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

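/**
 * dpu_hw_intr_enable_irq_locked - enable a single interrupt in hardware
 * @intr:	Pointer to the interrupt block
 * @irq_idx:	flat interrupt index to enable
 *
 * Update the cached enable mask and the hardware ENABLE register of the
 * register set that @irq_idx belongs to; any previously pending instance of
 * the interrupt is cleared first. Must be called with irq_lock held.
 *
 * Return: 0 on success, -EINVAL on an invalid @intr or @irq_idx.
 */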
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

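/**
 * dpu_hw_intr_disable_irq_locked - disable a single interrupt in hardware
 * @intr:	Pointer to the interrupt block
 * @irq_idx:	flat interrupt index to disable
 *
 * Clear the bit in the cached enable mask, write the new mask to the
 * hardware ENABLE register and clear any interrupt left pending. Must be
 * called with irq_lock held.
 *
 * Return: 0 on success, -EINVAL on an invalid @intr or @irq_idx.
 */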
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ mask disable:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

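/**
 * dpu_clear_irqs - clear all pending DPU interrupts
 * @dpu_kms:	Pointer to DPU's KMS structure
 *
 * Write all-ones to the CLEAR register of every interrupt register set
 * present on this hardware (per irq_mask).
 */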
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

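/**
 * dpu_disable_all_irqs - mask every DPU interrupt
 * @dpu_kms:	Pointer to DPU's KMS structure
 *
 * Write zero to the ENABLE register of every interrupt register set present
 * on this hardware (per irq_mask).
 */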
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

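/**
 * dpu_core_irq_read - read the raw status of a single interrupt
 * @dpu_kms:	Pointer to DPU's KMS structure
 * @irq_idx:	flat interrupt index to read
 * @clear:	true to also clear the interrupt if it is pending
 *
 * Return: the masked STATUS register value for @irq_idx, or 0 on error.
 */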
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status && clear)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

static void __intr_offset(struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->base_off = addr;
	hw->blk_off = m->mdp[0].base;
	hw->hwversion = m->hwversion;
}

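/**
 * dpu_hw_intr_init - set up the interrupt block state
 * @addr:	mapped register I/O address of MDSS
 * @m:		pointer to MDSS catalog data
 *
 * Allocate and initialize struct dpu_hw_intr, including the cached enable
 * mask for every interrupt register set described by dpu_intr_set.
 *
 * Return: pointer to the new interrupt block, or an ERR_PTR() on failure.
 */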
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);

	intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;

	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
			GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		kfree(intr);
		return ERR_PTR(-ENOMEM);
	}

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

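/**
 * dpu_hw_intr_destroy - free the interrupt block state
 * @intr:	pointer returned by dpu_hw_intr_init(), may be NULL
 */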
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	if (intr) {
		kfree(intr->cache_irq_mask);

		kfree(intr->irq_cb_tbl);
		kfree(intr->irq_counts);

		kfree(intr);
	}
}

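/**
 * dpu_core_irq_register_callback - register a callback for an interrupt
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		flat interrupt index to listen on
 * @register_irq_cb:	callback structure to add to the callback list
 *
 * The callback is (re)added to the tail of the per-interrupt callback list.
 * If it is the first callback for @irq_idx, the interrupt is enabled in
 * hardware.
 *
 * Return: 0 on success, -EINVAL on invalid arguments.
 */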
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->hw_intr->irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	list_add_tail(&register_irq_cb->list,
			&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
	if (list_is_first(&register_irq_cb->list,
			&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
		int ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
					irq_idx);
	}
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	return 0;
}

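/**
 * dpu_core_irq_unregister_callback - remove a callback for an interrupt
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		flat interrupt index the callback was registered on
 * @register_irq_cb:	callback structure to remove from the callback list
 *
 * If this was the last callback for @irq_idx, the interrupt is disabled in
 * hardware.
 *
 * Return: 0 on success, -EINVAL on invalid arguments.
 */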
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms->hw_intr->irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	/* if the callback list is now empty, disable the interrupt */
	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
		int ret = dpu_hw_intr_disable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Failed to disable IRQ for irq_idx:%d\n",
					irq_idx);
		VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
	}
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;
	int i, irq_count, cb_count;

	if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
		return 0;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		cb_count = 0;
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
		list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
			cb_count++;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb_count)
			seq_printf(s, "idx:%d irq:%d cb:%d\n",
					i, irq_count, cb_count);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

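/**
 * dpu_core_irq_preinstall - prepare the interrupt infrastructure
 * @dpu_kms:	Pointer to DPU's KMS structure
 *
 * Clear and disable every interrupt in hardware, then allocate and reset the
 * per-interrupt callback lists and counters.
 */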
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	/* Create irq callbacks for all possible irq_idx */
	dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
			sizeof(struct list_head), GFP_KERNEL);
	dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
		atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
	}
}

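/**
 * dpu_core_irq_uninstall - tear down the interrupt infrastructure
 * @dpu_kms:	Pointer to DPU's KMS structure
 *
 * Warn about any interrupt that still has callbacks registered, then clear
 * and disable every interrupt in hardware.
 */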
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}