/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MDP4_KMS_H__
#define __MDP4_KMS_H__

#include <drm/drm_panel.h>

#include "msm_drv.h"
#include "msm_kms.h"
#include "disp/mdp_kms.h"
#include "mdp4.xml.h"

struct device_node;

struct mdp4_kms {
	struct mdp_kms base;

	struct drm_device *dev;

	int rev;

	void __iomem *mmio;

	struct regulator *vdd;

	struct clk *clk;
	struct clk *pclk;
	struct clk *lut_clk;
	struct clk *axi_clk;

	struct mdp_irq error_handler;

	bool rpm_enabled;

	/* empty/blank cursor bo to use when cursor is "disabled" */
	struct drm_gem_object *blank_cursor_bo;
	uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
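
/*
 * Illustrative sketch (not part of the driver): code that receives the
 * generic struct msm_kms pointer typically converts it in two steps,
 * first to the shared mdp_kms base (to_mdp_kms() from disp/mdp_kms.h)
 * and then to the mdp4 variant.  example_get_mdp4_kms() is a
 * hypothetical helper showing that pattern.
 */
static inline struct mdp4_kms *example_get_mdp4_kms(struct msm_kms *kms)
{
	return to_mdp4_kms(to_mdp_kms(kms));
}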

/* platform config data (i.e. from DT, or pdata) */
struct mdp4_platform_config {
	struct iommu_domain *iommu;
	uint32_t max_clk;
};

static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{
	msm_writel(data, mdp4_kms->mmio + reg);
}

static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
{
	return msm_readl(mdp4_kms->mmio + reg);
}
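
/*
 * Illustrative sketch (not part of the driver): all MDP4 register access
 * goes through the two accessors above, using register offsets generated
 * into mdp4.xml.h.  example_read_hw_version() is a hypothetical helper;
 * it assumes REG_MDP4_VERSION from that generated header.
 */
static inline u32 example_read_hw_version(struct mdp4_kms *mdp4_kms)
{
	/* read the hardware revision register through the mmio accessor */
	return mdp4_read(mdp4_kms, REG_MDP4_VERSION);
}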

static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
{
	switch (pipe) {
	case VG1:      return MDP4_OVERLAY_FLUSH_VG1;
	case VG2:      return MDP4_OVERLAY_FLUSH_VG2;
	case RGB1:     return MDP4_OVERLAY_FLUSH_RGB1;
	case RGB2:     return MDP4_OVERLAY_FLUSH_RGB2;
	default:       return 0;
	}
}

static inline uint32_t ovlp2flush(int ovlp)
{
	switch (ovlp) {
	case 0:        return MDP4_OVERLAY_FLUSH_OVLP0;
	case 1:        return MDP4_OVERLAY_FLUSH_OVLP1;
	default:       return 0;
	}
}
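
/*
 * Illustrative sketch (not part of the driver): a CRTC flush ORs together
 * the per-pipe bits from pipe2flush() and the overlay bit from
 * ovlp2flush(), then pushes them out in a single OVERLAY_FLUSH write.
 * example_flush() is a hypothetical helper assuming REG_MDP4_OVERLAY_FLUSH
 * from mdp4.xml.h; the real logic lives in mdp4_crtc.c.
 */
static inline void example_flush(struct mdp4_kms *mdp4_kms,
		enum mdp4_pipe pipe, int ovlp)
{
	uint32_t flush = pipe2flush(pipe) | ovlp2flush(ovlp);

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}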

static inline uint32_t dma2irq(enum mdp4_dma dma)
{
	switch (dma) {
	case DMA_P:    return MDP4_IRQ_DMA_P_DONE;
	case DMA_S:    return MDP4_IRQ_DMA_S_DONE;
	case DMA_E:    return MDP4_IRQ_DMA_E_DONE;
	default:       return 0;
	}
}

static inline uint32_t dma2err(enum mdp4_dma dma)
{
	switch (dma) {
	case DMA_P:    return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
	case DMA_S:    return 0;  // ???
	case DMA_E:    return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
	default:       return 0;
	}
}
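
/*
 * Illustrative sketch (not part of the driver): each CRTC derives its
 * frame-done and underrun irq bits from the DMA channel it scans out on.
 * example_irqmask() is a hypothetical helper combining both lookups into
 * one mask, as a caller registering an mdp_irq might do.
 */
static inline uint32_t example_irqmask(enum mdp4_dma dma)
{
	/* end-of-frame bit plus interface underrun bit (may be zero) */
	return dma2irq(dma) | dma2err(dma);
}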

static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
		enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case VG1:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
		break;
	case VG2:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
		break;
	case RGB1:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
		break;
	case RGB2:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
		break;
	case RGB3:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
		break;
	case VG3:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
		break;
	case VG4:
		mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK |
				MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
		mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
			COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
		break;
	default:
		WARN(1, "invalid pipe");
		break;
	}

	return mixer_cfg;
}
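
/*
 * Illustrative sketch (not part of the driver): blend setup reads the
 * current LAYERMIXER_IN_CFG value, re-stages each pipe with mixercfg()
 * and writes the result back.  example_stage_base() is a hypothetical
 * helper that puts one pipe at the base stage of the given mixer; it
 * assumes REG_MDP4_LAYERMIXER_IN_CFG from mdp4.xml.h and STAGE_BASE from
 * the common mdp headers.
 */
static inline void example_stage_base(struct mdp4_kms *mdp4_kms,
		int mixer, enum mdp4_pipe pipe)
{
	uint32_t cfg = mdp4_read(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG);

	cfg = mixercfg(cfg, mixer, pipe, STAGE_BASE);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, cfg);
}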

int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);

void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
		uint32_t old_irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp4_irq(struct msm_kms *kms);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);

static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
{
	switch (pipe) {
	case VG1:
	case VG2:
	case VG3:
	case VG4:
		return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
	case RGB1:
	case RGB2:
	case RGB3:
		return MDP_PIPE_CAP_SCALE;
	default:
		return 0;
	}
}
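
/*
 * Illustrative sketch (not part of the driver): plane init uses the caps
 * bitmask above to decide which formats and properties to expose.  The
 * hypothetical helper below shows a simple capability test, here for YUV
 * support, which needs both scaling and CSC.
 */
static inline bool example_pipe_supports_yuv(enum mdp4_pipe pipe)
{
	uint32_t caps = mdp4_pipe_caps(pipe);

	return (caps & MDP_PIPE_CAP_SCALE) && (caps & MDP_PIPE_CAP_CSC);
}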

enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
		enum mdp4_pipe pipe_id, bool private_plane);

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id);

long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);

long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
		struct device_node *panel_node);

struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
		struct device_node *panel_node, struct drm_encoder *encoder);

#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
#else
static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
	return ERR_PTR(-ENODEV);
}
#endif

#ifdef CONFIG_COMMON_CLK
struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
#else
static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
{
	return ERR_PTR(-ENODEV);
}
#endif

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
/* Bus scaling data is associated with extra pointless platform devices,
 * "dtv", etc.  This is a bit of a hack, but we need a way for encoders
 * to find their pdata to make the bus-scaling stuff work.
 */
static inline void *mdp4_find_pdata(const char *devname)
{
	struct device *dev;
	dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
	return dev ? dev->platform_data : NULL;
}
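
/*
 * Illustrative sketch (not part of the driver): under this downstream
 * config an encoder would look up the pdata of its matching platform
 * device by name.  example_dtv_pdata() is a hypothetical helper; the
 * "dtv.0" device name and the concrete pdata type depend on downstream
 * platform code, so the result stays an untyped pointer here.
 */
static inline void *example_dtv_pdata(void)
{
	return mdp4_find_pdata("dtv.0");
}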
#endif

#endif /* __MDP4_KMS_H__ */