1 /* $NetBSD: nouveau_nvkm_engine_disp_sorg94.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $ */
2
3 /*
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Ben Skeggs
25 */
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_disp_sorg94.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $");
28
29 #include "ior.h"
30
31 #include <subdev/timer.h>
32
33 void
g94_sor_dp_watermark(struct nvkm_ior * sor,int head,u8 watermark)34 g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
35 {
36 struct nvkm_device *device = sor->disp->engine.subdev.device;
37 const u32 loff = nv50_sor_link(sor);
38 nvkm_mask(device, 0x61c128 + loff, 0x0000003f, watermark);
39 }
40
41 void
g94_sor_dp_activesym(struct nvkm_ior * sor,int head,u8 TU,u8 VTUa,u8 VTUf,u8 VTUi)42 g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
43 u8 TU, u8 VTUa, u8 VTUf, u8 VTUi)
44 {
45 struct nvkm_device *device = sor->disp->engine.subdev.device;
46 const u32 loff = nv50_sor_link(sor);
47 nvkm_mask(device, 0x61c10c + loff, 0x000001fc, TU << 2);
48 nvkm_mask(device, 0x61c128 + loff, 0x010f7f00, VTUa << 24 |
49 VTUf << 16 |
50 VTUi << 8);
51 }
52
53 void
g94_sor_dp_audio_sym(struct nvkm_ior * sor,int head,u16 h,u32 v)54 g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
55 {
56 struct nvkm_device *device = sor->disp->engine.subdev.device;
57 const u32 soff = nv50_ior_base(sor);
58 nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h);
59 nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, v);
60 }
61
/*
 * Set DP drive parameters for one lane: drive current (dc), pre-emphasis
 * (pe) and power-up level (pu).  The logical lane index ln is translated
 * to a physical lane via the func->dp.lanes[] map, each physical lane
 * occupying one byte in the per-lane registers.  (pc is unused on g94.)
 */
void
g94_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	const u32 shift = sor->func->dp.lanes[ln] * 8;
	u32 data[3];

	/* Read-modify-write: clear this lane's byte, keep the others. */
	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	/*
	 * The power-up level is shared across lanes: raise it if this
	 * lane needs more, and always (re)initialise it on lane 0.
	 */
	if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (pu << 8);
	nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);
}
79
80 void
g94_sor_dp_pattern(struct nvkm_ior * sor,int pattern)81 g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
82 {
83 struct nvkm_device *device = sor->disp->engine.subdev.device;
84 const u32 loff = nv50_sor_link(sor);
85 nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
86 }
87
88 void
g94_sor_dp_power(struct nvkm_ior * sor,int nr)89 g94_sor_dp_power(struct nvkm_ior *sor, int nr)
90 {
91 struct nvkm_device *device = sor->disp->engine.subdev.device;
92 const u32 soff = nv50_ior_base(sor);
93 const u32 loff = nv50_sor_link(sor);
94 u32 mask = 0, i;
95
96 for (i = 0; i < nr; i++)
97 mask |= 1 << sor->func->dp.lanes[i];
98
99 nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
100 nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
101 nvkm_msec(device, 2000,
102 if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
103 break;
104 );
105 }
106
107 int
g94_sor_dp_links(struct nvkm_ior * sor,struct nvkm_i2c_aux * aux)108 g94_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
109 {
110 struct nvkm_device *device = sor->disp->engine.subdev.device;
111 const u32 soff = nv50_ior_base(sor);
112 const u32 loff = nv50_sor_link(sor);
113 u32 dpctrl = 0x00000000;
114 u32 clksor = 0x00000000;
115
116 dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
117 if (sor->dp.ef)
118 dpctrl |= 0x00004000;
119 if (sor->dp.bw > 0x06)
120 clksor |= 0x00040000;
121
122 nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
123 nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
124 return 0;
125 }
126
127 static bool
g94_sor_war_needed(struct nvkm_ior * sor)128 g94_sor_war_needed(struct nvkm_ior *sor)
129 {
130 struct nvkm_device *device = sor->disp->engine.subdev.device;
131 const u32 soff = nv50_ior_base(sor);
132 if (sor->asy.proto == TMDS) {
133 switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
134 case 0x00000000:
135 case 0x00030000:
136 return true;
137 default:
138 break;
139 }
140 }
141 return false;
142 }
143
144 static void
g94_sor_war_update_sppll1(struct nvkm_disp * disp)145 g94_sor_war_update_sppll1(struct nvkm_disp *disp)
146 {
147 struct nvkm_device *device = disp->engine.subdev.device;
148 struct nvkm_ior *ior;
149 bool used = false;
150 u32 clksor;
151
152 list_for_each_entry(ior, &disp->ior, head) {
153 if (ior->type != SOR)
154 continue;
155
156 clksor = nvkm_rd32(device, 0x614300 + nv50_ior_base(ior));
157 switch (clksor & 0x03000000) {
158 case 0x02000000:
159 case 0x03000000:
160 used = true;
161 break;
162 default:
163 break;
164 }
165 }
166
167 if (used)
168 return;
169
170 nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
171 }
172
/*
 * Workaround, part 3: undo the clock override installed by
 * g94_sor_war_2.  If the SOR is powered, it is sequenced down first,
 * the override bits are cleared, then it is powered back up; finally
 * SPPLL1 is shut off if no SOR uses it any more.  The exact write
 * order and the waits between steps are required by the hardware.
 */
static void
g94_sor_war_3(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	u32 sorpwr;

	if (!g94_sor_war_needed(sor))
		return;

	/* Remember the current power state; power down if enabled. */
	sorpwr = nvkm_rd32(device, 0x61c004 + soff);
	if (sorpwr & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pd_pc = (seqctl & 0x00000f00) >> 8;
		u32 pu_pc = seqctl & 0x0000000f;

		/* Replace the power-down script entry selected by pd_pc. */
		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

		/* Wait (up to 2ms) for the sequencer-busy bit to clear. */
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);

		/* Restore the power-down/power-up script entries. */
		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
	}

	/* Clear the override and clock-source select set by war_2. */
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

	/* Restore the original power state. */
	if (sorpwr & 0x00000001) {
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
	}

	g94_sor_war_update_sppll1(sor->disp);
}
214
/*
 * Workaround, part 2: temporarily move the SOR onto SPPLL1 around a
 * TMDS modeset.  Powers up SPPLL1, selects it as the SOR clock source,
 * and cycles the SOR PLL with a 400us settle delay.  The register
 * write order and the delay are required; g94_sor_war_3 undoes this.
 */
static void
g94_sor_war_2(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);

	if (!g94_sor_war_needed(sor))
		return;

	/* Power up SPPLL1 and route the SOR to it. */
	nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

	/* Cycle the SOR PLL: off, settle 400us, back on. */
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
	nvkm_usec(device, 400, NVKM_DELAY);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

	/* If the SOR is powered, patch its power-up sequencer script. */
	if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pu_pc = seqctl & 0x0000000f;
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
	}
}
240
241 void
g94_sor_state(struct nvkm_ior * sor,struct nvkm_ior_state * state)242 g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
243 {
244 struct nvkm_device *device = sor->disp->engine.subdev.device;
245 const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
246 u32 ctrl = nvkm_rd32(device, 0x610794 + coff);
247
248 state->proto_evo = (ctrl & 0x00000f00) >> 8;
249 switch (state->proto_evo) {
250 case 0: state->proto = LVDS; state->link = 1; break;
251 case 1: state->proto = TMDS; state->link = 1; break;
252 case 2: state->proto = TMDS; state->link = 2; break;
253 case 5: state->proto = TMDS; state->link = 3; break;
254 case 8: state->proto = DP; state->link = 1; break;
255 case 9: state->proto = DP; state->link = 2; break;
256 default:
257 state->proto = UNKNOWN;
258 break;
259 }
260
261 state->head = ctrl & 0x00000003;
262 nv50_pior_depth(sor, state, ctrl);
263 }
264
/* G94 SOR function table, wired up by g94_sor_new(). */
static const struct nvkm_ior_func
g94_sor = {
	.state = g94_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
	.war_2 = g94_sor_war_2,
	.war_3 = g94_sor_war_3,
	.dp = {
		/* Logical-to-physical DP lane map used by drive/power. */
		.lanes = { 2, 1, 0, 3},
		.links = g94_sor_dp_links,
		.power = g94_sor_dp_power,
		.pattern = g94_sor_dp_pattern,
		.drive = g94_sor_dp_drive,
		.audio_sym = g94_sor_dp_audio_sym,
		.activesym = g94_sor_dp_activesym,
		.watermark = g94_sor_dp_watermark,
	},
};
283
/* Create SOR #id on disp using the g94 function table. */
int
g94_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&g94_sor, disp, SOR, id);
}
289
290 int
g94_sor_cnt(struct nvkm_disp * disp,unsigned long * pmask)291 g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
292 {
293 struct nvkm_device *device = disp->engine.subdev.device;
294 *pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
295 return 4;
296 }
297