xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c (revision db10cb9b)
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "priv.h"
25 #include "chan.h"
26 #include "head.h"
27 #include "ior.h"
28 
29 #include <subdev/timer.h>
30 
31 #include <nvif/class.h>
32 
33 void
34 g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
35 {
36 	struct nvkm_device *device = sor->disp->engine.subdev.device;
37 	const u32 loff = nv50_sor_link(sor);
38 
39 	nvkm_mask(device, 0x61c128 + loff, 0x0000003f, watermark);
40 }
41 
42 void
43 g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
44 		     u8 TU, u8 VTUa, u8 VTUf, u8 VTUi)
45 {
46 	struct nvkm_device *device = sor->disp->engine.subdev.device;
47 	const u32 loff = nv50_sor_link(sor);
48 
49 	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, TU << 2);
50 	nvkm_mask(device, 0x61c128 + loff, 0x010f7f00, VTUa << 24 | VTUf << 16 | VTUi << 8);
51 }
52 
53 void
54 g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
55 {
56 	struct nvkm_device *device = sor->disp->engine.subdev.device;
57 	const u32 soff = nv50_ior_base(sor);
58 
59 	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h);
60 	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, v);
61 }
62 
/*
 * Program per-lane DP drive parameters for one logical lane.
 *
 * ln: logical lane index, mapped to a physical lane via the per-chip
 *     lanes[] table to derive this lane's byte position in the registers.
 * pc: post-cursor level (not referenced on this chipset; kept for
 *     interface parity with later implementations).
 * dc: drive current (voltage swing) level.
 * pe: pre-emphasis level.
 * pu: drive power value shared across all lanes of the link.
 */
void
g94_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32  loff = nv50_sor_link(sor);
	const u32 shift = sor->func->dp->lanes[ln] * 8;
	u32 data[3];

	/* Read-modify-write: clear only this lane's byte in each register. */
	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	/* Power field is shared by all lanes: only raise it, except on
	 * lane 0 where it is (re)initialised unconditionally. */
	if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (pu << 8);

	nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);
}
81 
82 void
83 g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
84 {
85 	struct nvkm_device *device = sor->disp->engine.subdev.device;
86 	const u32 loff = nv50_sor_link(sor);
87 	u32 data;
88 
89 	switch (pattern) {
90 	case 0: data = 0x00001000; break;
91 	case 1: data = 0x01000000; break;
92 	case 2: data = 0x02000000; break;
93 	default:
94 		WARN_ON(1);
95 		return;
96 	}
97 
98 	nvkm_mask(device, 0x61c10c + loff, 0x0f001000, data);
99 }
100 
/*
 * Power up 'nr' DP lanes on this SOR.
 *
 * Builds a physical-lane enable mask from the logical->physical lanes[]
 * table, programs it, then kicks an update and polls (up to 2ms) for the
 * hardware to acknowledge completion.
 */
void
g94_sor_dp_power(struct nvkm_ior *sor, int nr)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	const u32 loff = nv50_sor_link(sor);
	u32 mask = 0, i;

	for (i = 0; i < nr; i++)
		mask |= 1 << sor->func->dp->lanes[i];

	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
	/* Trigger the update, then wait for the busy bit to clear. */
	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
			break;
	);
}
119 
120 int
121 g94_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
122 {
123 	struct nvkm_device *device = sor->disp->engine.subdev.device;
124 	const u32 soff = nv50_ior_base(sor);
125 	const u32 loff = nv50_sor_link(sor);
126 	u32 dpctrl = 0x00000000;
127 	u32 clksor = 0x00000000;
128 
129 	dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
130 	if (sor->dp.ef)
131 		dpctrl |= 0x00004000;
132 	if (sor->dp.bw > 0x06)
133 		clksor |= 0x00040000;
134 
135 	nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
136 	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
137 	return 0;
138 }
139 
/* DP hook table for g94-family SORs. */
const struct nvkm_ior_func_dp
g94_sor_dp = {
	/* Logical-to-physical lane mapping used by drive()/power(). */
	.lanes = { 2, 1, 0, 3},
	.links = g94_sor_dp_links,
	.power = g94_sor_dp_power,
	.pattern = g94_sor_dp_pattern,
	.drive = g94_sor_dp_drive,
	.audio_sym = g94_sor_dp_audio_sym,
	.activesym = g94_sor_dp_activesym,
	.watermark = g94_sor_dp_watermark,
};
151 
152 static bool
153 g94_sor_war_needed(struct nvkm_ior *sor)
154 {
155 	struct nvkm_device *device = sor->disp->engine.subdev.device;
156 	const u32 soff = nv50_ior_base(sor);
157 
158 	if (sor->asy.proto == TMDS) {
159 		switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
160 		case 0x00000000:
161 		case 0x00030000:
162 			return true;
163 		default:
164 			break;
165 		}
166 	}
167 
168 	return false;
169 }
170 
171 static void
172 g94_sor_war_update_sppll1(struct nvkm_disp *disp)
173 {
174 	struct nvkm_device *device = disp->engine.subdev.device;
175 	struct nvkm_ior *ior;
176 	bool used = false;
177 	u32 clksor;
178 
179 	list_for_each_entry(ior, &disp->iors, head) {
180 		if (ior->type != SOR)
181 			continue;
182 
183 		clksor = nvkm_rd32(device, 0x614300 + nv50_ior_base(ior));
184 		switch (clksor & 0x03000000) {
185 		case 0x02000000:
186 		case 0x03000000:
187 			used = true;
188 			break;
189 		default:
190 			break;
191 		}
192 	}
193 
194 	if (used)
195 		return;
196 
197 	nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
198 }
199 
/*
 * Final stage of the g94 TMDS workaround: reverses the register changes
 * made by g94_sor_war_2() (link bit in 0x61c10c, clock bits in 0x614300).
 *
 * If the SOR is currently powered, it is sequenced down first — with a
 * temporarily patched power-down script — and powered back up afterwards,
 * so the link configuration is not changed while active.  Finally,
 * SPPLL1 is released if no SOR still uses it.
 */
static void
g94_sor_war_3(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	u32 sorpwr;

	if (!g94_sor_war_needed(sor))
		return;

	sorpwr = nvkm_rd32(device, 0x61c004 + soff);
	if (sorpwr & 0x00000001) {
		/* Sequencer control: power-down/power-up script offsets. */
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32  pd_pc = (seqctl & 0x00000f00) >> 8;
		u32  pu_pc =  seqctl & 0x0000000f;

		/* Patch the power-down script entry before powering off. */
		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

		/* Wait for the sequencer to go idle (bit 28 clear). */
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);

		/* Restore the sequencer script entries. */
		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
	}

	/* Undo the war_2 link/clock register changes. */
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

	/* Re-power the SOR if we powered it down above. */
	if (sorpwr & 0x00000001)
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);

	g94_sor_war_update_sppll1(sor->disp);
}
240 
/*
 * Early stage of the g94 TMDS workaround, applied when
 * g94_sor_war_needed() flags the link configuration.
 *
 * Enables SPPLL1, forces the SOR clock-source and link bits on, runs a
 * timed pulse on 0x61c008/0x61c00c (presumably a PLL reset/settle
 * sequence — exact hardware semantics undocumented here), and patches
 * the power-up sequencer script if the SOR is currently powered.
 */
static void
g94_sor_war_2(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);

	if (!g94_sor_war_needed(sor))
		return;

	/* Enable SPPLL1 and route this SOR onto it. */
	nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

	/* Timed pulse: assert for 400us, then release. */
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
	nvkm_usec(device, 400, NVKM_DELAY);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

	/* If powered, patch the power-up sequencer script entry. */
	if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32  pu_pc = seqctl & 0x0000000f;
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
	}
}
266 
/*
 * Read back a SOR's asserted or armed state from the display core
 * channel state registers, decoding the EVO protocol field into the
 * common (proto, link) representation.
 *
 * state == &sor->arm selects the armed copy (offset +4); otherwise the
 * asserted copy is read.  On an unrecognised protocol, state->link is
 * deliberately left untouched.
 */
void
g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
	u32 ctrl = nvkm_rd32(device, 0x610794 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break; /* dual-link: both sublinks */
	case 8: state->proto =   DP; state->link = 1; break;
	case 9: state->proto =   DP; state->link = 2; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	/* Head attachment mask lives in the low two bits. */
	state->head = ctrl & 0x00000003;
	nv50_pior_depth(sor, state, ctrl);
}
290 
/* IOR hook table for g94 SORs; reuses nv50 power/clock handling. */
static const struct nvkm_ior_func
g94_sor = {
	.state = g94_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
	.war_2 = g94_sor_war_2,
	.war_3 = g94_sor_war_3,
	.hdmi = &g84_sor_hdmi,
	.dp = &g94_sor_dp,
};
301 
/* Instantiate a g94 SOR with the hook table above. */
static int
g94_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&g94_sor, disp, SOR, id, false);
}
307 
308 int
309 g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
310 {
311 	struct nvkm_device *device = disp->engine.subdev.device;
312 
313 	*pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
314 	return 4;
315 }
316 
/* Core channel SOR method list: maps EVO method 0x0600 to 0x610794. */
static const struct nvkm_disp_mthd_list
g94_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610794 },
		{}
	}
};
326 
/*
 * Core channel method table for g94: nv50/g84 lists reused except for
 * the SOR list, which differs (see g94_disp_core_mthd_sor above is NOT
 * referenced here — the one in this table is; each entry gives the unit
 * name, instance count, and method list).
 */
const struct nvkm_disp_chan_mthd
g94_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{    "DAC", 3, &g84_disp_core_mthd_dac },
		{    "SOR", 4, &g94_disp_core_mthd_sor },
		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
		{   "HEAD", 2, &g84_disp_core_mthd_head },
		{}
	}
};
341 
/* Core channel user definition: nv50 channel funcs, g94 method table. */
const struct nvkm_disp_chan_user
g94_disp_core = {
	.func = &nv50_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &g94_disp_core_mthd,
};
349 
/*
 * Top-level disp engine description for g94 (GT206 display class):
 * mostly nv50/g84/gt200 implementations, with g94-specific SOR handling
 * and core channel.
 */
static const struct nvkm_disp_func
g94_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.super = nv50_disp_super,
	.uevent = &nv50_disp_chan_uevent,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
	.root = { 0,0,GT206_DISP },
	.user = {
		{{0,0,  G82_DISP_CURSOR             }, nvkm_disp_chan_new, & nv50_disp_curs },
		{{0,0,  G82_DISP_OVERLAY            }, nvkm_disp_chan_new, & nv50_disp_oimm },
		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   }, nvkm_disp_chan_new, &  g84_disp_base },
		{{0,0,GT206_DISP_CORE_CHANNEL_DMA   }, nvkm_disp_core_new, &  g94_disp_core },
		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &gt200_disp_ovly },
		{}
	},
};
372 
/* Construct the g94 disp engine instance. */
int
g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&g94_disp, device, type, inst, pdisp);
}
379