1 /* $NetBSD: nouveau_nvkm_engine_disp_nv50.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $ */
2
3 /*
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Ben Skeggs
25 */
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_disp_nv50.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $");
28
29 #include "nv50.h"
30 #include "head.h"
31 #include "ior.h"
32 #include "channv50.h"
33 #include "rootnv50.h"
34
35 #include <core/client.h>
36 #include <core/ramht.h>
37 #include <subdev/bios.h>
38 #include <subdev/bios/disp.h>
39 #include <subdev/bios/init.h>
40 #include <subdev/bios/pll.h>
41 #include <subdev/devinit.h>
42 #include <subdev/timer.h>
43
44 static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp * base)45 nv50_disp_root_(struct nvkm_disp *base)
46 {
47 return nv50_disp(base)->func->root;
48 }
49
50 static void
nv50_disp_intr_(struct nvkm_disp * base)51 nv50_disp_intr_(struct nvkm_disp *base)
52 {
53 struct nv50_disp *disp = nv50_disp(base);
54 disp->func->intr(disp);
55 }
56
57 static void
nv50_disp_fini_(struct nvkm_disp * base)58 nv50_disp_fini_(struct nvkm_disp *base)
59 {
60 struct nv50_disp *disp = nv50_disp(base);
61 disp->func->fini(disp);
62 }
63
64 static int
nv50_disp_init_(struct nvkm_disp * base)65 nv50_disp_init_(struct nvkm_disp *base)
66 {
67 struct nv50_disp *disp = nv50_disp(base);
68 return disp->func->init(disp);
69 }
70
/* Destructor: release GPU-side objects and host-side resources created by
 * nv50_disp_new_()/nv50_disp_oneinit_().  Returns the allocation for the
 * caller to free.
 */
static void *
nv50_disp_dtor_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);

	/* RAMHT references the instance object, so drop it first. */
	nvkm_ramht_del(&disp->ramht);
	nvkm_gpuobj_del(&disp->inst);

	nvkm_event_fini(&disp->uevent);
	/* wq may be NULL if nv50_disp_new_() failed before creating it. */
	if (disp->wq)
		destroy_workqueue(disp->wq);

	return disp;
}
85
/* One-time initialisation: enumerate and construct the display resources
 * (windows, heads, DACs, PIORs, SORs) advertised by the chipset function
 * table, then allocate the display instance memory and its hash table.
 *
 * Returns 0 on success, or a negative errno from a constructor/allocator.
 */
static int
nv50_disp_oneinit_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	const struct nv50_disp_func *func = disp->func;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	/* Windows only exist on chipsets whose func table provides a counter. */
	if (func->wndw.cnt) {
		disp->wndw.nr = func->wndw.cnt(&disp->base, &disp->wndw.mask);
		nvkm_debug(subdev, "Window(s): %d (%08lx)\n",
			   disp->wndw.nr, disp->wndw.mask);
	}

	/* Heads are mandatory; construct one object per bit in the mask. */
	disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n",
		   disp->head.nr, disp->head.mask);
	for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
		ret = func->head.new(&disp->base, i);
		if (ret)
			return ret;
	}

	/* DACs are optional (absent on newer chipsets). */
	if (func->dac.cnt) {
		disp->dac.nr = func->dac.cnt(&disp->base, &disp->dac.mask);
		nvkm_debug(subdev, "   DAC(s): %d (%02lx)\n",
			   disp->dac.nr, disp->dac.mask);
		for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
			ret = func->dac.new(&disp->base, i);
			if (ret)
				return ret;
		}
	}

	/* PIORs are optional as well. */
	if (func->pior.cnt) {
		disp->pior.nr = func->pior.cnt(&disp->base, &disp->pior.mask);
		nvkm_debug(subdev, "  PIOR(s): %d (%02lx)\n",
			   disp->pior.nr, disp->pior.mask);
		for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
			ret = func->pior.new(&disp->base, i);
			if (ret)
				return ret;
		}
	}

	/* SORs are mandatory. */
	disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
	nvkm_debug(subdev, "   SOR(s): %d (%02lx)\n",
		   disp->sor.nr, disp->sor.mask);
	for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
		ret = func->sor.new(&disp->base, i);
		if (ret)
			return ret;
	}

	/* 64KiB instance block backing the display engine's object space. */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
			      &disp->inst);
	if (ret)
		return ret;

	/* Hash table for object handles; size is per-chipset, default 4KiB. */
	return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
			      0x1000, 0, disp->inst, &disp->ramht);
}
149
/* Generic nvkm_disp interface; each entry thunks into the per-chipset
 * nv50_disp_func table carried in struct nv50_disp.
 */
static const struct nvkm_disp_func
nv50_disp_ = {
	.dtor = nv50_disp_dtor_,
	.oneinit = nv50_disp_oneinit_,
	.init = nv50_disp_init_,
	.fini = nv50_disp_fini_,
	.intr = nv50_disp_intr_,
	.root = nv50_disp_root_,
};
159
/* Common constructor for all nv50-family display engines.
 *
 * Allocates the nv50_disp wrapper, binds the chipset function table,
 * creates the single-threaded supervisor workqueue and the channel
 * completion event.  *pdisp is set before nvkm_disp_ctor() so that on
 * failure the caller can still tear down via the standard dtor path
 * (NOTE(review): error returns here rely on that convention — the
 * allocation is not freed locally; verify against nvkm core teardown).
 *
 * Returns 0 on success, -ENOMEM or a ctor/event error otherwise.
 */
int
nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp **pdisp)
{
	struct nv50_disp *disp;
	int ret;

	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
		return -ENOMEM;
	disp->func = func;
	*pdisp = &disp->base;

	ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
	if (ret)
		return ret;

	/* Supervisor interrupts are serviced from process context. */
	disp->wq = create_singlethread_workqueue("nvkm-disp");
	if (!disp->wq)
		return -ENOMEM;

	INIT_WORK(&disp->supervisor, func->super);

	/* One event bit per channel, for channel-idle notification. */
	return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
			       &disp->uevent);
}
185
/* Look up the VBIOS IED (Init Engine Display) table entry for an output.
 *
 * The match key encodes head, link and OR index in the mask (m) and the
 * output device hash in the type (t).  Returns the table pointer, or 0
 * when no matching entry exists (logged at debug level).
 */
static u32
nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
		     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		     struct nvbios_outp *iedt)
{
	struct nvkm_bios *bios = head->disp->engine.subdev.device->bios;
	const u8  l = ffs(outp->info.link);
	const u16 t = outp->info.hasht;
	const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or;
	u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
	if (!data)
		OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
	return data;
}
200
/* Execute the VBIOS "on" IED script (OnInt2/OnInt3, selected by id) for
 * the output being attached to a head, at the given pixel clock (khz).
 *
 * Silently returns if nothing is attached or any VBIOS table lookup
 * fails (each miss is logged at debug level).
 */
static void
nv50_disp_super_ied_on(struct nvkm_head *head,
		       struct nvkm_ior *ior, int id, u32 khz)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp = ior->asy.outp;
	struct nvbios_ocfg iedtrs;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len, flags = 0x00;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing to attach");
		return;
	}

	/* Lookup IED table for the device. */
	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Lookup IEDT runtime settings for the current configuration.
	 * For SORs, the flags encode LVDS depth (bit 1: 24bpp) and
	 * dual-link (bit 0: both sublinks in use).
	 */
	if (ior->type == SOR) {
		if (ior->asy.proto == LVDS) {
			if (head->asy.or.depth == 24)
				flags |= 0x02;
		}
		if (ior->asy.link == 3)
			flags |= 0x01;
	}

	data = nvbios_ocfg_match(bios, data, ior->asy.proto_evo, flags,
				 &ver, &hdr, &cnt, &len, &iedtrs);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RS for %02x:%02x",
			 ior->asy.proto_evo, flags);
		return;
	}

	/* Execute the OnInt[23] script for the current frequency. */
	data = nvbios_oclk_match(bios, iedtrs.clkcmp[id], khz);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RSS %d for %02x:%02x %d khz",
			 id, ior->asy.proto_evo, flags, khz);
		return;
	}

	/* nvbios_init() runs the script with this execution context. */
	nvbios_init(subdev, data,
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->asy.link;
		init.head = head->id;
	);
}
256
/* Execute the VBIOS "off" IED script (index id, e.g. OffInt1/OffInt2)
 * for the output currently armed on the head.  No-op if nothing is
 * attached or the IED table is missing.
 */
static void
nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id)
{
	struct nvkm_outp *outp = ior->arm.outp;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing attached");
		return;
	}

	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Run the script against the state currently armed in hardware. */
	nvbios_init(&head->disp->engine.subdev, iedt.script[id],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
		init.head = head->id;
	);
}
281
/* Find the OR whose *pending* (asy) state targets this head — i.e. the
 * OR about to be attached by the in-flight modeset.  NULL if none.
 */
static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head *head)
{
	struct nvkm_ior *ior;
	list_for_each_entry(ior, &head->disp->ior, head) {
		if (ior->asy.head & (1 << head->id)) {
			HEAD_DBG(head, "to %s", ior->name);
			return ior;
		}
	}
	HEAD_DBG(head, "nothing to attach");
	return NULL;
}
295
/* Find the OR whose *current* (arm) state drives this head — i.e. the
 * OR that is attached in hardware right now.  NULL if none.
 */
static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head *head)
{
	struct nvkm_ior *ior;
	list_for_each_entry(ior, &head->disp->ior, head) {
		if (ior->arm.head & (1 << head->id)) {
			HEAD_DBG(head, "on %s", ior->name);
			return ior;
		}
	}
	HEAD_DBG(head, "nothing attached");
	return NULL;
}
309
/* Supervisor 3.0: final stage of a modeset — run the OnInt3 IED script
 * for the OR being attached, then apply any OR-specific workaround.
 */
void
nv50_disp_super_3_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching to the head. */
	HEAD_DBG(head, "supervisor 3.0");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* Execute OnInt3 IED script. */
	nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000);

	/* OR-specific handling. */
	if (ior->func->war_3)
		ior->func->war_3(ior);
}
328
/* DisplayPort link configuration for supervisor 2.2: program the audio
 * symbol counts, and (where the hardware supports it) search for the
 * best transfer-unit (TU) valid-symbol fraction, then set the FIFO
 * watermark.  Algorithms are derived from comments in the tegra driver.
 */
static void
nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	const u32 khz = head->asy.hz / 1000;		/* pixel clock */
	const u32 linkKBps = ior->dp.bw * 27000;	/* per-lane link rate */
	const u32 symbol = 100000;			/* fixed-point scale */
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u64 h, v;

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7;
	h = h * linkKBps;
	do_div(h, khz);
	h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	v = head->asy.vblanks - head->asy.vblanke - 25;
	v = v * linkKBps;
	do_div(v, khz);
	v = v - ((36 / ior->dp.nr) + 3) - 1;

	ior->func->dp.audio_sym(ior, head->id, h, v);

	/* watermark / activesym */
	link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, linkKBps);

	/* Search TUs from 64 down to 32 for the representation of the
	 * valid-symbol fraction (integer VTUi, fractional VTUf, adjust
	 * VTUa) with the smallest rounding error.  Skipped entirely when
	 * the OR has no activesym hook.
	 */
	for (TU = 64; ior->func->dp.activesym && TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa  = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa  = 0;
					VTUf  = 1;
					calc += symbol;
				}
			} else {
				VTUa  = 0;
				VTUf  = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero. decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (ior->func->dp.activesym) {
		if (!bestTU) {
			nvkm_error(subdev, "unable to determine dp config\n");
			return;
		}
		ior->func->dp.activesym(ior, head->id, bestTU,
					bestVTUa, bestVTUf, bestVTUi);
	} else {
		bestTU = 64;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	ior->func->dp.watermark(ior, head->id, unk);
}
434
/* Supervisor 2.2: attach phase of a modeset — acquire the output path,
 * run the OnInt2 IED script, program clocks/dividers, and perform
 * protocol-specific (LVDS/DP) configuration for the new OR.
 */
void
nv50_disp_super_2_2(struct nv50_disp *disp, struct nvkm_head *head)
{
	const u32 khz = head->asy.hz / 1000;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching from the head. */
	HEAD_DBG(head, "supervisor 2.2");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* For some reason, NVIDIA decided not to:
	 *
	 * A) Give dual-link LVDS a separate EVO protocol, like for TMDS.
	 *  and
	 * B) Use SetControlOutputResource.PixelDepth on LVDS.
	 *
	 * Override the values we usually read from HW with the same
	 * data we pass though an ioctl instead.
	 */
	if (ior->type == SOR && ior->asy.proto == LVDS) {
		head->asy.or.depth = (disp->sor.lvdsconf & 0x0200) ? 24 : 18;
		ior->asy.link      = (disp->sor.lvdsconf & 0x0100) ? 3 : 1;
	}

	/* Handle any link training, etc. */
	if ((outp = ior->asy.outp) && outp->func->acquire)
		outp->func->acquire(outp);

	/* Execute OnInt2 IED script. */
	nv50_disp_super_ied_on(head, ior, 0, khz);

	/* Program RG clock divider. */
	head->func->rgclk(head, ior->asy.rgdiv);

	/* Mode-specific internal DP configuration. */
	if (ior->type == SOR && ior->asy.proto == DP)
		nv50_disp_super_2_2_dp(head, ior);

	/* OR-specific handling. */
	ior->func->clock(ior);
	if (ior->func->war_2)
		ior->func->war_2(ior);
}
481
/* Supervisor 2.1: program the head's video PLL (VPLL) to the pending
 * pixel clock.  Skipped when the head has no mode (khz == 0).
 */
void
nv50_disp_super_2_1(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_devinit *devinit = disp->base.engine.subdev.device->devinit;
	const u32 khz = head->asy.hz / 1000;
	HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
	if (khz)
		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz);
}
491
/* Supervisor 2.0: detach phase of a modeset — run the OffInt2 IED
 * script for the armed OR, and disable the output path if this head
 * was the OR's only consumer.
 */
void
nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're detaching from the head. */
	HEAD_DBG(head, "supervisor 2.0");
	ior = nv50_disp_super_ior_arm(head);
	if (!ior)
		return;

	/* Execute OffInt2 IED script. */
	nv50_disp_super_ied_off(head, ior, 2);

	/* If we're shutting down the OR's only active head, execute
	 * the output path's disable function.
	 */
	if (ior->arm.head == (1 << head->id)) {
		if ((outp = ior->arm.outp) && outp->func->disable)
			outp->func->disable(outp, ior);
	}
}
515
/* Supervisor 1.0: run the OffInt1 IED script for the OR currently
 * armed on the head, ahead of the detach in supervisor 2.0.
 */
void
nv50_disp_super_1_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're detaching from the head. */
	HEAD_DBG(head, "supervisor 1.0");
	ior = nv50_disp_super_ior_arm(head);
	if (!ior)
		return;

	/* Execute OffInt1 IED script. */
	nv50_disp_super_ied_off(head, ior, 1);
}
530
/* Supervisor 1: snapshot both the armed (current hardware) and asy
 * (pending) state of every head and OR, for the later stages to use.
 */
void
nv50_disp_super_1(struct nv50_disp *disp)
{
	struct nvkm_head *head;
	struct nvkm_ior *ior;

	list_for_each_entry(head, &disp->base.head, head) {
		head->func->state(head, &head->arm);
		head->func->state(head, &head->asy);
	}

	list_for_each_entry(ior, &disp->base.ior, head) {
		ior->func->state(ior, &ior->arm);
		ior->func->state(ior, &ior->asy);
	}
}
547
/* Supervisor work handler, queued from nv50_disp_intr() when an
 * interrupt in the 0x70 mask fires.  disp->super holds which stage
 * (1/2/3) was latched at IRQ time; register 0x610030 reports which
 * heads need servicing.  Writing 0x80000000 back acknowledges the
 * supervisor and lets hardware proceed to the next stage.
 */
void
nv50_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 super = nvkm_rd32(device, 0x610030);

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);

	/* Stage 1: state capture + OffInt1 for heads being detached. */
	if (disp->super & 0x00000010) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000020 << head->id)))
				continue;
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	/* Stage 2: detach, reroute outputs, set PLLs, then attach. */
	if (disp->super & 0x00000020) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000200 << head->id)))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	/* Stage 3: post-attach scripts/workarounds. */
	if (disp->super & 0x00000040) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	nvkm_wr32(device, 0x610030, 0x80000000);
}
599
/* Decode table for the error-type field of register 0x610080 (bits
 * 14:12); shared with other chipset files (non-static).
 */
const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 0, "NONE" },
	{ 1, "PUSHBUFFER_ERR" },
	{ 2, "TRAP" },
	{ 3, "RESERVED_METHOD" },
	{ 4, "INVALID_ARG" },
	{ 5, "INVALID_STATE" },
	{ 7, "UNRESOLVABLE_HANDLE" },
	{}
};
611
/* Decode table for the error-code field (bits 23:16); only code 0 is
 * named on nv50.
 */
static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};
617
/* Handle a per-channel EVO error interrupt: decode and log the faulting
 * method/type/code, optionally dump channel methods, then acknowledge
 * the interrupt and clear the error-report register.
 */
static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	/* Method 0x0080 is UPDATE; dump the channel state for debugging. */
	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* Ack the error interrupt bit and reset the error report. */
	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}
651
/* Top-level NV50 display interrupt handler.  Dispatches, in order:
 * per-channel errors (0x610020 bits 20:16), channel-idle events
 * (bits 4:0), vblank for each head (0x610024 bits 2-3), and the
 * supervisor request (bits 6:4), which is deferred to the workqueue.
 */
void
nv50_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	/* Latch which supervisor stage fired before acking, since the
	 * work item runs after the ack.
	 */
	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		queue_work(disp->wq, &disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}
687
/* Shut down the display engine by masking both interrupt-enable
 * registers; no state is otherwise torn down here.
 */
void
nv50_disp_fini(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);
}
696
/* Runtime initialisation of the NV50 display engine: mirror capability
 * registers into the EVO-visible range, take ownership of the display
 * from the VBIOS, point the engine at its instance memory, and enable
 * only the supervisor interrupts.
 *
 * Returns 0 on success, -EBUSY if the VBIOS handoff times out.
 */
int
nv50_disp_init(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->base.head, head) {
		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* Poll up to 2ms for the handoff-busy bit to clear. */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}
761
/* NV50-specific function table consumed by the common thunks above. */
static const struct nv50_disp_func
nv50_disp = {
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.uevent = &nv50_disp_chan_uevent,
	.super = nv50_disp_super,
	.root = &nv50_disp_root_oclass,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
};
775
/* Public constructor for the NV50 display engine: binds the nv50
 * function table and delegates to the common constructor.
 */
int
nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&nv50_disp, device, index, pdisp);
}
781