/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24 #include "nv50.h"
25 #include "head.h"
26 #include "ior.h"
27 #include "channv50.h"
28 #include "rootnv50.h"
29 
30 #include <core/ramht.h>
31 #include <subdev/timer.h>
32 
void
gf119_disp_super(struct work_struct *work)
{
	/* Supervisor interrupt bottom-half: runs from disp->wq after
	 * gf119_disp_intr() latched disp->super.  The three stage bits
	 * (0x1/0x2/0x4) are handled mutually exclusively, with stage 2
	 * performing output routing between its sub-steps.
	 */
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 mask[4]; /* per-head status; assumes no more than 4 heads — TODO confirm */

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
	/* Latch each head's supervisor status word before acting on any. */
	list_for_each_entry(head, &disp->base.head, head) {
		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super & 0x00000001) {
		/* Stage 1: dump core channel state, then per-head step 1.0
		 * for heads flagged in bit 12 of their status word.
		 */
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		/* Stage 2: step 2.0 on flagged heads, re-route outputs,
		 * then steps 2.1 (bit 16) and 2.2 (bit 12) per head.
		 */
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		/* Stage 3: step 3.0 on heads flagged in bit 12. */
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Clear per-head status, then signal supervisor completion
	 * (presumably lets the hardware continue — NOTE(review): verify
	 * 0x6101d0 semantics against hw docs).
	 */
	list_for_each_entry(head, &disp->base.head, head)
		nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
	nvkm_wr32(device, 0x6101d0, 0x80000000);
}
88 
void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
	/* Decode and log an EVO channel exception for channel 'chid',
	 * optionally dump the channel's method state, then acknowledge
	 * the interrupt and clear the error latch.
	 */
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12)); /* error status */
	u32 type = (stat & 0x00007000) >> 12; /* reason code, see nv50_disp_intr_error_type */
	u32 mthd = (stat & 0x00000ffc);       /* offending method offset */
	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
	u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
	const struct nvkm_enum *reason =
		nvkm_enum_find(nv50_disp_intr_error_type, type);

	nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
			   "data %08x code %08x\n",
		   chid, stat, type, reason ? reason->name : "",
		   mthd, data, code);

	/* Only channels we track have state worth dumping. */
	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* Ack the per-channel error interrupt, then reset the latch. */
	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}
120 
void
gf119_disp_intr(struct nv50_disp *disp)
{
	/* Top-level display interrupt handler.  Dispatches, in order:
	 * bit 0  - channel completion notifications (uevents),
	 * bit 1  - channel exceptions,
	 * bit 20 - supervisor requests (deferred to workqueue),
	 * bits 24+ - per-head events (vblank).
	 */
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 intr = nvkm_rd32(device, 0x610088);

	if (intr & 0x00000001) {
		/* One bit per channel; notify and ack each in turn. */
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

	if (intr & 0x00000002) {
		/* Handle the lowest pending channel error; intr_error()
		 * acks it.  Any remaining errors re-raise the interrupt.
		 */
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			disp->func->intr_error(disp, chid);
		intr &= ~0x00000002;
	}

	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			/* Record the supervisor stage and defer the heavy
			 * lifting to process context (gf119_disp_super).
			 */
			disp->super = (stat & 0x00000007);
			queue_work(disp->wq, &disp->supervisor);
			nvkm_wr32(device, 0x6100ac, disp->super);
			stat &= ~0x00000007;
		}

		if (stat) {
			/* Unexpected bits: log and ack so we don't spin. */
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		u32 mask = 0x01000000 << head->id;
		if (mask & intr) {
			u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
			if (stat & 0x00000001)
				nvkm_disp_vblank(&disp->base, head->id);
			/* Write-zero mask acks the event; the read-back
			 * presumably flushes the posted write — NOTE(review):
			 * confirm against hw docs.
			 */
			nvkm_mask(device, 0x6100bc + hoff, 0, 0);
			nvkm_rd32(device, 0x6100c0 + hoff);
		}
	}
}
176 
177 void
gf119_disp_fini(struct nv50_disp * disp)178 gf119_disp_fini(struct nv50_disp *disp)
179 {
180 	struct nvkm_device *device = disp->base.engine.subdev.device;
181 	/* disable all interrupts */
182 	nvkm_wr32(device, 0x6100b0, 0x00000000);
183 }
184 
int
gf119_disp_init(struct nv50_disp *disp)
{
	/* Bring up the display engine: mirror capability registers into
	 * the area EVO reads them from, take ownership from the VBIOS,
	 * point the hardware at our instance memory, and configure
	 * interrupt enables.
	 *
	 * Returns 0 on success, -EBUSY if the VBIOS handoff times out.
	 */
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		tmp = nvkm_rd32(device, 0x616104 + hoff);
		nvkm_wr32(device, 0x6101b4 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x616108 + hoff);
		nvkm_wr32(device, 0x6101b8 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x61610c + hoff);
		nvkm_wr32(device, 0x6101bc + hoff, tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that;
	 * poll up to 2ms for bit 1 of 0x6194e8 to clear after we
	 * drop bit 0.
	 */
	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
		nvkm_wr32(device, 0x6100ac, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x610090, 0x00000000);
	nvkm_wr32(device, 0x6100a0, 0x00000000);
	nvkm_wr32(device, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some gk104 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
	}

	return 0;
}
253 
/* GF119 (Fermi display class) implementation of the NV50 display
 * function table; wired up by gf119_disp_new() below.
 */
static const struct nv50_disp_func
gf119_disp = {
	.init = gf119_disp_init,
	.fini = gf119_disp_fini,
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.uevent = &gf119_disp_chan_uevent,
	.super = gf119_disp_super,
	.root = &gf119_disp_root_oclass,
	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
	.sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
};
267 
268 int
gf119_disp_new(struct nvkm_device * device,enum nvkm_subdev_type type,int inst,struct nvkm_disp ** pdisp)269 gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
270 	       struct nvkm_disp **pdisp)
271 {
272 	return nv50_disp_new_(&gf119_disp, device, type, inst, pdisp);
273 }
274