1 /*	$NetBSD: nouveau_nvkm_engine_gr_nv50.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2012 Red Hat Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Ben Skeggs
25  */
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_gr_nv50.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $");
28 
29 #include "nv50.h"
30 
31 #include <core/client.h>
32 #include <core/gpuobj.h>
33 #include <engine/fifo.h>
34 
35 #include <nvif/class.h>
36 
37 u64
nv50_gr_units(struct nvkm_gr * gr)38 nv50_gr_units(struct nvkm_gr *gr)
39 {
40 	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
41 }
42 
43 /*******************************************************************************
44  * Graphics object classes
45  ******************************************************************************/
46 
47 static int
nv50_gr_object_bind(struct nvkm_object * object,struct nvkm_gpuobj * parent,int align,struct nvkm_gpuobj ** pgpuobj)48 nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
49 		    int align, struct nvkm_gpuobj **pgpuobj)
50 {
51 	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
52 				  align, false, parent, pgpuobj);
53 	if (ret == 0) {
54 		nvkm_kmap(*pgpuobj);
55 		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
56 		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
57 		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
58 		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
59 		nvkm_done(*pgpuobj);
60 	}
61 	return ret;
62 }
63 
/* Object vtable shared by all NV50 graphics object classes. */
const struct nvkm_object_func
nv50_gr_object = {
	.bind = nv50_gr_object_bind,
};
68 
69 /*******************************************************************************
70  * PGRAPH context
71  ******************************************************************************/
72 
73 static int
nv50_gr_chan_bind(struct nvkm_object * object,struct nvkm_gpuobj * parent,int align,struct nvkm_gpuobj ** pgpuobj)74 nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
75 		  int align, struct nvkm_gpuobj **pgpuobj)
76 {
77 	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
78 	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
79 				  align, true, parent, pgpuobj);
80 	if (ret == 0) {
81 		nvkm_kmap(*pgpuobj);
82 		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
83 		nvkm_done(*pgpuobj);
84 	}
85 	return ret;
86 }
87 
/* Object vtable for NV50 PGRAPH channel context objects. */
static const struct nvkm_object_func
nv50_gr_chan = {
	.bind = nv50_gr_chan_bind,
};
92 
93 int
nv50_gr_chan_new(struct nvkm_gr * base,struct nvkm_fifo_chan * fifoch,const struct nvkm_oclass * oclass,struct nvkm_object ** pobject)94 nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
95 		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
96 {
97 	struct nv50_gr *gr = nv50_gr(base);
98 	struct nv50_gr_chan *chan;
99 
100 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
101 		return -ENOMEM;
102 	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
103 	chan->gr = gr;
104 	*pobject = &chan->object;
105 	return 0;
106 }
107 
108 /*******************************************************************************
109  * PGRAPH engine/subdev functions
110  ******************************************************************************/
111 
/* MP execution error bits, decoded in nv50_gr_mp_trap(). */
static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

/* MPC (MP controller) trap bits, decoded in nv50_gr_tp_trap() type 7. */
static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

/* Texture unit trap bits, decoded in nv50_gr_tp_trap() type 6. */
static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

/* M2MF (memory-to-memory copy) trap bits. */
static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

/* VFETCH (vertex fetch) trap bits. */
static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* STRMOUT (stream output / transform feedback) trap bits. */
static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* CCACHE (code/constant cache) trap bits. */
static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

/* Top-level PGRAPH interrupt status bits (0x400100), logged by nv50_gr_intr(). */
static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

/* PROP unit trap bits, decoded in nv50_gr_prop_trap(). */
static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};
243 
/*
 * Decode and log a PROP-unit trap for one TP.
 *
 * ustatus_addr points at the TP's PROP ustatus register; the fault
 * detail registers follow it at offsets +0x04..+0x1c (locals are named
 * e0c..e24 after the low bits of their register offsets).  CUDA memory
 * faults (bit 0x80 of ustatus) are decoded specially; any remaining
 * bits are printed via the nv50_gr_trap_prop table.
 */
static void
nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
	char msg[128];

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				 tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	/* Any bits not handled above: decode via the bitfield table. */
	if (ustatus) {
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
				   "Address %02x%08x\n",
			   tp, ustatus, msg, e14, e10);
	}
	/* Always dump the raw detail registers for debugging. */
	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		 tp, e0c, e18, e1c, e20, e24);
}
285 
/*
 * Decode and acknowledge MP execution traps for one TP.
 *
 * Bits 24..27 of the unit config register (0x1540) select which of the
 * (up to four) MPs are present.  The per-MP register base depends on
 * the chipset generation (<0xa0 uses a 12-bit TP stride, >=0xa0 an
 * 11-bit one).  If @display is set, each MP's status is decoded via
 * nv50_mp_exec_errors and logged with PC/warp/opcode; the trap is
 * acknowledged either way by rewriting +0x10 and clearing +0x14.
 */
static void
nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	char msg[128];
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (device->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nvkm_rd32(device, addr + 0x10);
		status = nvkm_rd32(device, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* Read +0x20 (value unused) before reading the PC. */
			nvkm_rd32(device, addr + 0x20);
			pc = nvkm_rd32(device, addr + 0x24);
			oplow = nvkm_rd32(device, addr + 0x70);
			ophigh = nvkm_rd32(device, addr + 0x74);
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_mp_exec_errors, status);
			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
					   "%08x [%s] at %06x warp %d, "
					   "opcode %08x %08x\n",
				   tpid, i, status, msg, pc & 0xffffff,
				   pc >> 24, oplow, ophigh);
		}
		/* Acknowledge the trap. */
		nvkm_wr32(device, addr + 0x10, mp10);
		nvkm_wr32(device, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
328 
/*
 * Handle a per-TP trap of the given @type (6 = texture, 7 = MP,
 * 8 = PROP) for every enabled TP.
 *
 * Bits 0..15 of the unit config register (0x1540) select the enabled
 * TPs.  @ustatus_old/@ustatus_new give the ustatus register base for
 * pre-0xa0 and 0xa0+ chipsets respectively (which use different TP
 * strides).  Each TP's trap is decoded (and logged if @display) and
 * then acknowledged by writing 0xc0000000 to its ustatus register.
 */
static void
nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	int tps = 0;
	int i, r;
	char msg[128];
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (device->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nvkm_error(subdev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nvkm_error(subdev, "\t%08x: %08x\n", r,
						   nvkm_rd32(device, r));
				if (ustatus) {
					nvkm_snprintbf(msg, sizeof(msg),
						       nv50_tex_traps, ustatus);
					nvkm_error(subdev,
						   "%s - TP%d: %08x [%s]\n",
						   name, i, ustatus, msg);
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_gr_mp_trap(gr, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nvkm_snprintbf(msg, sizeof(msg),
					       nv50_mpc_traps, ustatus);
				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
					   name, i, ustatus, msg);
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_gr_prop_trap(
						gr, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		/* Anything still set was not decoded above. */
		if (ustatus) {
			if (display)
				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
		}
		/* Acknowledge the trap. */
		nvkm_wr32(device, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}
398 
/*
 * Decode, log and acknowledge all pending PGRAPH traps (0x400108).
 *
 * Each handled unit's bit is cleared from @status and acknowledged in
 * 0x400108; a bit left set at the end is reported as unknown.  Returns
 * 0 when every pending trap was handled, 1 otherwise (the caller uses
 * this to decide whether to keep the TRAP bit in its "show" mask).
 * @display suppresses logging when zero; @chid/@inst/@name identify
 * the channel for log messages.
 */
static int
nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
		     int chid, u64 inst, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 status = nvkm_rd32(device, 0x400108);
	u32 ustatus;
	char msg[128];

	if (!status && display) {
		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
		}

		nvkm_wr32(device, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nvkm_rd32(device, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nvkm_rd32(device, 0x40080c);
			u32 datah = nvkm_rd32(device, 0x400810);
			u32 class = nvkm_rd32(device, 0x400814);
			u32 r848 = nvkm_rd32(device, 0x400848);

			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010"PRIx64" %s] subc %d "
					   "class %04x mthd %04x data %08x%08x "
					   "400808 %08x 400848 %08x\n",
					   chid, inst, name, subc, class, mthd,
					   datah, datal, addr, r848);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x400808, 0);
			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
			nvkm_wr32(device, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nvkm_rd32(device, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nvkm_rd32(device, 0x40085c);
			u32 class = nvkm_rd32(device, 0x400814);

			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010"PRIx64" %s] subc %d "
					   "class %04x mthd %04x data %08x "
					   "40084c %08x\n", chid, inst, name,
					   subc, class, mthd, data, addr);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH "
					   "(unknown %08x)\n", ustatus);
		}

		nvkm_wr32(device, 0x400804, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_m2mf, ustatus);
			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x406804),
				   nvkm_rd32(device, 0x406808),
				   nvkm_rd32(device, 0x40680c),
				   nvkm_rd32(device, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 2);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x406800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_vfetch, ustatus);
			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x400c00),
				   nvkm_rd32(device, 0x400c08),
				   nvkm_rd32(device, 0x400c0c),
				   nvkm_rd32(device, 0x400c10));
		}

		nvkm_wr32(device, 0x400c04, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_strmout, ustatus);
			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x401804),
				   nvkm_rd32(device, 0x401808),
				   nvkm_rd32(device, 0x40180c),
				   nvkm_rd32(device, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 0x80);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x401800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_ccache, ustatus);
			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
					   "%08x %08x %08x\n",
				   nvkm_rd32(device, 0x405000),
				   nvkm_rd32(device, 0x405004),
				   nvkm_rd32(device, 0x405008),
				   nvkm_rd32(device, 0x40500c),
				   nvkm_rd32(device, 0x405010),
				   nvkm_rd32(device, 0x405014),
				   nvkm_rd32(device, 0x40501c));
		}

		nvkm_wr32(device, 0x405018, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
		if (display)
			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nvkm_wr32(device, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nvkm_wr32(device, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
				    "TRAP_PROP");
		nvkm_wr32(device, 0x400108, 0x100);
		status &= ~0x100;
	}

	/* Anything left is from a unit we don't know about. */
	if (status) {
		if (display)
			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
		nvkm_wr32(device, 0x400108, status);
	}

	return 1;
}
623 
/*
 * PGRAPH interrupt handler.
 *
 * Reads the interrupt status (0x400100) and method-decode state,
 * resolves the faulting fifo channel from the context instance address
 * (0x40032c), decodes DATA_ERROR and TRAP conditions, acknowledges the
 * interrupt and logs whatever remains undecoded.
 */
void
nv50_gr_intr(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, 0x400708);
	u32 class = nvkm_rd32(device, 0x400814);
	u32 show = stat, show_bitfield = stat;
	const struct nvkm_enum *en;
	unsigned long flags;
	const char *name = "unknown";
	char msg[128];
	int chid = -1;

	/* Look up the channel owning the current context (inst is in
	 * units of 4KiB pages, hence the << 12). */
	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
	if (chan)  {
		name = chan->object.client->name;
		chid = chan->chid;
	}

	/* DATA_ERROR: decode the error code via nv50_data_error_names. */
	if (show & 0x00100000) {
		u32 ecode = nvkm_rd32(device, 0x400110);
		en = nvkm_enum_find(nv50_data_error_names, ecode);
		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
			   ecode, en ? en->name : "");
		show_bitfield &= ~0x00100000;
	}

	/* TRAP: hand off to the trap handler; drop the bit from "show"
	 * if every trap was handled. */
	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	/* Acknowledge the interrupt and re-enable fetching. */
	nvkm_wr32(device, 0x400100, stat);
	nvkm_wr32(device, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
		nvkm_error(subdev, "%08x [%s] ch %d [%010"PRIx64" %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   stat, msg, chid, (u64)inst << 12, name,
			   subc, class, mthd, data);
	}

	/* Clear bit 31 of 0x400824 if the hardware set it. */
	if (nvkm_rd32(device, 0x400824) & (1 << 31))
		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
682 
/*
 * One-time PGRAPH engine initialisation: enable hardware context
 * switching, arm the per-unit trap/interrupt enables, upload the
 * context program, and reset the zcull state.  Returns 0 on success
 * or the error from nv50_grctx_init().
 */
int
nv50_gr_init(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, units, i;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nvkm_wr32(device, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nvkm_wr32(device, 0x400804, 0xc0000000);
	nvkm_wr32(device, 0x406800, 0xc0000000);
	nvkm_wr32(device, 0x400c04, 0xc0000000);
	nvkm_wr32(device, 0x401800, 0xc0000000);
	nvkm_wr32(device, 0x405018, 0xc0000000);
	nvkm_wr32(device, 0x402000, 0xc0000000);

	/* Per-TP trap enables; register stride differs by chipset
	 * generation (see nv50_gr_tp_trap()). */
	units = nvkm_rd32(device, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (device->chipset < 0xa0) {
			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	/* Clear and unmask all trap/interrupt status bits. */
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x400824, 0x00000000);
	nvkm_wr32(device, 0x400828, 0x00000000);
	nvkm_wr32(device, 0x40082c, 0x00000000);
	nvkm_wr32(device, 0x400830, 0x00000000);
	nvkm_wr32(device, 0x40032c, 0x00000000);
	nvkm_wr32(device, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (device->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nvkm_wr32(device, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (device->chipset == 0xa0 ||
		    device->chipset == 0xaa ||
		    device->chipset == 0xac) {
			nvkm_wr32(device, 0x402ca8, 0x00000802);
		} else {
			nvkm_wr32(device, 0x402cc0, 0x00000000);
			nvkm_wr32(device, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
	}

	return 0;
}
766 
767 int
nv50_gr_new_(const struct nvkm_gr_func * func,struct nvkm_device * device,int index,struct nvkm_gr ** pgr)768 nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
769 	     int index, struct nvkm_gr **pgr)
770 {
771 	struct nv50_gr *gr;
772 
773 	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
774 		return -ENOMEM;
775 	spin_lock_init(&gr->lock);
776 	*pgr = &gr->base;
777 
778 	return nvkm_gr_ctor(func, device, index, true, &gr->base);
779 }
780 
781 static void *
nv50_gr_dtor(struct nvkm_gr * gr)782 nv50_gr_dtor(struct nvkm_gr *gr)
783 {
784 	spin_lock_destroy(&nv50_gr(gr)->lock);
785 	return gr;
786 }
787 
/* Engine function table and supported object classes for NV50 PGRAPH. */
static const struct nvkm_gr_func
nv50_gr = {
	.init = nv50_gr_init,
	.dtor = nv50_gr_dtor,
	.intr = nv50_gr_intr,
	.chan_new = nv50_gr_chan_new,
	.units = nv50_gr_units,
	.sclass = {
		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
		{ -1, -1, NV50_TWOD, &nv50_gr_object },
		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
		{ -1, -1, NV50_TESLA, &nv50_gr_object },
		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
		{}
	}
};
804 
/* Public entry point: construct the NV50 PGRAPH engine. */
int
nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv50_gr_new_(&nv50_gr, device, index, pgr);
}
810