/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 #define tu102_mc(p) container_of((p), struct tu102_mc, base)
23 #include "priv.h"
24 
/* TU102 (Turing) master-controller state: wraps the generic nvkm_mc with
 * a software copy of the interrupt mask so it can be re-applied to the
 * hardware whenever interrupts are armed or disarmed.
 */
struct tu102_mc {
	struct nvkm_mc base;
	spinlock_t lock;	/* serialises intr/mask and the HW mask writes */
	bool intr;		/* true while interrupts are armed (rearm/unarm) */
	u32 mask;		/* enabled interrupt lines, applied when armed */
};
31 
/* Push the effective interrupt mask to the hardware: mc->mask while armed,
 * zero while disarmed.  Caller must hold mc->lock.
 */
static void
tu102_mc_intr_update(struct tu102_mc *mc)
{
	struct nvkm_device *device = mc->base.subdev.device;
	u32 mask = mc->intr ? mc->mask : 0, i;

	/* For each of the two PMC interrupt trees, clear the disabled bits
	 * (0x180/0x184) before setting the enabled ones (0x160/0x164).
	 * NOTE(review): register roles presumed from usage — 0x180+ looks
	 * like INTR_EN clear, 0x160+ like INTR_EN set; confirm against HW docs.
	 */
	for (i = 0; i < 2; i++) {
		nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
		nvkm_wr32(device, 0x000160 + (i * 0x04),  mask);
	}

	/* MMU fault interrupts (PMC bit 9) are routed through a separate
	 * Turing-era interrupt tree; mirror the enable/disable there.
	 * NOTE(review): 0xb81608 appears to enable and 0xb81610 to disable
	 * the two fault vectors (0x6) — verify against hardware documentation.
	 */
	if (mask & 0x00000200)
		nvkm_wr32(device, 0xb81608, 0x6);
	else
		nvkm_wr32(device, 0xb81610, 0x6);
}
48 
49 void
tu102_mc_intr_unarm(struct nvkm_mc * base)50 tu102_mc_intr_unarm(struct nvkm_mc *base)
51 {
52 	struct tu102_mc *mc = tu102_mc(base);
53 	unsigned long flags;
54 
55 	spin_lock_irqsave(&mc->lock, flags);
56 	mc->intr = false;
57 	tu102_mc_intr_update(mc);
58 	spin_unlock_irqrestore(&mc->lock, flags);
59 }
60 
61 void
tu102_mc_intr_rearm(struct nvkm_mc * base)62 tu102_mc_intr_rearm(struct nvkm_mc *base)
63 {
64 	struct tu102_mc *mc = tu102_mc(base);
65 	unsigned long flags;
66 
67 	spin_lock_irqsave(&mc->lock, flags);
68 	mc->intr = true;
69 	tu102_mc_intr_update(mc);
70 	spin_unlock_irqrestore(&mc->lock, flags);
71 }
72 
73 void
tu102_mc_intr_mask(struct nvkm_mc * base,u32 mask,u32 intr)74 tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
75 {
76 	struct tu102_mc *mc = tu102_mc(base);
77 	unsigned long flags;
78 
79 	spin_lock_irqsave(&mc->lock, flags);
80 	mc->mask = (mc->mask & ~mask) | intr;
81 	tu102_mc_intr_update(mc);
82 	spin_unlock_irqrestore(&mc->lock, flags);
83 }
84 
85 static u32
tu102_mc_intr_stat(struct nvkm_mc * mc)86 tu102_mc_intr_stat(struct nvkm_mc *mc)
87 {
88 	struct nvkm_device *device = mc->subdev.device;
89 	u32 intr0 = nvkm_rd32(device, 0x000100);
90 	u32 intr1 = nvkm_rd32(device, 0x000104);
91 	u32 intr_top = nvkm_rd32(device, 0xb81600);
92 
93 	/* Turing and above route the MMU fault interrupts via a different
94 	 * interrupt tree with different control registers. For the moment remap
95 	 * them back to the old PMC vector.
96 	 */
97 	if (intr_top & 0x00000006)
98 		intr0 |= 0x00000200;
99 
100 	return intr0 | intr1;
101 }
102 
103 
/* Hook table for the TU102 master controller.  Only the interrupt
 * arm/disarm/mask/status paths are Turing-specific; init and reset are
 * inherited from earlier generations (NV50 / GP100 / GK104 helpers).
 */
static const struct nvkm_mc_func
tu102_mc = {
	.init = nv50_mc_init,
	.intr = gp100_mc_intr,
	.intr_unarm = tu102_mc_intr_unarm,
	.intr_rearm = tu102_mc_intr_rearm,
	.intr_mask = tu102_mc_intr_mask,
	.intr_stat = tu102_mc_intr_stat,
	.reset = gk104_mc_reset,
};
114 
115 static int
tu102_mc_new_(const struct nvkm_mc_func * func,struct nvkm_device * device,enum nvkm_subdev_type type,int inst,struct nvkm_mc ** pmc)116 tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
117 	      enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
118 {
119 	struct tu102_mc *mc;
120 
121 	if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
122 		return -ENOMEM;
123 	nvkm_mc_ctor(func, device, type, inst, &mc->base);
124 	*pmc = &mc->base;
125 
126 	spin_lock_init(&mc->lock);
127 	mc->intr = false;
128 	mc->mask = 0x7fffffff;
129 	return 0;
130 }
131 
/* Public constructor: create the MC subdev for TU102-class boards using
 * the Turing hook table above.
 */
int
tu102_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
	return tu102_mc_new_(&tu102_mc, device, type, inst, pmc);
}
137