/*	$OpenBSD: xicp.c,v 1.5 2022/04/06 18:59:27 naddy Exp $	*/
/*
 * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/opal.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

#define XICP_NUM_IRQS	1024

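/*
 * Interrupt presentation controller (ICP) registers.  A 4-byte read
 * of XIRR accepts the most favored pending interrupt and returns its
 * source in the low 24 bits; writing the same value back signals EOI
 * and restores the priority held in the top byte.  That top byte is
 * the CPPR, which can also be written on its own (a 1-byte access at
 * the same offset) to change the priority threshold without an EOI.
 * MFRR is used to trigger IPIs.
 */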
#define XICP_CPPR		0x04
#define XICP_XIRR		0x04
#define  XICP_XIRR_XISR_MASK	0x00ffffff
#define  XICP_XIRR_CPPR_SHIFT	24
#define XICP_MFRR		0x0c

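/*
 * Map a kernel IPL to a XICS priority.  Lower values are more
 * favored: IPL_IPI maps to 0, which masks all interrupt sources,
 * while anything more than 7 levels below IPL_IPI maps to 0xff,
 * which lets every source through.
 */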
static inline uint8_t
xicp_prio(int ipl)
{
	return ((IPL_IPI - ipl) > 7 ? 0xff : IPL_IPI - ipl);
}

struct intrhand {
	LIST_ENTRY(intrhand)	ih_hash;
	int			(*ih_func)(void *);
	void			*ih_arg;
	int			ih_ipl;
	int			ih_flags;
	uint32_t		ih_girq;
	struct evcount		ih_count;
	const char		*ih_name;
};

struct xicp_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
};

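/* ICP instance that serves each CPU, indexed by cpuid. */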
struct xicp_softc *xicp_sc[MAXCPUS];

/* Hash table for interrupt handlers. */
#define XICP_GIRQHASH(girq)	(&xicp_girqhashtbl[(girq) & xicp_girqhash])
LIST_HEAD(,intrhand) *xicp_girqhashtbl;
u_long	xicp_girqhash;

static inline void
xicp_write_1(struct xicp_softc *sc, bus_size_t off, uint8_t val)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val);
}

static inline uint32_t
xicp_read_4(struct xicp_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static inline void
xicp_write_4(struct xicp_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

int	xicp_match(struct device *, void *, void *);
void	xicp_attach(struct device *, struct device *, void *);

const struct cfattach xicp_ca = {
	sizeof (struct xicp_softc), xicp_match, xicp_attach
};

struct cfdriver xicp_cd = {
	NULL, "xicp", DV_DULL
};

void	xicp_exi(struct trapframe *);
void	*xicp_intr_establish(uint32_t, int, int, struct cpu_info *,
	    int (*)(void *), void *, const char *);
void	xicp_intr_send_ipi(void *);
void	xicp_setipl(int);

int
xicp_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "ibm,ppc-xicp") ||
	    OF_is_compatible(faa->fa_node, "IBM,ppc-xicp"));
}

void
xicp_attach(struct device *parent, struct device *self, void *aux)
{
	struct xicp_softc *sc = (struct xicp_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	uint32_t ranges[2];

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	ranges[0] = ranges[1] = 0;
	OF_getpropintarray(faa->fa_node, "ibm,interrupt-server-ranges",
	    ranges, sizeof(ranges));
	if (ranges[1] == 0)
		return;

	/*
	 * There is supposed to be one ICP node for each core.  Since
	 * we only support a single thread, we only need to map the
	 * first set of registers.
	 */
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	printf("\n");

	/*
	 * Allocate global hash table for interrupt handlers if we
	 * haven't done so already.
	 */
	if (xicp_girqhash == 0) {
		xicp_girqhashtbl = hashinit(XICP_NUM_IRQS,
		    M_DEVBUF, M_WAITOK, &xicp_girqhash);
	}

	CPU_INFO_FOREACH(cii, ci) {
		if (ranges[0] == ci->ci_pir)
			xicp_sc[ci->ci_cpuid] = sc;
	}

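	/* Install this controller's machine-dependent interrupt hooks. */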
	_exi = xicp_exi;
	_intr_establish = xicp_intr_establish;
	_intr_send_ipi = xicp_intr_send_ipi;
	_setipl = xicp_setipl;

	/* Synchronize hardware state to software state. */
	xicp_write_1(sc, XICP_CPPR, xicp_prio(curcpu()->ci_cpl));
}

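/* Sending IPIs (presumably via the target CPU's MFRR) is not implemented. */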
void
xicp_intr_send_ipi(void *cookie)
{
	panic("%s", __func__);
}

void *
xicp_intr_establish(uint32_t girq, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, const char *name)
{
	struct intrhand *ih;
	int64_t error;
	uint16_t server;

	if (ci == NULL)
		ci = cpu_info_primary;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_girq = girq;
	ih->ih_name = name;
	LIST_INSERT_HEAD(XICP_GIRQHASH(girq), ih, ih_hash);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_girq);

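	/*
	 * Have OPAL route the interrupt to the chosen CPU at the
	 * requested priority; undo the bookkeeping above if that
	 * fails.
	 */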
	server = ci->ci_pir << 2;
	error = opal_set_xive(girq, server, xicp_prio(level & IPL_IRQMASK));
	if (error != OPAL_SUCCESS) {
		if (name)
			evcount_detach(&ih->ih_count);
		LIST_REMOVE(ih, ih_hash);
		free(ih, M_DEVBUF, sizeof(*ih));
		return NULL;
	}

	return ih;
}

void
xicp_setipl(int new)
{
	struct xicp_softc *sc = xicp_sc[cpu_number()];
	struct cpu_info *ci = curcpu();
	uint8_t oldprio = xicp_prio(ci->ci_cpl);
	uint8_t newprio = xicp_prio(new);
	u_long msr;

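	/*
	 * Update the software IPL and the hardware CPPR together,
	 * with interrupts disabled so the two can't get out of sync.
	 */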
	msr = intr_disable();
	ci->ci_cpl = new;
	if (newprio != oldprio)
		xicp_write_1(sc, XICP_CPPR, newprio);
	intr_restore(msr);
}

void
xicp_exi(struct trapframe *frame)
{
	struct xicp_softc *sc = xicp_sc[cpu_number()];
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint32_t xirr, xisr;
	int handled, old;

	KASSERT(sc);

	old = ci->ci_cpl;

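	/*
	 * Accept and dispatch interrupts until reading XIRR returns a
	 * zero XISR, i.e. nothing more is pending.
	 */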
	while (1) {
		xirr = xicp_read_4(sc, XICP_XIRR);
		xisr = xirr & XICP_XIRR_XISR_MASK;

		if (xisr == 0)
			break;

		/* Look up the interrupt handler in the hash table. */
		LIST_FOREACH(ih, XICP_GIRQHASH(xisr), ih_hash) {
			if (ih->ih_girq == xisr)
				break;
		}

		if (ih != NULL) {
#ifdef MULTIPROCESSOR
			int need_lock;

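			/*
			 * Non-MPSAFE handlers below IPL_SCHED still
			 * need the kernel lock.
			 */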
			if (ih->ih_flags & IPL_MPSAFE)
				need_lock = 0;
			else
				need_lock = (ih->ih_ipl < IPL_SCHED);

			if (need_lock)
				KERNEL_LOCK();
#endif
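			/*
			 * Run the handler at its own IPL with
			 * interrupts enabled, so more favored
			 * interrupts can still be taken.
			 */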
			ci->ci_cpl = ih->ih_ipl;
			xicp_write_1(sc, XICP_CPPR, xicp_prio(ih->ih_ipl));

			intr_enable();
			handled = ih->ih_func(ih->ih_arg);
			intr_disable();
			if (handled)
				ih->ih_count.ec_count++;
#ifdef MULTIPROCESSOR
			if (need_lock)
				KERNEL_UNLOCK();
#endif
		}

		/*
		 * Signal EOI.  Writing back the XIRR also restores the
		 * CPPR we had before accepting the interrupt, so only
		 * the software IPL needs to be reset here.
		 */
		xicp_write_4(sc, XICP_XIRR, xirr);
		ci->ci_cpl = old;
	}
}