/* $OpenBSD: ipifuncs.c,v 1.26 2022/12/05 08:59:28 visa Exp $ */
/* $NetBSD: ipifuncs.c,v 1.40 2008/04/28 20:23:10 martin Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/intr.h>

int	mips64_ipi_intr(void *);
void	mips64_ipi_nop(void);
void	smp_rendezvous_action(void);
void	mips64_ipi_ddb(void);
void	mips64_multicast_ipi(unsigned int, unsigned int);

struct evcount ipi_count;
unsigned int ipi_irq = 0;
unsigned int ipi_mailbox[MAXCPUS];

/* Variables needed for SMP rendezvous. */
struct mutex smp_rv_mtx;
volatile unsigned long smp_rv_map;
void (*volatile smp_rv_action_func)(void *arg);
void * volatile smp_rv_func_arg;
volatile unsigned int smp_rv_waiters[2];

/*
 * NOTE: This table must be kept in order with the bit definitions
 * in <machine/intr.h>.
 */
typedef void (*ipifunc_t)(void);

ipifunc_t ipifuncs[MIPS64_NIPIS] = {
	mips64_ipi_nop,
	smp_rendezvous_action,
	mips64_ipi_ddb
};
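
/*
 * For example, sending MIPS64_IPI_RENDEZVOUS sets the corresponding bit
 * in ipi_mailbox[], and the dispatch loop in mips64_ipi_intr() below then
 * calls the matching slot of this table, i.e. smp_rendezvous_action().
 */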

/*
 * Initialize IPI state for a CPU.
 */
void
mips64_ipi_init(void)
{
	cpuid_t cpuid = cpu_number();
	int error;

	if (!cpuid) {
		mtx_init(&smp_rv_mtx, IPL_HIGH);
		evcount_attach(&ipi_count, "ipi", &ipi_irq);
		evcount_percpu(&ipi_count);
	}

	hw_ipi_intr_clear(cpuid);

	error = hw_ipi_intr_establish(mips64_ipi_intr, cpuid);
	if (error)
		panic("hw_ipi_intr_establish failed:%d", error);
}

/*
 * Process IPIs for a CPU.
 */
int
mips64_ipi_intr(void *arg)
{
	unsigned int pending_ipis, bit;
	unsigned int cpuid = (unsigned int)(unsigned long)arg;

	KASSERT(cpuid == cpu_number());

	/* clear ipi interrupt */
	hw_ipi_intr_clear(cpuid);
	/* get and clear pending ipis */
	pending_ipis = atomic_swap_uint(&ipi_mailbox[cpuid], 0);

	if (pending_ipis > 0) {
		for (bit = 0; bit < MIPS64_NIPIS; bit++) {
			if (pending_ipis & (1UL << bit)) {
				(*ipifuncs[bit])();
				evcount_inc(&ipi_count);
			}
		}
	}

	return 1;
}

static void
do_send_ipi(unsigned int cpuid, unsigned int ipimask)
{
#ifdef DEBUG
	struct cpu_info *ci = get_cpu_info(cpuid);

	if (ci == NULL)
		panic("do_send_ipi: bogus cpu_id");
	if (!CPU_IS_RUNNING(ci))
		panic("do_send_ipi: CPU %u not running", cpuid);
#endif

	atomic_setbits_int(&ipi_mailbox[cpuid], ipimask);

	hw_ipi_intr_set(cpuid);
}

/*
 * Send an interprocessor interrupt.
 */
void
mips64_send_ipi(unsigned int cpuid, unsigned int ipimask)
{
	/*
	 * Ensure that preceding stores are visible to other CPUs
	 * before sending the IPI.
	 */
	membar_producer();

	do_send_ipi(cpuid, ipimask);
}
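
/*
 * Illustrative sketch of a caller (hypothetical; "pending_work", WORK_FLAG
 * and the use of a MIPS64_IPI_NOP bit are assumptions, not taken from this
 * file):
 *
 *	atomic_setbits_int(&pending_work[cpuid], WORK_FLAG);
 *	mips64_send_ipi(cpuid, MIPS64_IPI_NOP);
 *
 * The membar_producer() above guarantees the pending_work update is
 * visible on the target CPU before the IPI handler runs there.
 */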

/*
 * Send an IPI to all in the list but ourselves.
 */
void
mips64_multicast_ipi(unsigned int cpumask, unsigned int ipimask)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	cpumask &= ~(1 << cpu_number());

	CPU_INFO_FOREACH(cii, ci) {
		if (!(cpumask & (1UL << ci->ci_cpuid)) || !CPU_IS_RUNNING(ci))
			continue;
		do_send_ipi(ci->ci_cpuid, ipimask);
	}
}

void
mips64_ipi_nop(void)
{
#ifdef DEBUG
	printf("mips64_ipi_nop on cpu%lu\n", cpu_number());
#endif
}

/*
 * All-CPU rendezvous.  The CPUs named in the map are signalled, rendezvous,
 * all execute the action function, rendezvous again, and then resume.
 * (See the illustrative sketch after smp_rendezvous_cpus() below.)
 *
 * Note that the supplied action function _must_ be reentrant and aware
 * that it is running in parallel and in an unknown lock context.
 */

void
smp_rendezvous_action(void)
{
	unsigned int cpumask = 1 << cpu_number();

	/* Signal readiness and acquire pre-action state. */
	atomic_setbits_int(&smp_rv_waiters[0], cpumask);
	membar_enter_after_atomic();

	while (smp_rv_waiters[0] != smp_rv_map)
		CPU_BUSY_CYCLE();

	(*smp_rv_action_func)(smp_rv_func_arg);

	/* Release post-action state and signal completion. */
	membar_exit_before_atomic();
	atomic_setbits_int(&smp_rv_waiters[1], cpumask);
}

void
smp_rendezvous_cpus(unsigned long map,
	void (*action_func)(void *),
	void *arg)
{
	unsigned int cpumask = 1 << cpu_number();

	if (cpumask == map) {
		(*action_func)(arg);
		return;
	}

	mtx_enter(&smp_rv_mtx);

	/* Publish the rendezvous parameters and reset the waiter state. */
	smp_rv_map = map;
	smp_rv_action_func = action_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* Release pre-action state before IPI send. */
	membar_exit();

	/* Signal the other processors; they enter the IPI with interrupts off. */
	mips64_multicast_ipi(map, MIPS64_IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map. */
	if (map & cpumask)
		smp_rendezvous_action();

	while (smp_rv_waiters[1] != smp_rv_map)
		CPU_BUSY_CYCLE();

	/* Acquire post-action state after read. */
	membar_sync();

	smp_rv_action_func = NULL;

	mtx_leave(&smp_rv_mtx);
}
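
/*
 * Illustrative sketch (not compiled) of how smp_rendezvous_cpus() might be
 * driven; example_action(), example_rendezvous() and their variables are
 * hypothetical names, not part of this file.
 */
#if 0
void
example_action(void *arg)
{
	/* Runs once on each CPU in the map; on remote CPUs this is IPI context. */
	atomic_inc_int((unsigned int *)arg);
}

void
example_rendezvous(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	unsigned long map = 0;
	unsigned int count = 0;

	/* Build a map of all running CPUs, including the calling CPU. */
	CPU_INFO_FOREACH(cii, ci) {
		if (CPU_IS_RUNNING(ci))
			map |= 1UL << ci->ci_cpuid;
	}

	smp_rendezvous_cpus(map, example_action, &count);

	/* At this point every CPU in the map has run example_action() once. */
}
#endif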

void
mips64_ipi_ddb(void)
{
#ifdef DDB
	db_enter();
#endif
}
261