xref: /dragonfly/sys/kern/kern_umtx.c (revision d9d67b59)
1 /*
2  * Copyright (c) 2003,2004,2010,2017 The DragonFly Project.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * This module implements userland mutex helper functions.  umtx_sleep()
 38  * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
39  * functions operate on user addresses.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/cdefs.h>
45 #include <sys/kernel.h>
46 #include <sys/sysproto.h>
47 #include <sys/sysunion.h>
48 #include <sys/sysent.h>
49 #include <sys/syscall.h>
50 #include <sys/sysctl.h>
51 #include <sys/module.h>
52 
53 #include <cpu/lwbuf.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <sys/lock.h>
58 #include <vm/pmap.h>
59 #include <vm/vm_map.h>
60 #include <vm/vm_object.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_pager.h>
63 #include <vm/vm_pageout.h>
64 #include <vm/vm_extern.h>
65 #include <vm/vm_kern.h>
66 
67 #include <vm/vm_page2.h>
68 
69 #include <machine/vmm.h>
70 
71 /*
72  * Improve umtx performance by polling for 4000nS before going to sleep.
73  * This can avoid many IPIs in typical pthreads mutex situations.
74  */
75 #ifdef _RDTSC_SUPPORTED_
76 static int umtx_delay = 4000;		/* nS */
77 SYSCTL_INT(_kern, OID_AUTO, umtx_delay, CTLFLAG_RW,
78 	   &umtx_delay, 0, "");
79 #endif
80 static int umtx_timeout_max = 2000000;	/* microseconds */
81 SYSCTL_INT(_kern, OID_AUTO, umtx_timeout_max, CTLFLAG_RW,
82 	   &umtx_timeout_max, 0, "");
83 
84 /*
85  * If the contents of the userland-supplied pointer matches the specified
86  * value enter an interruptable sleep for up to <timeout> microseconds.
87  * If the contents does not match then return immediately.
88  *
89  * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
90  * and timed out, and EBUSY if the contents of the pointer already does
91  * not match the specified value.  A timeout of 0 indicates an unlimited sleep.
92  * EINTR is returned if the call was interrupted by a signal (even if
93  * the signal specifies that the system call should restart).
94  *
95  * This function interlocks against call to umtx_wakeup.  It does NOT interlock
96  * against changes in *ptr.  However, it does not have to.  The standard use
97  * of *ptr is to differentiate between an uncontested and a contested mutex
98  * and call umtx_wakeup when releasing a contested mutex.  Therefore we can
99  * safely race against changes in *ptr as long as we are properly interlocked
100  * against the umtx_wakeup() call.
101  *
102  * For performance reasons, we do not try to track the underlying page for
103  * mapping changes.  Instead, the timeout is capped at kern.umtx_timeout_max
 104  * (default 2 seconds) and the caller is expected to retry.  The kernel
105  * will wake all umtx_sleep()s if the process fork()s, but not if it vfork()s.
106  * Other mapping changes must be caught by the timeout.
107  *
108  * umtx_sleep { const int *ptr, int value, int timeout }
109  */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
    void *waddr;	/* sleep channel: physical address of the mutex word */
    void *uptr;		/* de-qualified user virtual address */
    int offset;		/* NOTE(review): computed below but never used */
    int timeout;	/* converted to ticks before use */
    int error;
    int value;

    if (uap->timeout < 0)
	return (EINVAL);

    /*
     * Under VMM the supplied guest virtual address must first be
     * translated to a guest physical address.
     */
    if (curthread->td_vmm) {
	register_t gpa;
	vmm_vm_get_gpa(curproc, &gpa, (register_t)uap->ptr);
	uap->ptr = (const int *)gpa;
    }

    /* The userland pointer must be int-aligned */
    uptr = __DEQUALIFY(void *, uap->ptr);
    if ((vm_offset_t)uptr & (sizeof(int) - 1))
	return EFAULT;

    offset = (vm_offset_t)uptr & PAGE_MASK;

    /*
     * Initial quick check.  fuword32() returns -1 both for a stored
     * value of -1 and for an unmapped address, so if -1 is returned
     * distinguish between EBUSY and EINVAL via uservtophys().
     */
    value = fuword32(uptr);
    if (value == -1 && uservtophys((intptr_t)uap->ptr) == (vm_paddr_t)-1)
	return EINVAL;

    error = EBUSY;
    if (value == uap->value) {
#ifdef _RDTSC_SUPPORTED_
	/*
	 * Poll a little while before sleeping, most mutexes are
	 * short-lived.
	 */
	if (umtx_delay) {
		int64_t tsc_target;
		int good = 0;

		tsc_target = tsc_get_target(umtx_delay);
		while (tsc_test_target(tsc_target) == 0) {
			cpu_lfence();
			if (fuword32(uptr) != uap->value) {
				good = 1;
				break;
			}
			cpu_pause();
		}
		if (good) {
			/* value changed while polling, no need to sleep */
			error = EBUSY;
			goto done;
		}
	}
#endif
	/*
	 * Calculate the timeout.  This will be accurate to within ~2 ticks.
	 * uap->timeout is in microseconds; it is clamped to umtx_timeout_max
	 * and converted to ticks, rounding up so a non-zero request never
	 * collapses to a zero-tick (i.e. unlimited) tsleep.
	 */
	timeout = umtx_timeout_max;
	if (uap->timeout && uap->timeout < timeout)
		timeout = uap->timeout;
	timeout = (timeout / 1000000) * hz +
		  ((timeout % 1000000) * hz + 999999) / 1000000;

	/*
	 * Calculate the physical address of the mutex.  This gives us
	 * good distribution between unrelated processes using the
	 * feature.
	 */
	waddr = (void *)uservtophys((intptr_t)uap->ptr);
	if (waddr == (void *)(intptr_t)-1) {
	    error = EINVAL;
	    goto done;
	}

	/*
	 * Wake us up if the memory location COWs while we are sleeping.
	 * Use a critical section to tighten up the interlock.  Also,
	 * tsleep_remove() requires the caller be in a critical section.
	 */
	crit_enter();

	/*
	 * We must interlock just before sleeping.  If we interlock before
	 * registration the lock operations done by the registration can
	 * interfere with it.
	 *
	 * We cannot leave our interlock hanging on return because this
	 * will interfere with umtx_wakeup() calls with limited wakeup
	 * counts.
	 */
	tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
	/* re-check *uptr after the interlock to close the wakeup race */
	cpu_lfence();
	if (fuword32(uptr) == uap->value) {
		error = tsleep(waddr, PCATCH | PINTERLOCKED | PDOMAIN_UMTX,
			       "umtxsl", timeout);
	} else {
		error = EBUSY;
	}
	crit_exit();
	/* Always break out in case of signal, even if restartable */
	if (error == ERESTART)
		error = EINTR;
    } else {
	error = EBUSY;
    }
done:
    return(error);
}
224 
225 /*
226  * umtx_wakeup { const int *ptr, int count }
227  *
228  * Wakeup the specified number of processes held in umtx_sleep() on the
229  * specified user address.  A count of 0 wakes up all waiting processes.
230  */
231 int
232 sys_umtx_wakeup(struct umtx_wakeup_args *uap)
233 {
234     int offset;
235     int error;
236     void *waddr;
237 
238     if (curthread->td_vmm) {
239 	register_t gpa;
240 	vmm_vm_get_gpa(curproc, &gpa, (register_t)uap->ptr);
241 	uap->ptr = (const int *)gpa;
242     }
243 
244     /*
245      * WARNING! We can only use vm_fault_page*() for reading data.  We
246      *		cannot use it for writing data because there is no pmap
247      *	        interlock to protect against flushes/pageouts.
248      */
249     cpu_mfence();
250     if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
251 	return EFAULT;
252 
253     offset = (vm_offset_t)uap->ptr & PAGE_MASK;
254     waddr = (void *)uservtophys((intptr_t)uap->ptr);
255     if (waddr == (void *)(intptr_t)-1)
256 	return EINVAL;
257 
258     if (uap->count == 1) {
259 	wakeup_domain_one(waddr, PDOMAIN_UMTX);
260     } else {
261 	/* XXX wakes them all up for now */
262 	wakeup_domain(waddr, PDOMAIN_UMTX);
263     }
264     error = 0;
265 
266     return(error);
267 }
268