xref: /dragonfly/sys/kern/kern_umtx.c (revision b8c93cad)
/*
 * (MPSAFE)
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */

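/*
 * Illustrative userland usage (a minimal sketch, NOT the actual libc or
 * libthread_xu implementation): a mutex is typically a state word
 * (0 = free, 1 = locked, 2 = locked with waiters) updated with atomic ops,
 * falling back to umtx_sleep()/umtx_wakeup() only when the fast path fails.
 * The retry loop also shows the usual handling of the documented return
 * values (EBUSY, EWOULDBLOCK, EINTR): all of them simply cause the caller
 * to re-examine the state word.  The sketch assumes atomic_cmpset_int()
 * and atomic_swap_int() plus the umtx_sleep()/umtx_wakeup() syscall stubs
 * are available to userland code.
 *
 *	static void
 *	example_lock(volatile u_int *mtx)
 *	{
 *		// Fast path: 0 -> 1 acquires the mutex uncontested.
 *		if (atomic_cmpset_int(mtx, 0, 1))
 *			return;
 *		// Slow path: mark the mutex contested (2) and sleep for as
 *		// long as it still reads 2.  umtx_sleep() returns
 *		// immediately with EBUSY if the value changed under us.
 *		while (atomic_swap_int(mtx, 2) != 0)
 *			umtx_sleep((volatile const int *)mtx, 2, 0);
 *	}
 *
 *	static void
 *	example_unlock(volatile u_int *mtx)
 *	{
 *		// Only a contested (2) mutex can have sleepers to wake.
 *		if (atomic_swap_int(mtx, 0) == 2)
 *			umtx_wakeup((volatile const int *)mtx, 1);
 *	}
 */
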
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/module.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <vm/vm_page2.h>

#include <machine/vmm.h>

static void umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action);

/*
 * If the contents of the userland-supplied pointer match the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match, return immediately.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer already do not
 * match the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long
 * as we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held in an attempt to keep
 * the mutex's physical address consistent, allowing umtx_sleep() and
 * umtx_wakeup() to use the physical address as their rendezvous.  BUT
 * situations can arise where the physical address may change, particularly
 * if a threaded program fork()'s and the mutex's memory becomes
 * copy-on-write.  We register an event on the VM page to catch COWs.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
    struct lwbuf lwb_cache;
    struct lwbuf *lwb;
    struct vm_page_action action;
    vm_page_t m;
    void *waddr;
    int offset;
    int timeout;
    int error = EBUSY;

    if (uap->timeout < 0)
	return (EINVAL);

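    /*
     * If the thread is running under VMM the supplied pointer is a guest
     * virtual address; translate it to the guest physical address, which
     * is what the rest of this function operates on.
     */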
    if (curthread->td_vmm) {
	register_t gpa;
	vmm_vm_get_gpa(curproc, &gpa, (register_t) uap->ptr);
	uap->ptr = (const int *)gpa;
    }

    if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
	return (EFAULT);

    /*
     * When faulting in the page, force any COW pages to be resolved.
     * Otherwise the physical page we sleep on may not match the page
     * being woken up.
     *
     * WARNING! We can only use vm_fault_page*() for reading data.  We
     *		cannot use it for writing data because there is no pmap
     *		interlock to protect against flushes/pageouts.
     */
    m = vm_fault_page_quick((vm_offset_t)uap->ptr,
			    VM_PROT_READ|VM_PROT_WRITE, &error, NULL);
    if (m == NULL) {
	error = EFAULT;
	goto done;
    }
    lwb = lwbuf_alloc(m, &lwb_cache);
    offset = (vm_offset_t)uap->ptr & PAGE_MASK;

    /*
     * The critical section is required to interlock the tsleep against
     * a wakeup from another cpu.  The lfence forces synchronization.
     */
    if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
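	/*
	 * Convert the timeout from microseconds to scheduler ticks.  The
	 * fractional-second remainder is rounded up so any non-zero
	 * timeout yields at least one tick instead of truncating to a
	 * 0-tick (unlimited) tsleep.
	 */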
	if ((timeout = uap->timeout) != 0) {
	    timeout = (timeout / 1000000) * hz +
		      ((timeout % 1000000) * hz + 999999) / 1000000;
	}
	waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

	/*
	 * Wake us up if the memory location COWs while we are sleeping.
	 */
	crit_enter();
	vm_page_init_action(m, &action, umtx_sleep_page_action_cow, waddr);
	vm_page_register_action(&action, VMEVENT_COW);

	/*
	 * We must interlock just before sleeping.  If we interlock before
	 * registration the lock operations done by the registration can
	 * interfere with it.
	 */
	tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
	if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value &&
	    action.event == VMEVENT_COW) {
		error = tsleep(waddr, PCATCH | PINTERLOCKED | PDOMAIN_UMTX,
			       "umtxsl", timeout);
	} else {
		error = EBUSY;
	}
	vm_page_unregister_action(&action);
	crit_exit();
	/* Always break out in case of signal, even if restartable */
	if (error == ERESTART)
		error = EINTR;
    } else {
	error = EBUSY;
    }

    lwbuf_free(lwb);
    vm_page_unhold(m);
done:
    return(error);
}

/*
 * If this page is being copied it may no longer represent the page
 * underlying our virtual address.  Wake up any umtx_sleep()'s
 * that were waiting on its physical address to force them to retry.
 */
static void
umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action)
{
    wakeup_domain(action->data, PDOMAIN_UMTX);
}

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
    vm_page_t m;
    int offset;
    int error;
    void *waddr;

    if (curthread->td_vmm) {
	register_t gpa;
	vmm_vm_get_gpa(curproc, &gpa, (register_t) uap->ptr);
	uap->ptr = (const int *)gpa;
    }

    /*
     * WARNING! We can only use vm_fault_page*() for reading data.  We
     *		cannot use it for writing data because there is no pmap
     *		interlock to protect against flushes/pageouts.
     */
    cpu_mfence();
    if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
	return (EFAULT);
    m = vm_fault_page_quick((vm_offset_t)uap->ptr,
			    VM_PROT_READ, &error, NULL);
    if (m == NULL) {
	error = EFAULT;
	goto done;
    }
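    /*
     * Use the page's physical address plus the in-page offset as the
     * wakeup rendezvous so we hit the same PDOMAIN_UMTX address that
     * umtx_sleep() slept on, regardless of the virtual address each
     * process mapped the page at.
     */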
    offset = (vm_offset_t)uap->ptr & PAGE_MASK;
    waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

    if (uap->count == 1) {
	wakeup_domain_one(waddr, PDOMAIN_UMTX);
    } else {
	/* XXX wakes them all up for now */
	wakeup_domain(waddr, PDOMAIN_UMTX);
    }
    vm_page_unhold(m);
    error = 0;
done:
    return(error);
}