/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_umtx.c,v 1.5 2006/06/05 07:26:10 dillon Exp $
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */
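
/*
 * Illustrative userland usage (a minimal, hedged sketch; not part of this
 * module).  It assumes the userland prototypes for umtx_sleep() and
 * umtx_wakeup() from <unistd.h> and atomic_cmpset_int() from
 * <machine/atomic.h>; the umtx_lock()/umtx_unlock() wrapper names are
 * hypothetical.  The mutex word is 0 (free), 1 (owned), or 2 (contested),
 * and umtx_wakeup() is only issued when releasing a contested mutex:
 *
 *	static void
 *	umtx_lock(volatile u_int *mtx)
 *	{
 *		if (atomic_cmpset_int(mtx, 0, 1))
 *			return;				(uncontested acquire)
 *		for (;;) {
 *			atomic_cmpset_int(mtx, 1, 2);	(announce contention)
 *			umtx_sleep((volatile const int *)mtx, 2, 0);
 *			if (atomic_cmpset_int(mtx, 0, 2))
 *				return;			(acquired)
 *		}
 *	}
 *
 *	static void
 *	umtx_unlock(volatile u_int *mtx)
 *	{
 *		if (atomic_cmpset_int(mtx, 1, 0))
 *			return;				(nobody was waiting)
 *		atomic_cmpset_int(mtx, 2, 0);
 *		umtx_wakeup((volatile const int *)mtx, 1);
 *	}
 *
 * A waiter that re-acquires the mutex conservatively marks it contested
 * even when it is the only waiter; the cost is at worst one spurious
 * umtx_wakeup() on the next release.
 */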

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/module.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

/*
 * If the contents of the userland-supplied pointer matches the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match, return immediately.
 *
 * The specified timeout may not exceed 1 second.  A timeout of 0
 * indicates an unlimited sleep.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer did not match
 * the specified value.  EINTR is returned if the call was interrupted
 * by a signal.
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long
 * as we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held to prevent reuse, in
 * order to guarantee that its physical address remains consistent.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
    int error = EBUSY;
    vm_paddr_t pa;
    vm_page_t m;
    void *waddr;
    int timeout;

    if ((unsigned int)uap->timeout > 1000000)
	return (EINVAL);
    if (vm_fault_quick((caddr_t)__DEQUALIFY(int *, uap->ptr), VM_PROT_READ) < 0)
	return (EFAULT);

    if (fuword(__DEQUALIFY(const int *, uap->ptr)) == uap->value) {
	if ((pa = pmap_kextract((vm_offset_t)uap->ptr)) == 0)
	    return (EFAULT);
	m = PHYS_TO_VM_PAGE(pa);
	vm_page_hold(m);

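	/*
	 * Convert the timeout from microseconds to ticks, rounding up so
	 * that a small non-zero timeout cannot round down to 0 ticks,
	 * which would mean an unlimited sleep.
	 */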
	if ((timeout = uap->timeout) != 0)
	    timeout = (timeout * hz + 999999) / 1000000;
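	/*
	 * Use the physical address as the wait channel token so that
	 * processes mapping the same page at different virtual addresses
	 * rendezvous on the same channel.
	 */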
	waddr = (void *)((intptr_t)pa + ((intptr_t)uap->ptr & PAGE_MASK));
	error = tsleep(waddr, PCATCH|PDOMAIN_UMTX, "umtxsl", timeout);
	vm_page_unhold(m);
	/* A timed wait cannot be restarted. */
	if (timeout != 0 && error == ERESTART)
		error = EINTR;
    } else {
	error = EBUSY;
    }
    return(error);
}
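
/*
 * Timeout handling (an illustrative sketch; the umtx_timedlock() wrapper
 * below is hypothetical and not part of this file).  Because each sleep
 * is capped at 1 second, a longer timed lock must loop and re-issue
 * umtx_sleep().  Assumes the mutex protocol from the sketch near the top
 * of this file, plus <errno.h>; the time accounting is deliberately
 * coarse, and a real implementation would recompute the remaining time
 * from the clock:
 *
 *	static int
 *	umtx_timedlock(volatile u_int *mtx, int timeout_us)
 *	{
 *		if (atomic_cmpset_int(mtx, 0, 1))
 *			return (0);
 *		for (;;) {
 *			int slice = (timeout_us > 1000000) ?
 *				    1000000 : timeout_us;
 *
 *			atomic_cmpset_int(mtx, 1, 2);
 *			if (umtx_sleep((volatile const int *)mtx, 2,
 *			    slice) < 0 && errno == EINTR)
 *				return (EINTR);
 *			if (atomic_cmpset_int(mtx, 0, 2))
 *				return (0);
 *			if ((timeout_us -= slice) <= 0)
 *				return (ETIMEDOUT);
 *		}
 *	}
 */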

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes sleeping in umtx_sleep() on
 * the specified user address.  A count of 0 wakes up all waiting
 * processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
    vm_paddr_t pa;
    void *waddr;

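    /*
     * Full memory fence: make the caller's prior store to *ptr (e.g. the
     * mutex release) globally visible before we proceed with the wakeup.
     */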
    cpu_mfence();
    if (vm_fault_quick((caddr_t)__DEQUALIFY(int *, uap->ptr), VM_PROT_READ) < 0)
	return (EFAULT);
    if ((pa = pmap_kextract((vm_offset_t)uap->ptr)) == 0)
	return (EFAULT);
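    /*
     * Derive the same physical-address-based wait channel that
     * umtx_sleep() sleeps on.
     */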
    waddr = (void *)((intptr_t)pa + ((intptr_t)uap->ptr & PAGE_MASK));
    if (uap->count == 1) {
	wakeup_domain_one(waddr, PDOMAIN_UMTX);
    } else {
	/* XXX wakes them all up for now */
	wakeup_domain(waddr, PDOMAIN_UMTX);
    }
    return(0);
}