1 /* $NetBSD: klock.c,v 1.10 2016/07/07 06:55:44 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2007-2010 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Finnish Cultural Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
19 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: klock.c,v 1.10 2016/07/07 06:55:44 msaitoh Exp $");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/evcnt.h>
37
38 #include <rump-sys/kern.h>
39
40 #include <rump/rumpuser.h>
41
42 /*
43 * giant lock
44 */
45
46 struct rumpuser_mtx *rump_giantlock;
47 static int giantcnt;
48 static struct lwp *giantowner;
49
50 static struct evcnt ev_biglock_fast;
51 static struct evcnt ev_biglock_slow;
52 static struct evcnt ev_biglock_recurse;
53
54 void
rump_biglock_init(void)55 rump_biglock_init(void)
56 {
57
58 evcnt_attach_dynamic(&ev_biglock_fast, EVCNT_TYPE_MISC, NULL,
59 "rump biglock", "fast");
60 evcnt_attach_dynamic(&ev_biglock_slow, EVCNT_TYPE_MISC, NULL,
61 "rump biglock", "slow");
62 evcnt_attach_dynamic(&ev_biglock_recurse, EVCNT_TYPE_MISC, NULL,
63 "rump biglock", "recurse");
64 }
65
66 void
rump_kernel_bigwrap(int * nlocks)67 rump_kernel_bigwrap(int *nlocks)
68 {
69
70 KASSERT(giantcnt > 0 && curlwp == giantowner);
71 giantowner = NULL;
72 *nlocks = giantcnt;
73 giantcnt = 0;
74 }
75
76 void
rump_kernel_bigunwrap(int nlocks)77 rump_kernel_bigunwrap(int nlocks)
78 {
79
80 KASSERT(giantowner == NULL);
81 giantowner = curlwp;
82 giantcnt = nlocks;
83 }
84
85 void
_kernel_lock(int nlocks)86 _kernel_lock(int nlocks)
87 {
88 struct lwp *l = curlwp;
89
90 while (nlocks) {
91 if (giantowner == l) {
92 giantcnt += nlocks;
93 nlocks = 0;
94 ev_biglock_recurse.ev_count++;
95 } else {
96 if (rumpuser_mutex_tryenter(rump_giantlock) != 0) {
97 rump_unschedule_cpu1(l, NULL);
98 rumpuser_mutex_enter_nowrap(rump_giantlock);
99 rump_schedule_cpu(l);
100 ev_biglock_slow.ev_count++;
101 } else {
102 ev_biglock_fast.ev_count++;
103 }
104 giantowner = l;
105 giantcnt = 1;
106 nlocks--;
107 }
108 }
109 }
110
111 void
_kernel_unlock(int nlocks,int * countp)112 _kernel_unlock(int nlocks, int *countp)
113 {
114
115 if (giantowner != curlwp) {
116 KASSERT(nlocks == 0);
117 if (countp)
118 *countp = 0;
119 return;
120 }
121
122 if (countp)
123 *countp = giantcnt;
124 if (nlocks == 0)
125 nlocks = giantcnt;
126 if (nlocks == -1) {
127 KASSERT(giantcnt == 1);
128 nlocks = 1;
129 }
130 KASSERT(nlocks <= giantcnt);
131 while (nlocks--) {
132 giantcnt--;
133 }
134
135 if (giantcnt == 0) {
136 giantowner = NULL;
137 rumpuser_mutex_exit(rump_giantlock);
138 }
139 }
140
141 bool
_kernel_locked_p(void)142 _kernel_locked_p(void)
143 {
144
145 return giantowner == curlwp;
146 }
147
/*
 * Release the biglock (per the _kernel_unlock() nlocks/countp
 * convention) and then give up the rump virtual CPU, passing
 * interlock through to the scheduler.  The unlock must happen
 * before unscheduling.
 */
void
rump_user_unschedule(int nlocks, int *countp, void *interlock)
{

	_kernel_unlock(nlocks, countp);
	/*
	 * XXX: technically we should unschedule_cpu1() here, but that
	 * requires rump_intr_enter/exit to be implemented.
	 */
	rump_unschedule_cpu_interlock(curlwp, interlock);
}
159
/*
 * Counterpart of rump_user_unschedule(): first reacquire a rump
 * virtual CPU (with interlock), then retake the biglock nlocks
 * times if any were held before.  Scheduling must precede locking.
 */
void
rump_user_schedule(int nlocks, void *interlock)
{

	rump_schedule_cpu_interlock(curlwp, interlock);

	if (nlocks)
		_kernel_lock(nlocks);
}
169