/*	$NetBSD: rumpcopy.c,v 1.25 2020/07/01 00:42:13 riastradh Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpcopy.c,v 1.25 2020/07/01 00:42:13 riastradh Exp $");

#define	__UFETCHSTORE_PRIVATE
#define	__UCAS_PRIVATE

#include <sys/param.h>
#include <sys/lwp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

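/*
 * copyin: copy len bytes from a client ("user") address to a kernel
 * address.  A local client shares the address space with the rump
 * kernel, so a plain memcpy suffices; a remote client is serviced
 * over the sysproxy channel.
 */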
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	int error = 0;

	if (len == 0)
		return 0;

	if (__predict_false(uaddr == NULL && len)) {
		return EFAULT;
	}

	if (RUMP_LOCALPROC_P(curproc)) {
		memcpy(kaddr, uaddr, len);
	} else if (len) {
		error = rump_sysproxy_copyin(RUMP_SPVM2CTL(curproc->p_vmspace),
		    uaddr, kaddr, len);
	}

	return error;
}

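/*
 * copyout: the mirror image of copyin, copying len bytes from a
 * kernel address out to a client address.
 */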
int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	int error = 0;

	if (len == 0)
		return 0;

	if (__predict_false(uaddr == NULL && len)) {
		return EFAULT;
	}

	if (RUMP_LOCALPROC_P(curproc)) {
		memcpy(uaddr, kaddr, len);
	} else if (len) {
		error = rump_sysproxy_copyout(RUMP_SPVM2CTL(curproc->p_vmspace),
		    kaddr, uaddr, len);
	}
	return error;
}

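/*
 * Usage sketch (hypothetical caller, not part of this file; the
 * struct and helper names are assumptions): a typical consumer copies
 * a request in, operates on it, and copies the result back out:
 *
 *	struct example_req er;
 *	int error;
 *
 *	if ((error = copyin(uaddr, &er, sizeof(er))) != 0)
 *		return error;
 *	er.er_result = frobnicate(&er);
 *	return copyout(&er, uaddr, sizeof(er));
 */

/*
 * copyinstr: copy in a NUL-terminated string of at most len bytes.
 * The local case delegates to copystr(); in the remote case the
 * proxied buffer is scanned for a terminating NUL to distinguish
 * success from ENAMETOOLONG.
 */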
int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	uint8_t *to;
	int rv;

	if (len == 0)
		return 0;

	if (__predict_false(uaddr == NULL)) {
		return EFAULT;
	}

	if (RUMP_LOCALPROC_P(curproc))
		return copystr(uaddr, kaddr, len, done);

	if ((rv = rump_sysproxy_copyinstr(RUMP_SPVM2CTL(curproc->p_vmspace),
	    uaddr, kaddr, &len)) != 0)
		return rv;

	/* figure out if we got a terminated string or not */
	to = (uint8_t *)kaddr + (len-1);
	while (to >= (uint8_t *)kaddr) {
		if (*to == 0)
			goto found;
		to--;
	}
	return ENAMETOOLONG;

 found:
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */

	return 0;
}

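/*
 * copyoutstr: copy a NUL-terminated kernel string of at most len
 * bytes (terminator included) out to a client address.
 */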
int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{
	size_t slen;
	int error;

	if (len == 0)
		return 0;

	if (__predict_false(uaddr == NULL && len)) {
		return EFAULT;
	}

	if (RUMP_LOCALPROC_P(curproc))
		return copystr(kaddr, uaddr, len, done);

	slen = strlen(kaddr)+1;
	if (slen > len)
		return ENAMETOOLONG;

	error = rump_sysproxy_copyoutstr(RUMP_SPVM2CTL(curproc->p_vmspace),
	    kaddr, uaddr, &slen);
	if (done)
		*done = slen;

	return error;
}

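/*
 * kcopy: kernel-to-kernel copy.  Both addresses are always mapped in
 * a rump kernel, so this reduces to memcpy.
 */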
int
kcopy(const void *src, void *dst, size_t len)
{

	if (len == 0)
		return 0;

	memcpy(dst, src, len);
	return 0;
}

/*
 * Low-level I/O routine.  This is used only when "all else fails",
 * i.e. the current thread does not have an appropriate vm context.
 */
int
uvm_io(struct vm_map *vm, struct uio *uio, int flag)
{
	int error = 0;

	/* loop over iovecs one-by-one and copyout */
	for (; uio->uio_resid && uio->uio_iovcnt;
	    uio->uio_iovcnt--, uio->uio_iov++) {
		struct iovec *iov = uio->uio_iov;
		size_t curlen = MIN(uio->uio_resid, iov->iov_len);

		if (__predict_false(curlen == 0))
			continue;

		if (uio->uio_rw == UIO_READ) {
			error = rump_sysproxy_copyin(RUMP_SPVM2CTL(vm),
			    (void *)(vaddr_t)uio->uio_offset, iov->iov_base,
			    curlen);
		} else {
			error = rump_sysproxy_copyout(RUMP_SPVM2CTL(vm),
			    iov->iov_base, (void *)(vaddr_t)uio->uio_offset,
			    curlen);
		}
		if (error)
			break;

		iov->iov_base = (uint8_t *)iov->iov_base + curlen;
		iov->iov_len -= curlen;

		uio->uio_resid -= curlen;
		uio->uio_offset += curlen;
	}

	return error;
}

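/*
 * _ucas_32/_ucas_64: compare-and-swap on a client address.  Atomicity
 * with respect to other threads running in the rump kernel is
 * approximated by disabling kernel preemption around the fetch/store
 * pair (cf. the XXXXJRT question below about an MP CPU gate).
 *
 * Usage sketch (hypothetical caller, not part of this file): a
 * lock-free increment of a client word retries until the CAS observes
 * the value it expected:
 *
 *	uint32_t expected, actual;
 *	int error;
 *
 *	do {
 *		if ((error = _ufetch_32(uaddr, &expected)) != 0)
 *			return error;
 *		if ((error = _ucas_32(uaddr, expected, expected + 1,
 *		    &actual)) != 0)
 *			return error;
 *	} while (actual != expected);
 */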
int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/* XXXXJRT do we need a MP CPU gate? */

	kpreempt_disable();
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	kpreempt_enable();

	return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/* XXXXJRT do we need a MP CPU gate? */

	kpreempt_disable();
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	kpreempt_enable();

	return error;
}
#endif /* _LP64 */

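/*
 * _ufetch_<sz>: fetch a single 8/16/32 (and, on _LP64, 64) bit value
 * from a client address, expanded from the template below.  Local
 * clients dereference directly; remote clients go through sysproxy.
 */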
#define	UFETCH(sz)							\
int									\
_ufetch_ ## sz(const uint ## sz ##_t *uaddr, uint ## sz ## _t *valp)	\
{									\
	int error = 0;							\
									\
	if (RUMP_LOCALPROC_P(curproc)) {				\
		*valp = *uaddr;						\
	} else {							\
		error = rump_sysproxy_copyin(				\
		    RUMP_SPVM2CTL(curproc->p_vmspace),			\
		    uaddr, valp, sizeof(*valp));			\
	}								\
	return error;							\
}

UFETCH(8)
UFETCH(16)
UFETCH(32)
#ifdef _LP64
UFETCH(64)
#endif

#undef UFETCH

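/*
 * _ustore_<sz>: store a single value to a client address; the mirror
 * image of the _ufetch_<sz> template above.
 */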
#define	USTORE(sz)							\
int									\
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val)		\
{									\
	int error = 0;							\
									\
	if (RUMP_LOCALPROC_P(curproc)) {				\
		*uaddr = val;						\
	} else {							\
		error = rump_sysproxy_copyout(				\
		    RUMP_SPVM2CTL(curproc->p_vmspace),			\
		    &val, uaddr, sizeof(val));				\
	}								\
	return error;							\
}

USTORE(8)
USTORE(16)
USTORE(32)
#ifdef _LP64
USTORE(64)
#endif

#undef USTORE
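
/*
 * Usage sketch (hypothetical, not part of this file): with the
 * templates above expanded, setting a single flag word in a client's
 * address space reduces to a fetch/modify/store sequence:
 *
 *	uint32_t flags;
 *	int error;
 *
 *	if ((error = _ufetch_32(uaddr, &flags)) != 0)
 *		return error;
 *	return _ustore_32(uaddr, flags | EXAMPLE_FLAG);
 *
 * where EXAMPLE_FLAG is an assumed, illustrative constant; unlike the
 * _ucas_* routines, this sequence is not atomic against concurrent
 * updates of the same word.
 */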