xref: /netbsd/sys/arch/powerpc/booke/copyout.c (revision a685c6b4)
1 /*	$NetBSD: copyout.c,v 1.2 2011/01/18 01:02:52 matt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
9  * Agency and which was developed by Matt Thomas of 3am Software Foundry.
10  *
11  * This material is based upon work supported by the Defense Advanced Research
12  * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
13  * Contract No. N66001-09-C-2073.
14  * Approved for Public Release, Distribution Unlimited
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: copyout.c,v 1.2 2011/01/18 01:02:52 matt Exp $");
40 
41 #include <sys/param.h>
42 #include <sys/lwp.h>
43 
44 #include <machine/pcb.h>
45 
/*
 * Store a single byte into user space.  The MSR is temporarily replaced
 * with ds_msr (the current MSR with PSL_DS set) so the store's data
 * access is translated in the user (DS) address space; the saved MSR is
 * restored immediately afterwards.  A fault during the store is caught
 * by the setfault() handler installed by the caller.
 */
static inline void
copyout_uint8(uint8_t *udaddr, uint8_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data],0(%[udaddr])"		/* store user byte */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}
58 
59 static inline void
60 copyout_uint16(uint8_t *udaddr, uint8_t data, register_t ds_msr)
61 {
62 	register_t msr;
63 	__asm volatile(
64 		"mfmsr	%[msr]"				/* Save MSR */
65 	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
66 	"\n\t"	"stb	%[data],0(%[udaddr])"		/* store user byte */
67 	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
68 	    : [msr] "=&r" (msr)
69 	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
70 }
71 
/*
 * Store a single 32-bit word into user space.  The MSR is temporarily
 * set to ds_msr so the "stw" is translated in the user (DS) address
 * space; faults are caught by the caller's setfault() handler.
 */
static inline void
copyout_uint32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stw	%[data],0(%[udaddr])"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}
84 
/*
 * Store a 32-bit word into user space with byte-reversed (little-endian)
 * byte order via "stwbrx", with the MSR temporarily set to ds_msr so the
 * store is translated in the user (DS) address space.  Used by the
 * (currently disabled) word-at-a-time path in copyoutstr().
 */
static inline void
copyout_le32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stwbrx	%[data],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}
97 
/*
 * Read-modify-write a 32-bit user word in byte-reversed (little-endian)
 * order: the bits selected by mask are replaced with data, the other
 * bits of the existing user word are preserved.  data must contain no
 * bits outside mask (asserted below).  The whole sequence runs with the
 * MSR set to ds_msr so both the load and store access the user (DS)
 * address space.
 */
static inline void
copyout_le32_with_mask(uint32_t * const udaddr, uint32_t data,
	uint32_t mask, register_t ds_msr)
{
	register_t msr;
	uint32_t tmp;
	KASSERT((data & ~mask) == 0);
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"lwbrx	%[tmp],0,%[udaddr]"		/* fetch user data */
	"\n\t"	"andc	%[tmp],%[tmp],%[mask]"		/* mask out new data */
	"\n\t"	"or	%[tmp],%[tmp],%[data]"		/* merge new data */
	"\n\t"	"stwbrx	%[tmp],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data),
	      [mask] "r" (mask), [udaddr] "b" (udaddr));
}
117 
/*
 * Store 16 bytes from kernel memory (ksaddr8) to user memory (udaddr8)
 * inside a single DS-on/DS-off window, amortizing the cost of the two
 * mtmsr/sync/isync sequences over 16 "stb" instructions.  The kernel
 * bytes are loaded into registers by the compiler (as asm inputs)
 * before the MSR is switched.
 */
static inline void
copyout_16uint8s(const uint8_t *ksaddr8, uint8_t *udaddr8, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data0],0(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data1],1(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data2],2(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data3],3(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data4],4(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data5],5(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data6],6(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data7],7(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data8],8(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data9],9(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data10],10(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data11],11(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data12],12(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data13],13(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data14],14(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data15],15(%[udaddr8])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [udaddr8] "b" (udaddr8),
	      [data0] "r" (ksaddr8[0]), [data1] "r" (ksaddr8[1]),
	      [data2] "r" (ksaddr8[2]), [data3] "r" (ksaddr8[3]),
	      [data4] "r" (ksaddr8[4]), [data5] "r" (ksaddr8[5]),
	      [data6] "r" (ksaddr8[6]), [data7] "r" (ksaddr8[7]),
	      [data8] "r" (ksaddr8[8]), [data9] "r" (ksaddr8[9]),
	      [data10] "r" (ksaddr8[10]), [data11] "r" (ksaddr8[11]),
	      [data12] "r" (ksaddr8[12]), [data13] "r" (ksaddr8[13]),
	      [data14] "r" (ksaddr8[14]), [data15] "r" (ksaddr8[15]));
}
153 
/*
 * Store 8 consecutive 32-bit words (32 bytes) from kernel memory to
 * user memory inside one DS-on/DS-off window.  If udaddr32 happens to
 * be cache-line aligned (udaddr32 & line_mask == 0, tested by "and."
 * before the MSR switch), "dcba" is used to allocate the destination
 * line without reading it from memory first; otherwise the dcba is
 * skipped.  Clobbers cr0 (set by "and.").
 */
static inline void
copyout_8uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7])
	    : "cr0");
}
185 
/*
 * Store 16 consecutive 32-bit words (64 bytes) from kernel memory to
 * user memory inside one DS-on/DS-off window.  The destination must be
 * cache-line aligned (asserted).  If aligned (checked again via "and."
 * into cr0), the first destination line is allocated with "dcba"; if
 * the line size is 32 bytes (cr2, from "cmplwi 2,%[line_size],32"),
 * the 64-byte span covers two lines and the second line is allocated
 * as well.  Clobbers cr0 and cr2.
 */
static inline void
copyout_16uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	KASSERT(((uintptr_t)udaddr32 & line_mask) == 0);
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"cmplwi	2,%[line_size],32"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n\t"	"bne	2,1f"
	"\n\t"	"dcba	%[line_size],%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data8],32(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data9],36(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data10],40(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data11],44(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data12],48(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data13],52(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data14],56(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data15],60(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_size] "r" (line_mask + 1), [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7]),
	      [data8] "r" (ksaddr32[8]), [data9] "r" (ksaddr32[9]),
	      [data10] "r" (ksaddr32[10]), [data11] "r" (ksaddr32[11]),
	      [data12] "r" (ksaddr32[12]), [data13] "r" (ksaddr32[13]),
	      [data14] "r" (ksaddr32[14]), [data15] "r" (ksaddr32[15])
	    : "cr0", "cr2");
}
233 
234 static inline void
235 copyout_uint8s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
236 {
237 	const uint8_t *ksaddr8 = (void *)ksaddr;
238 	uint8_t *udaddr8 = (void *)udaddr;
239 
240 	__builtin_prefetch(ksaddr8, 0, 1);
241 
242 	for (; len >= 16; len -= 16, ksaddr8 += 16, udaddr8 += 16) {
243 		__builtin_prefetch(ksaddr8 + 16, 0, 1);
244 		copyout_16uint8s(ksaddr8, udaddr8, ds_msr);
245 	}
246 
247 	while (len-- > 0) {
248 		copyout_uint8(udaddr8++, *ksaddr8++, ds_msr);
249 	}
250 }
251 
/*
 * Word-granularity copy from kernel memory to user memory.  Both ksaddr
 * and udaddr must be 4-byte aligned (asserted) and len is a byte count
 * (converted to a word count below).
 *
 * Strategy: if the destination does not start on a data-cache-line
 * boundary but the copy extends past the end of its first line, copy
 * just enough words to reach the next line boundary; after that the
 * main loop runs 16 words (i.e. one or two full cache lines) at a time
 * with copyout_16uint32s(), which can use dcba on the line-aligned
 * destination.  Remaining words go through the 8-word and single-word
 * helpers.
 */
static inline void
copyout_uint32s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	/* Per-CPU data cache line size; line_mask selects the in-line offset. */
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const size_t line_mask = line_size - 1;
	const size_t udalignment = udaddr & line_mask;
	KASSERT((ksaddr & 3) == 0);
	KASSERT((udaddr & 3) == 0);
	const uint32_t *ksaddr32 = (void *)ksaddr;
	uint32_t *udaddr32 = (void *)udaddr;
	len >>= 2;		/* bytes -> words */
	__builtin_prefetch(ksaddr32, 0, 1);
	/*
	 * Copy the partial first cache line only when the copy actually
	 * crosses into the next line; a copy wholly within one line is
	 * handled by the tail loops below.
	 */
	if (udalignment != 0 && udalignment + 4*len > line_size) {
		size_t slen = (line_size - udalignment) >> 2;
		len -= slen;
		for (; slen >= 8; ksaddr32 += 8, udaddr32 += 8, slen -= 8) {
			copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		}
		while (slen-- > 0) {
			copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
		}
		if (len == 0)
			return;
	}
	/* Destination is now line-aligned (or never crosses a line). */
	__builtin_prefetch(ksaddr32, 0, 1);
	while (len >= 16) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		__builtin_prefetch(ksaddr32 + 16, 0, 1);
		copyout_16uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 16, udaddr32 += 16, len -= 16;
	}
	KASSERT(len <= 16);
	if (len >= 8) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 8, udaddr32 += 8, len -= 8;
	}
	while (len-- > 0) {
		copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
	}
}
293 
/*
 * copyout(9): copy len bytes from kernel address vksaddr to user
 * address vudaddr.  Returns 0 on success or the error from setfault()
 * if a fault occurs while touching user memory (the fault handler
 * longjmps back into setfault()).  pcb_onfault is cleared on every
 * exit path.
 */
int
copyout(const void *vksaddr, void *vudaddr, size_t len)
{
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct faultbuf env;
	vaddr_t udaddr = (vaddr_t) vudaddr;
	vaddr_t ksaddr = (vaddr_t) vksaddr;

	if (__predict_false(len == 0)) {
		return 0;
	}

	/* MSR value with PSL_DS set: user address space for data accesses. */
	const register_t ds_msr = mfmsr() | PSL_DS;

	/* Nonzero means we got here via a fault in the copy below. */
	int rv = setfault(&env);
	if (rv != 0) {
		pcb->pcb_onfault = NULL;
		return rv;
	}

	/* Too short to be worth any word-copy setup. */
	if (__predict_false(len < 4)) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
		pcb->pcb_onfault = NULL;
		return 0;
	}

	/*
	 * Word copies are only possible when source and destination have
	 * the same alignment modulo 4; otherwise fall through to the
	 * byte copy at the bottom.
	 */
	const size_t alignment = (udaddr ^ ksaddr) & 3;
	if (__predict_true(alignment == 0)) {
		size_t slen;
		/* Byte-copy up to the first word boundary. */
		if (__predict_false(ksaddr & 3)) {
			slen = 4 - (ksaddr & 3);
			copyout_uint8s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
		/* Word-copy the aligned middle. */
		slen = len & ~3;
		if (__predict_true(slen >= 4)) {
			copyout_uint32s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
	}

	/* Trailing bytes (or the whole buffer if misaligned). */
	if (len > 0) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
	}
	pcb->pcb_onfault = NULL;
	return 0;
}
341 
/*
 * copyoutstr(9): copy a NUL-terminated string of at most len bytes
 * from kernel address ksaddr to user address udaddr.  On return *lenp
 * (if non-NULL) holds the number of bytes copied, including the
 * terminating NUL when one was written.  Returns 0 on success or
 * EFAULT if a fault occurs while storing to user memory.
 *
 * NOTE(review): unlike other copyoutstr implementations this returns 0
 * (not ENAMETOOLONG) when len bytes were copied without reaching a
 * NUL -- callers that care should check for the missing terminator.
 */
int
copyoutstr(const void *ksaddr, void *udaddr, size_t len, size_t *lenp)
{
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct faultbuf env;

	if (__predict_false(len == 0)) {
		if (lenp)
			*lenp = 0;
		return 0;
	}

	if (setfault(&env)) {
		pcb->pcb_onfault = NULL;
		if (lenp)
			*lenp = 0;
		return EFAULT;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;
	const uint8_t *ksaddr8 = ksaddr;
	size_t copylen = 0;

/*
 * The simple byte-at-a-time loop below is the live implementation; the
 * #else branch is a disabled word-at-a-time variant kept for reference.
 */
#if 1
	uint8_t *udaddr8 = (void *)udaddr;

	while (copylen++ < len) {
		const uint8_t data = *ksaddr8++;
		copyout_uint8(udaddr8++, data, ds_msr);
		if (data == 0)
			break;
	}
#else
	uint32_t *udaddr32 = (void *)((uintptr_t)udaddr & ~3);

	size_t boff = (uintptr_t)udaddr & 3;
	bool done = false;
	size_t wlen = 0;
	size_t data = 0;

	/*
	 * If the destination buffer doesn't start on a 32-bit boundary
	 * try to partially fill in the first word.  If we succeed we can
	 * finish writing it while preserving the bytes on front.
	 */
	if (boff > 0) {
		KASSERT(len > 0);
		do {
			data = (data << 8) | *ksaddr8++;
			wlen++;
			done = ((uint8_t)data == 0 || len == wlen);
		} while (!done && boff + wlen < 4);
		KASSERT(wlen > 0);
		data <<= 8 * boff;
		if (!done || boff + wlen == 4) {
			uint32_t mask = 0xffffffff << (8 * boff);
			copyout_le32_with_mask(udaddr32++, data, mask, ds_msr);
			boff = 0;
			copylen = wlen;
			wlen = 0;
			data = 0;
		}
	}

	/*
	 * Now we get to the heart of the routine.  Build up complete words
	 * if possible.  When we have one, write it to the user's address
	 * space and go for the next.  If we ran out of space or we found the
	 * end of the string, stop building.  If we managed to build a complete
	 * word, just write it and be happy.  Otherwise we have to deal with
	 * the trailing bytes.
	 */
	KASSERT(done || boff == 0);
	KASSERT(done || copylen < len);
	while (!done) {
		KASSERT(wlen == 0);
		KASSERT(copylen < len);
		do {
			data = (data << 8) | *ksaddr8++;
			wlen++;
			done = ((uint8_t)data == 0 || copylen + wlen == len);
		} while (!done && wlen < 4);
		KASSERT(done || wlen == 4);
		if (__predict_true(wlen == 4)) {
			copyout_le32(udaddr32++, data, ds_msr);
			data = 0;
			copylen += wlen;
			wlen = 0;
			KASSERT(copylen < len || done);
		}
	}
	KASSERT(wlen < 3);
	if (wlen) {
		/*
		 * Remember even though we are running big-endian we are using
		 * byte reversed load/stores so we need to deal with things as
		 * little endian.
		 *
		 * wlen=1 boff=0:
		 * (~(~0 <<  8) <<  0) -> (~(0xffffff00) <<  0) -> 0x000000ff
		 * wlen=1 boff=1:
		 * (~(~0 <<  8) <<  8) -> (~(0xffffff00) <<  8) -> 0x0000ff00
		 * wlen=1 boff=2:
		 * (~(~0 <<  8) << 16) -> (~(0xffffff00) << 16) -> 0x00ff0000
		 * wlen=1 boff=3:
		 * (~(~0 <<  8) << 24) -> (~(0xffffff00) << 24) -> 0xff000000
		 * wlen=2 boff=0:
		 * (~(~0 << 16) <<  0) -> (~(0xffff0000) <<  0) -> 0x0000ffff
		 * wlen=2 boff=1:
		 * (~(~0 << 16) <<  8) -> (~(0xffff0000) <<  8) -> 0x00ffff00
		 * wlen=2 boff=2:
		 * (~(~0 << 16) << 16) -> (~(0xffff0000) << 16) -> 0xffff0000
		 * wlen=3 boff=0:
		 * (~(~0 << 24) <<  0) -> (~(0xff000000) <<  0) -> 0x00ffffff
		 * wlen=3 boff=1:
		 * (~(~0 << 24) <<  8) -> (~(0xff000000) <<  8) -> 0xffffff00
		 */
		KASSERT(boff + wlen <= 4);
		uint32_t mask = (~(~0 << (8 * wlen))) << (8 * boff);
		KASSERT(mask != 0xffffffff);
		copyout_le32_with_mask(udaddr32, data, mask, ds_msr);
		copylen += wlen;
	}
#endif

	pcb->pcb_onfault = NULL;
	if (lenp)
		*lenp = copylen;
	return 0;
}
472