xref: /freebsd/sys/opencrypto/criov.c (revision 315ee00f)
/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/

/*-
 * Copyright (c) 1999 Theo de Raadt
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sdt.h>

#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

SDT_PROVIDER_DECLARE(opencrypto);

/*
 * These macros exist only to avoid code duplication, as several functions
 * below need to skip a given number of bytes in the same way.
 */
#define	CUIO_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
		if (off < iov->iov_len)					\
			break;						\
		off -= iov->iov_len;					\
		iol--;							\
		iov++;							\
	}								\
} while (0)

#define	CVM_PAGE_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		if (off < PAGE_SIZE)					\
			break;						\
		processed += PAGE_SIZE;					\
		off -= PAGE_SIZE;					\
		pages++;						\
	}								\
} while (0)

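/*
 * Copy "len" bytes of data from the iovec list, skipping the first "off"
 * bytes, into the buffer "cp".
 */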
static void
cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(((caddr_t)iov->iov_base) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

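/*
 * Copy "len" bytes of data from the buffer "cp" into the iovec list,
 * skipping the first "off" bytes.
 */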
static void
cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

/*
 * Return the index and offset of the location in the iovec list.
 */
static int
cuio_getptr(struct uio *uio, int loc, int *off)
{
	int ind, len;

	ind = 0;
	while (loc >= 0 && ind < uio->uio_iovcnt) {
		len = uio->uio_iov[ind].iov_len;
		if (len > loc) {
			*off = loc;
			return (ind);
		}
		loc -= len;
		ind++;
	}

	if (ind > 0 && loc == 0) {
		ind--;
		*off = uio->uio_iov[ind].iov_len;
		return (ind);
	}

	return (-1);
}

#if CRYPTO_MAY_HAVE_VMPAGE
/*
 * Apply function f to the data in a vm_page_t list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cvm_page_apply(vm_page_t *pages, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int processed __unused;
	unsigned count;
	int rval;

	processed = 0;
	CVM_PAGE_SKIP();
	while (len > 0) {
		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
		count = min(PAGE_SIZE - off, len);
		rval = (*f)(arg, kaddr + off, count);
		if (rval)
			return (rval);
		len -= count;
		processed += count;
		off = 0;
		pages++;
	}
	return (0);
}

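/*
 * Return a pointer to the "len"-byte region that starts "skip" bytes into
 * the vm_page_t array, or NULL if the region crosses a page boundary.
 */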
static inline void *
cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
{
	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
		return (NULL);

	pages += (skip / PAGE_SIZE);
	skip -= rounddown(skip, PAGE_SIZE);
	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
}

/*
 * Copy "len" bytes of data from the pointer "cp" into the vm_page_t array,
 * skipping the first "off" bytes.  Return the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}

/*
 * Copy "len" bytes of data from the vm_page_t array, skipping the first
 * "off" bytes, into the pointer "cp".  Return the number of bytes skipped
 * and copied.  Does not verify the length of the array.
 */
static int
cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}
#endif /* CRYPTO_MAY_HAVE_VMPAGE */

/*
 * Given a starting page in an m_epg, determine the length of the
 * current physically contiguous segment.
 */
static __inline size_t
m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
{
	size_t len;
	u_int i;

	len = pglen;
	for (i = idx + 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
			break;
		len += m_epg_pagelen(m, i, 0);
	}
	return (len);
}

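/*
 * Return a pointer to the data "offset" bytes into the unmapped mbuf "m"
 * and store the length of the physically contiguous segment starting there
 * in "*len".  The offset may fall in the header, the page array, or the
 * trailer.
 */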
static void *
m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
{
	u_int i, pglen, pgoff;

	offset += mtod(m, vm_offset_t);
	if (offset < m->m_epg_hdrlen) {
		*len = m->m_epg_hdrlen - offset;
		return (m->m_epg_hdr + offset);
	}
	offset -= m->m_epg_hdrlen;
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (offset < pglen) {
			*len = m_epg_pages_extent(m, i, pglen) - offset;
			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
			    offset));
		}
		offset -= pglen;
		pgoff = 0;
	}
	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
	    __func__));
	*len = m->m_epg_trllen - offset;
	return (m->m_epg_trail + offset);
}

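/*
 * Return a pointer to the "len"-byte region starting "skip" bytes into the
 * unmapped mbuf "m", or NULL if the region is not physically contiguous.
 */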
static __inline void *
m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	void *base;
	size_t seglen;

	base = m_epg_segment(m, skip, &seglen);
	if (len > seglen)
		return (NULL);
	return (base);
}

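/*
 * Initialize a cursor at the start of the crypto buffer "cb".
 */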
void
crypto_cursor_init(struct crypto_buffer_cursor *cc,
    const struct crypto_buffer *cb)
{
	memset(cc, 0, sizeof(*cc));
	cc->cc_type = cb->cb_type;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		cc->cc_buf = cb->cb_buf;
		cc->cc_buf_len = cb->cb_buf_len;
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		cc->cc_mbuf = cb->cb_mbuf;
		break;
	case CRYPTO_BUF_VMPAGE:
		cc->cc_vmpage = cb->cb_vm_page;
		cc->cc_buf_len = cb->cb_vm_page_len;
		cc->cc_offset = cb->cb_vm_page_offset;
		break;
	case CRYPTO_BUF_UIO:
		cc->cc_iov = cb->cb_uio->uio_iov;
		cc->cc_buf_len = cb->cb_uio->uio_resid;
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
#endif
		break;
	}
}

SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");

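/*
 * Advance the cursor "amount" bytes forward in the underlying buffer.
 */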
void
crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
{
	size_t remain;

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= amount);
		cc->cc_buf += amount;
		cc->cc_buf_len -= amount;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
		cc->cc_offset += amount;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
			    cc, amount);
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			if (amount < remain) {
				cc->cc_buf_len -= amount;
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (amount == 0 || cc->cc_buf_len == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

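/*
 * Return a pointer to the contiguous segment at the cursor's current
 * position and store its length in "*len".  The cursor is not advanced.
 */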
void *
crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
{
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
	case CRYPTO_BUF_UIO:
	case CRYPTO_BUF_VMPAGE:
		if (cc->cc_buf_len == 0) {
			*len = 0;
			return (NULL);
		}
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf == NULL) {
			*len = 0;
			return (NULL);
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		*len = 0;
		return (NULL);
	}

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		*len = cc->cc_buf_len;
		return (cc->cc_buf);
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf->m_flags & M_EXTPG)
			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
		*len = cc->cc_mbuf->m_len - cc->cc_offset;
		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
	case CRYPTO_BUF_VMPAGE:
		*len = PAGE_SIZE - cc->cc_offset;
		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
		    *cc->cc_vmpage)) + cc->cc_offset);
	case CRYPTO_BUF_UIO:
		*len = cc->cc_iov->iov_len - cc->cc_offset;
		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
	default:
		__assert_unreachable();
	}
}

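/*
 * Copy "size" bytes from "vsrc" into the buffer at the cursor's current
 * position, advancing the cursor past the copied bytes.
 */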
void
crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
    const void *vsrc)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	src = vsrc;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(cc->cc_buf, src, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copyback() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

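/*
 * Copy "size" bytes from the buffer at the cursor's current position into
 * "vdst", advancing the cursor past the copied bytes.
 */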
void
crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	dst = vdst;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(dst, cc->cc_buf, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copydata() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			src = (const char *)cc->cc_iov->iov_base +
			    cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

/*
 * To avoid advancing 'cursor', make a local copy that gets advanced
 * instead.
 */
void
crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
    void *vdst)
{
	struct crypto_buffer_cursor copy;

	copy = *cc;
	crypto_cursor_copydata(&copy, size, vdst);
}

/*
 * Apply function f to the data in an iovec list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cuio_apply(struct uio *uio, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;
	int rval;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		iol--;
		iov++;
	}
	return (0);
}

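/*
 * Copy "size" bytes from "src" into the request's output buffer (or its
 * input buffer if no separate output buffer is in use), starting at byte
 * "off".
 */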
void
crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
	struct crypto_buffer *cb;

	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
		cb = &crp->crp_obuf;
	else
		cb = &crp->crp_buf;
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copyback(cb->cb_mbuf, off, size, src);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= cb->cb_vm_page_len);
		MPASS(size + off <=
		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
		cvm_page_copyback(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, size, src);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copyback(cb->cb_uio, off, size, src);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= cb->cb_buf_len);
		bcopy(src, cb->cb_buf + off, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		break;
	}
}

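/*
 * Copy "size" bytes starting at byte "off" of the request's input buffer
 * into "dst".
 */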
void
crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= crp->crp_buf.cb_vm_page_len);
		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
		    crp->crp_buf.cb_vm_page_offset);
		cvm_page_copydata(crp->crp_buf.cb_vm_page,
		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= crp->crp_buf.cb_buf_len);
		bcopy(crp->crp_buf.cb_buf + off, dst, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
#endif
		break;
	}
}

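/*
 * Apply the function "f" to each contiguous region of the crypto buffer
 * "cb", starting "off" bytes in and continuing for "len" bytes.  Stop and
 * return the error if "f" returns a non-zero value.
 */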
int
crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int error;

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		error = m_apply(cb->cb_mbuf, off, len,
		    (int (*)(void *, void *, u_int))f, arg);
		break;
	case CRYPTO_BUF_UIO:
		error = cuio_apply(cb->cb_uio, off, len, f, arg);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		error = cvm_page_apply(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, len, f, arg);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(off + len <= cb->cb_buf_len);
		error = (*f)(arg, cb->cb_buf + off, len);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crypto buf type %d", cb->cb_type);
#endif
		error = 0;
		break;
	}
	return (error);
}

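/*
 * Apply the function "f" to the request's input buffer; see
 * crypto_apply_buf() above.
 */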
int
crypto_apply(struct cryptop *crp, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
}

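/*
 * Return a pointer to a contiguous "len"-byte region starting "skip" bytes
 * into the mbuf chain "m", or NULL if no such contiguous region exists.
 */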
static inline void *
m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	int rel_off;

	MPASS(skip <= INT_MAX);

	m = m_getptr(m, (int)skip, &rel_off);
	if (m == NULL)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > m->m_len)
		return (NULL);

	if (m->m_flags & M_EXTPG)
		return (m_epg_contiguous_subsegment(m, skip, len));
	return (mtod(m, char *) + skip);
}

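/*
 * Return a pointer to a contiguous "len"-byte region starting "skip" bytes
 * into the uio, or NULL if the region spans multiple iovecs.
 */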
static inline void *
cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
{
	int rel_off, idx;

	MPASS(skip <= INT_MAX);
	idx = cuio_getptr(uio, (int)skip, &rel_off);
	if (idx < 0)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > uio->uio_iov[idx].iov_len)
		return (NULL);
	return ((char *)uio->uio_iov[idx].iov_base + skip);
}

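/*
 * Return a pointer to a contiguous "len"-byte region starting "skip" bytes
 * into the crypto buffer "cb", or NULL if no such contiguous region exists.
 */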
void *
crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
    size_t len)
{

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
	case CRYPTO_BUF_UIO:
		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(skip + len <= cb->cb_vm_page_len);
		return (cvm_page_contiguous_segment(cb->cb_vm_page,
		    skip + cb->cb_vm_page_offset, len));
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(skip + len <= cb->cb_buf_len);
		return (cb->cb_buf + skip);
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		return (NULL);
	}
}

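/*
 * Return a pointer to a contiguous "len"-byte region of the request's
 * input buffer; see crypto_buffer_contiguous_subsegment() above.
 */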
void *
crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
	return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));
}
859