1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7 * Copyright (c) 2016 Matthew Macy
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice unmodified, this list of conditions, and the following
15 * disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31 #ifndef _LINUXKPI_LINUX_SCATTERLIST_H_
32 #define _LINUXKPI_LINUX_SCATTERLIST_H_
33
34 #include <sys/types.h>
35 #include <sys/sf_buf.h>
36
37 #include <linux/page.h>
38 #include <linux/slab.h>
39 #include <linux/mm.h>
40
41 struct bus_dmamap;
struct scatterlist {
	/*
	 * Pointer to the backing struct page with the two low bits
	 * borrowed for CHAIN/LAST flags; the CTASSERT below keeps
	 * array elements aligned so those bits stay free.
	 */
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL	/* entry links to another chunk */
#define	SG_PAGE_LINK_LAST	0x2UL	/* entry terminates the list */
#define	SG_PAGE_LINK_MASK	0x3UL	/* all flag bits */
	unsigned int offset;		/* byte offset into the first page */
	unsigned int length;		/* segment length in bytes */
	dma_addr_t dma_address;		/* bus address once DMA mapped */
	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};

/* Flag bits live in the page pointer; the element size must keep alignment. */
CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
54
/* A (possibly chained) table of scatterlist entries. */
struct sg_table {
	struct scatterlist *sgl;	/* first chunk of the list */
	unsigned int nents;		/* entries used for DMA-side walks
					 * (see for_each_sgtable_dma_sg()) */
	unsigned int orig_nents;	/* entries allocated; CPU-side walks */
};
60
/* Cursor for page-by-page iteration over a scatterlist. */
struct sg_page_iter {
	struct scatterlist *sg;		/* current entry; NULL when done */
	unsigned int sg_pgoffset;	/* page index within "sg" */
	unsigned int maxents;		/* entries left (_sg_iter_next()) */
	struct {
		unsigned int nents;	/* entries left (__sg_page_iter_next()) */
		int pg_advance;		/* pages to advance on next call */
	} internal;
};

/* Same layout as sg_page_iter; distinct type for DMA-address iteration. */
struct sg_dma_page_iter {
	struct sg_page_iter base;
};
74
/* Largest single-segment length: all of a 32-bit range, page aligned. */
#define SCATTERLIST_MAX_SEGMENT (-1U & ~(PAGE_SIZE - 1))

/* Entry count whose backing storage is exactly one page (see sg_kmalloc()). */
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))

/* Linux-compatible names for the page_link flag bits. */
#define SG_MAGIC 0x87654321UL
#define SG_CHAIN SG_PAGE_LINK_CHAIN
#define SG_END SG_PAGE_LINK_LAST

/* Test/decode the low page_link bits of an entry. */
#define sg_is_chain(sg) ((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define sg_is_last(sg) ((sg)->page_link & SG_PAGE_LINK_LAST)
#define sg_chain_ptr(sg) \
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

/* Accessors for the DMA-mapped address and length of an entry. */
#define sg_dma_address(sg) (sg)->dma_address
#define sg_dma_len(sg) (sg)->length

/* Iterate page by page over up to "nents" entries starting at "sgl". */
#define for_each_sg_page(sgl, iter, nents, pgoffset) \
	for (_sg_iter_init(sgl, iter, nents, pgoffset); \
	 (iter)->sg; _sg_iter_next(iter))
#define for_each_sg_dma_page(sgl, iter, nents, pgoffset) \
	for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)

/* Iterate entry by entry; "iter" is an integer loop counter. */
#define for_each_sg(sglist, sg, sgmax, iter) \
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

/* sg_table walks: CPU-side iteration uses orig_nents ... */
#define for_each_sgtable_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

#define for_each_sgtable_page(sgt, iter, pgoffset) \
	for_each_sg_page((sgt)->sgl, iter, (sgt)->orig_nents, pgoffset)

/* ... while DMA-side iteration uses the mapped count, nents. */
#define for_each_sgtable_dma_sg(sgt, sg, iter) \
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, iter)

#define for_each_sgtable_dma_page(sgt, iter, pgoffset) \
	for_each_sg_dma_page((sgt)->sgl, iter, (sgt)->nents, pgoffset)

/* Chunk allocator/deallocator signatures used by __sg_alloc_table(). */
typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);
114
115 static inline void
sg_assign_page(struct scatterlist * sg,struct page * page)116 sg_assign_page(struct scatterlist *sg, struct page *page)
117 {
118 unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;
119
120 sg->page_link = page_link | (unsigned long)page;
121 }
122
123 static inline void
sg_set_page(struct scatterlist * sg,struct page * page,unsigned int len,unsigned int offset)124 sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
125 unsigned int offset)
126 {
127 sg_assign_page(sg, page);
128 sg->offset = offset;
129 sg->length = len;
130 }
131
132 static inline struct page *
sg_page(struct scatterlist * sg)133 sg_page(struct scatterlist *sg)
134 {
135 return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
136 }
137
138 static inline void
sg_set_buf(struct scatterlist * sg,const void * buf,unsigned int buflen)139 sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
140 {
141 sg_set_page(sg, virt_to_page(buf), buflen,
142 ((uintptr_t)buf) & (PAGE_SIZE - 1));
143 }
144
145 static inline struct scatterlist *
sg_next(struct scatterlist * sg)146 sg_next(struct scatterlist *sg)
147 {
148 if (sg_is_last(sg))
149 return (NULL);
150 sg++;
151 if (sg_is_chain(sg))
152 sg = sg_chain_ptr(sg);
153 return (sg);
154 }
155
156 static inline vm_paddr_t
sg_phys(struct scatterlist * sg)157 sg_phys(struct scatterlist *sg)
158 {
159 return (page_to_phys(sg_page(sg)) + sg->offset);
160 }
161
162 static inline void *
sg_virt(struct scatterlist * sg)163 sg_virt(struct scatterlist *sg)
164 {
165
166 return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
167 }
168
169 static inline void
sg_chain(struct scatterlist * prv,unsigned int prv_nents,struct scatterlist * sgl)170 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
171 struct scatterlist *sgl)
172 {
173 struct scatterlist *sg = &prv[prv_nents - 1];
174
175 sg->offset = 0;
176 sg->length = 0;
177 sg->page_link = ((unsigned long)sgl |
178 SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
179 }
180
181 static inline void
sg_mark_end(struct scatterlist * sg)182 sg_mark_end(struct scatterlist *sg)
183 {
184 sg->page_link |= SG_PAGE_LINK_LAST;
185 sg->page_link &= ~SG_PAGE_LINK_CHAIN;
186 }
187
188 static inline void
sg_init_table(struct scatterlist * sg,unsigned int nents)189 sg_init_table(struct scatterlist *sg, unsigned int nents)
190 {
191 bzero(sg, sizeof(*sg) * nents);
192 sg_mark_end(&sg[nents - 1]);
193 }
194
/* Build a single-entry list describing the buffer "buf"/"buflen". */
static inline void
sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	/* Initialize (and end-mark) first, then fill in the payload. */
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
201
202 static struct scatterlist *
sg_kmalloc(unsigned int nents,gfp_t gfp_mask)203 sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
204 {
205 if (nents == SG_MAX_SINGLE_ALLOC) {
206 return ((void *)__get_free_page(gfp_mask));
207 } else
208 return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
209 }
210
211 static inline void
sg_kfree(struct scatterlist * sg,unsigned int nents)212 sg_kfree(struct scatterlist *sg, unsigned int nents)
213 {
214 if (nents == SG_MAX_SINGLE_ALLOC) {
215 free_page((unsigned long)sg);
216 } else
217 kfree(sg);
218 }
219
/*
 * Free the chunks backing "table", walking the chain chunk by chunk.
 * The pointer to the next chunk is read from the trailing chain entry
 * before the current chunk is handed to free_fn.  When
 * "skip_first_chunk" is set, the first chunk is owned by the caller
 * and is not freed here.
 */
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn * free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * A full chunk holds max_ents entries, the last of
		 * which is a chain link rather than a payload entry.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
253
254 static inline void
sg_free_table(struct sg_table * table)255 sg_free_table(struct sg_table *table)
256 {
257 __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
258 }
259
/*
 * Allocate and chain the chunks backing a scatterlist of "nents"
 * entries, at most "max_ents" entries per chunk.  If "first_chunk" is
 * non-NULL it is consumed, once, in place of the first alloc_fn()
 * call.  Returns 0 or a negative errno.
 */
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		/*
		 * A full chunk donates its final entry to the chain
		 * link, so it carries only max_ents - 1 payload entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Count the chain-link entry of the previous
			 * chunk back in, so a subsequent
			 * __sg_free_table() releases the partial table.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/* Link this chunk after the previous one, or make it head. */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/* Terminate the list at the final payload entry. */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}
314
315 static inline int
sg_alloc_table(struct sg_table * table,unsigned int nents,gfp_t gfp_mask)316 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
317 {
318 int ret;
319
320 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
321 NULL, gfp_mask, sg_kmalloc);
322 if (unlikely(ret))
323 __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
324
325 return (ret);
326 }
327
/*
 * Build a scatterlist from an array of pages, coalescing physically
 * contiguous pages into entries of at most "max_segment" bytes.
 * "off" is the byte offset into the first page and "size" the total
 * byte count described.
 *
 * Newer LinuxKPI (>= 51300) returns the last entry written or an
 * ERR_PTR; older LinuxKPI returns 0 or a negative errno.
 */
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
static inline struct scatterlist *
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment,
    struct scatterlist *prv, unsigned int left_pages,
    gfp_t gfp_mask)
#else
static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
#endif
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s, *sg_iter;

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	/* Appending to an existing list is not implemented here. */
	if (prv != NULL) {
		panic(
		    "Support for prv != NULL not implemented in "
		    "__sg_alloc_table_from_pages()");
	}
#endif

	/* max_segment must be non-zero and a page-size multiple. */
	if (__predict_false(!max_segment || offset_in_page(max_segment)))
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
		return (ERR_PTR(-EINVAL));
#else
		return (-EINVAL);
#endif

	/*
	 * Pass 1: count segments; a new segment starts when the size
	 * limit is reached or two pages are not physically contiguous.
	 */
	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
		return (ERR_PTR(rc));
#else
		return (rc);
#endif

	/* Pass 2: fill one entry per segment found above. */
	cur = 0;
	for_each_sg(sgt->sgl, sg_iter, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		/*
		 * We need to make sure that when we exit this loop "s" has the
		 * last sg in the chain so we can call sg_mark_end() on it.
		 * Only set this inside the loop since sg_iter will be iterated
		 * until it is NULL.
		 */
		s = sg_iter;

		/* Find the end of the current run of contiguous pages. */
		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], MIN(size, seg_size), off);
		size -= seg_size;
		off = 0;	/* only the first segment carries the offset */
		cur = j;
	}
	KASSERT(s != NULL, ("s is NULL after loop in __sg_alloc_table_from_pages()"));

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	if (left_pages == 0)
		sg_mark_end(s);

	return (s);
#else
	return (0);
#endif
}
417
/*
 * Build a scatterlist from a page array using the maximum segment
 * size.  Returns 0 or a negative errno regardless of LinuxKPI level.
 */
static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, NULL, 0, gfp_mask)));
#else
	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
#endif
}
433
/*
 * As sg_alloc_table_from_pages(), but with a caller-chosen maximum
 * segment size.  Returns 0 or a negative errno.
 */
static inline int
sg_alloc_table_from_pages_segment(struct sg_table *sgt,
    struct page **pages, unsigned int count, unsigned int off,
    unsigned long size, unsigned int max_segment, gfp_t gfp_mask)
{
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    max_segment, NULL, 0, gfp_mask)));
#else
	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    max_segment, gfp_mask));
#endif
}
447
448 static inline int
sg_nents(struct scatterlist * sg)449 sg_nents(struct scatterlist *sg)
450 {
451 int nents;
452
453 for (nents = 0; sg; sg = sg_next(sg))
454 nents++;
455 return (nents);
456 }
457
458 static inline void
__sg_page_iter_start(struct sg_page_iter * piter,struct scatterlist * sglist,unsigned int nents,unsigned long pgoffset)459 __sg_page_iter_start(struct sg_page_iter *piter,
460 struct scatterlist *sglist, unsigned int nents,
461 unsigned long pgoffset)
462 {
463 piter->internal.pg_advance = 0;
464 piter->internal.nents = nents;
465
466 piter->sg = sglist;
467 piter->sg_pgoffset = pgoffset;
468 }
469
/*
 * Advance a for_each_sg_page() iterator by one page, stepping to the
 * next entry (following chain links) when the current entry's pages
 * run out.  iter->sg becomes NULL once the chain is exhausted.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	/* Whole pages covered by this entry, rounded up. */
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		/* Carry the excess page offset into the next entry. */
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}
490
/* Number of pages an entry spans (offset + length, rounded up). */
static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}
#define sg_dma_page_count(sg) \
	sg_page_count(sg)
498
/*
 * Advance a __sg_page_iter_start() iterator by one page.  Returns 1
 * while a page remains to visit, 0 once internal.nents entries have
 * been consumed or the chain ends.
 */
static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	unsigned int pgcount;

	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	/* First call advances by 0 pages; every later call by 1. */
	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (1) {
		pgcount = sg_page_count(piter->sg);
		if (likely(piter->sg_pgoffset < pgcount))
			break;
		/* Carry the excess page offset into the next entry. */
		piter->sg_pgoffset -= pgcount;
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}
#define __sg_page_iter_dma_next(itr) \
	__sg_page_iter_next(&(itr)->base)
527
528 static inline void
_sg_iter_init(struct scatterlist * sgl,struct sg_page_iter * iter,unsigned int nents,unsigned long pgoffset)529 _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
530 unsigned int nents, unsigned long pgoffset)
531 {
532 if (nents) {
533 iter->sg = sgl;
534 iter->sg_pgoffset = pgoffset - 1;
535 iter->maxents = nents;
536 _sg_iter_next(iter);
537 } else {
538 iter->sg = NULL;
539 iter->sg_pgoffset = 0;
540 iter->maxents = 0;
541 }
542 }
543
544 /*
545 * sg_page_iter_dma_address() is implemented as a macro because it
546 * needs to accept two different and identical structure types. This
547 * allows both old and new code to co-exist. The compile time assert
548 * adds some safety, that the structure sizes match.
549 */
550 #define sg_page_iter_dma_address(spi) ({ \
551 struct sg_page_iter *__spi = (void *)(spi); \
552 dma_addr_t __dma_address; \
553 CTASSERT(sizeof(*(spi)) == sizeof(*__spi)); \
554 __dma_address = __spi->sg->dma_address + \
555 (__spi->sg_pgoffset << PAGE_SHIFT); \
556 __dma_address; \
557 })
558
559 static inline struct page *
sg_page_iter_page(struct sg_page_iter * piter)560 sg_page_iter_page(struct sg_page_iter *piter)
561 {
562 return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
563 }
564
/*
 * Copy "buflen" bytes from "buf" into the scatterlist, starting
 * "skip" bytes into the list.  Pages are mapped one at a time via
 * sf_buf with the thread pinned.  Returns the number of bytes
 * actually copied (may be short if sf_buf_alloc() fails).
 */
static __inline size_t
sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen, off_t skip)
{
	struct sg_page_iter piter;
	struct page *page;
	struct sf_buf *sf;
	size_t len, copied;
	char *p, *b;

	if (buflen == 0)
		return (0);

	b = __DECONST(char *, buf);
	copied = 0;
	sched_pin();
	for_each_sg_page(sgl, &piter, nents, 0) {

		/* Skip to the start. */
		if (piter.sg->length <= skip) {
			skip -= piter.sg->length;
			continue;
		}

		/* See how much to copy. */
		KASSERT(((piter.sg->length - skip) != 0 && (buflen != 0)),
		    ("%s: sg len %u - skip %ju || buflen %zu is 0\n",
		    __func__, piter.sg->length, (uintmax_t)skip, buflen));
		len = min(piter.sg->length - skip, buflen);

		page = sg_page_iter_page(&piter);
		sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
		if (sf == NULL)
			break;
		/*
		 * NOTE(review): sg_pgoffset is a page index, not a byte
		 * offset, and sg->offset is not added here; confirm the
		 * intended kva offset for entries with a non-zero page
		 * offset or spanning multiple pages.
		 */
		p = (char *)sf_buf_kva(sf) + piter.sg_pgoffset + skip;
		memcpy(p, b, len);
		sf_buf_free(sf);

		/* We copied so nothing more to skip. */
		skip = 0;
		copied += len;
		/* Either we exactly filled the page, or we are done. */
		buflen -= len;
		if (buflen == 0)
			break;
		b += len;
	}
	sched_unpin();

	return (copied);
}
616
617 static inline size_t
sg_copy_from_buffer(struct scatterlist * sgl,unsigned int nents,const void * buf,size_t buflen)618 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
619 const void *buf, size_t buflen)
620 {
621 return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
622 }
623
/*
 * Copy up to "buflen" bytes out of the scatterlist into "buf",
 * starting "offset" bytes into the list.  Uses the direct map when
 * available, otherwise maps each page through an sf_buf with the
 * thread pinned.  Returns the number of bytes copied.
 */
static inline size_t
sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    void *buf, size_t buflen, off_t offset)
{
	struct sg_page_iter iter;
	struct scatterlist *sg;
	struct page *page;
	struct sf_buf *sf;
	char *vaddr;
	size_t total = 0;
	size_t len;

	if (!PMAP_HAS_DMAP)
		sched_pin();
	for_each_sg_page(sgl, &iter, nents, 0) {
		sg = iter.sg;

		/* Skip whole entries until "offset" lands inside one. */
		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		}
		len = ulmin(buflen, sg->length - offset);
		if (len == 0)
			break;

		page = sg_page_iter_page(&iter);
		if (!PMAP_HAS_DMAP) {
			sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
			if (sf == NULL)
				break;
			vaddr = (char *)sf_buf_kva(sf);
		} else
			vaddr = (char *)PHYS_TO_DMAP(page_to_phys(page));
		/*
		 * NOTE(review): "len" may exceed the single mapped page,
		 * and sg->offset is applied on every page of a
		 * multi-page entry; confirm behavior for entries that
		 * span more than one page.
		 */
		memcpy(buf, vaddr + sg->offset + offset, len);
		if (!PMAP_HAS_DMAP)
			sf_buf_free(sf);

		/* start at beginning of next page */
		offset = 0;

		/* advance buffer */
		buf = (char *)buf + len;
		buflen -= len;
		total += len;
	}
	if (!PMAP_HAS_DMAP)
		sched_unpin();
	return (total);
}
673
674 #endif /* _LINUXKPI_LINUX_SCATTERLIST_H_ */
675