1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7  * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef	_LINUX_SCATTERLIST_H_
33 #define	_LINUX_SCATTERLIST_H_
34 
35 #include <linux/highmem.h>
36 #include <linux/string.h>
37 #include <linux/types.h>
38 #include <linux/bug.h>
39 #include <linux/mm.h>
40 #include <asm/io.h>
41 
42 /*
43  * SG table design.
44  *
45  * If flags bit 0 is set, then the sg field contains a pointer to the next sg
46  * table list. Otherwise the next entry is at sg + 1, can be determined using
47  * the sg_is_chain() function.
48  *
49  * If flags bit 1 is set, then this sg entry is the last element in a list,
50  * can be determined using the sg_is_last() function.
51  *
52  * See sg_next().
53  *
54  */
55 
/*
 * One scatter/gather segment: a (page, offset, length) triple plus the
 * bus address filled in at map time.  The sl_un union holds either the
 * backing page (data entry) or, for a chain entry (SG_CHAIN set in
 * flags), a pointer to the next scatterlist array.
 */
struct scatterlist {
	union {
		struct vm_page		*page;	/* backing page (data entry) */
		struct scatterlist	*sg;	/* next array (chain entry) */
	} sl_un;
	unsigned long	offset;		/* byte offset of data within the page */
	uint32_t	length;		/* length of data in bytes */
	dma_addr_t	dma_address;	/* bus address, valid after mapping */
	uint32_t	flags;		/* SG_END and/or SG_CHAIN */
};
66 
/*
 * An allocated scatterlist.  When more than SG_MAX_SINGLE_ALLOC entries
 * are needed, the list is split into chained arrays (see
 * __sg_alloc_table()); nents/orig_nents count data entries only.
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
72 
/*
 * Page-granular iteration state.  Two iterator schemes share this
 * struct: the legacy _sg_iter_init()/_sg_iter_next() pair (which uses
 * maxents) and __sg_page_iter_start()/__sg_page_iter_next() (which use
 * __nents and __pg_advance).  The fields of the unused scheme are left
 * untouched by the other.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* current sg entry */
	unsigned int		sg_pgoffset;	/* page index within the entry */
	unsigned int		maxents;	/* entries left (legacy iterator) */
	unsigned int		__nents;	/* entries left (__sg_page_iter_next) */
	int			__pg_advance;	/* pages to advance on next call */
};
80 
81 
82 /*
83  * Maximum number of entries that will be allocated in one piece, if
84  * a list larger than this is required then chaining will be utilized.
85  */
86 #define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))
87 
88 #define	sg_dma_address(sg)	(sg)->dma_address
89 #define	sg_dma_len(sg)		(sg)->length
90 #define	sg_page(sg)		(sg)->sl_un.page
91 #define	sg_scatternext(sg)	(sg)->sl_un.sg
92 
93 #define	SG_END		0x01
94 #define	SG_CHAIN	0x02
95 
96 static inline void
97 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
98     unsigned int offset)
99 {
100 	sg_page(sg) = page;
101 	sg_dma_len(sg) = len;
102 	sg->offset = offset;
103 	if (offset > PAGE_SIZE)
104 		panic("sg_set_page: Invalid offset %d\n", offset);
105 }
106 
#if 0
/*
 * Disabled: would initialize @sg from a kernel-virtual buffer by
 * resolving its backing page and in-page offset.
 *
 * NOTE(review): "& ~PAGE_MASK" extracts the in-page offset only under
 * the Linux convention PAGE_MASK == ~(PAGE_SIZE - 1); under the BSD
 * convention PAGE_MASK == PAGE_SIZE - 1 it would yield the page base
 * instead — confirm before ever enabling this.
 */
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}
#endif
115 
116 static inline void
117 sg_init_table(struct scatterlist *sg, unsigned int nents)
118 {
119 	bzero(sg, sizeof(*sg) * nents);
120 	sg[nents - 1].flags = SG_END;
121 }
122 
/*
 * sg_next - Return the next entry of a (possibly chained) scatterlist,
 * or NULL when @sg is the terminating entry.
 *
 * The order of the flag tests matters: SG_END is checked on the current
 * entry before advancing, while SG_CHAIN is checked on the entry we
 * advanced to, because a chain entry is a placeholder that only carries
 * the pointer to the next array (see sg_chain()).
 */
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}
133 
134 static inline vm_paddr_t
135 sg_phys(struct scatterlist *sg)
136 {
137 	return sg_page(sg)->phys_addr + sg->offset;
138 }
139 
140 /**
141  * sg_chain - Chain two sglists together
142  * @prv:        First scatterlist
143  * @prv_nents:  Number of entries in prv
144  * @sgl:        Second scatterlist
145  *
146  * Description:
147  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
148  *
149  **/
150 static inline void
151 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
152 					struct scatterlist *sgl)
153 {
154 /*
155  * offset and length are unused for chain entry.  Clear them.
156  */
157 	struct scatterlist *sg = &prv[prv_nents - 1];
158 
159 	sg->offset = 0;
160 	sg->length = 0;
161 
162 	/*
163 	 * Indicate a link pointer, and set the link to the second list.
164 	 */
165 	sg->flags = SG_CHAIN;
166 	sg->sl_un.sg = sgl;
167 }
168 
169 /**
170  * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
172  *
173  * Description:
174  *   Marks the passed in sg entry as the termination point for the sg
175  *   table. A call to sg_next() on this entry will return NULL.
176  *
177  **/
178 static inline void sg_mark_end(struct scatterlist *sg)
179 {
180         sg->flags = SG_END;
181 }
182 
183 /**
184  * __sg_free_table - Free a previously mapped sg table
185  * @table:      The sg table header to use
186  * @max_ents:   The maximum number of entries per single scatterlist
187  *
188  *  Description:
189  *    Free an sg table previously allocated and setup with
190  *    __sg_alloc_table().  The @max_ents value must be identical to
191  *    that previously used with __sg_alloc_table().
192  *
193  **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	/* Nothing allocated (or already freed). */
	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			/* Read the chain pointer before freeing this chunk. */
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		/*
		 * orig_nents counts data entries only (chain slots are
		 * excluded by __sg_alloc_table), hence decrement by sg_size.
		 */
		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}
229 
230 /**
231  * sg_free_table - Free a previously allocated sg table
232  * @table:      The mapped sg table header
233  *
234  **/
235 static inline void
236 sg_free_table(struct sg_table *table)
237 {
238 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
239 }
240 
241 /**
242  * __sg_alloc_table - Allocate and initialize an sg table with given allocator
243  * @table:      The sg table header to use
244  * @nents:      Number of entries in sg list
245  * @max_ents:   The maximum number of entries the allocator returns per call
246  * @gfp_mask:   GFP allocation mask
247  *
248  * Description:
249  *   This function returns a @table @nents long. The allocator is
250  *   defined to return scatterlist chunks of maximum size @max_ents.
251  *   Thus if @nents is bigger than @max_ents, the scatterlists will be
252  *   chained in units of @max_ents.
253  *
254  * Notes:
255  *   If this function returns non-0 (eg failure), the caller must call
256  *   __sg_free_table() to cleanup any leftover allocations.
257  *
258  **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	/* Reject empty lists rather than building a zero-length table. */
	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * A full-size chunk reserves its last slot for the chain
		 * entry, so it carries only alloc_size - 1 data entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
		/*
		 * Adjust entry count to reflect that the last
		 * entry of the previous table won't be used for
		 * linkage.  Without this, __sg_free_table() may get
		 * confused.
		 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		/* Zero the chunk and provisionally mark its last slot END. */
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
320 
321 /**
322  * sg_alloc_table - Allocate and initialize an sg table
323  * @table:      The sg table header to use
324  * @nents:      Number of entries in sg list
325  * @gfp_mask:   GFP allocation mask
326  *
327  *  Description:
328  *    Allocate and initialize an sg table. If @nents@ is larger than
329  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
330  *
331  **/
332 
333 static inline int
334 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
335 {
336 	int ret;
337 
338 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
339 		gfp_mask);
340 	if (unlikely(ret))
341 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
342 
343 	return ret;
344 }
345 
346 static inline int
347 sg_nents(struct scatterlist *sg)
348 {
349 	int nents;
350 	for (nents = 0; sg; sg = sg_next(sg))
351 		nents++;
352 	return nents;
353 }
354 
355 static inline void
356 __sg_page_iter_start(struct sg_page_iter *piter,
357 			  struct scatterlist *sglist, unsigned int nents,
358 			  unsigned long pgoffset)
359 {
360 	piter->__pg_advance = 0;
361 	piter->__nents = nents;
362 
363 	piter->sg = sglist;
364 	piter->sg_pgoffset = pgoffset;
365 }
366 
367 /*
368  * Iterate pages in sg list.
369  */
/*
 * Advance the legacy iterator (see _sg_iter_init()) to the next page,
 * hopping to the next scatterlist entry once the current entry's pages
 * are exhausted.  The end of iteration is signalled by iter->sg == NULL.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	/* Number of pages spanned by [offset, offset + length). */
	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		/*
		 * NOTE(review): when maxents reaches 0 while sg is still
		 * non-NULL, iter->sg is left non-NULL, yet
		 * for_each_sg_page() stops only on iter->sg == NULL —
		 * confirm no caller relies on maxents alone to terminate.
		 */
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}
390 
391 static inline int
392 sg_page_count(struct scatterlist *sg)
393 {
394 	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
395 }
396 
397 static inline bool
398 __sg_page_iter_next(struct sg_page_iter *piter)
399 {
400 	if (piter->__nents == 0)
401 		return (false);
402 	if (piter->sg == NULL)
403 		return (false);
404 
405 	piter->sg_pgoffset += piter->__pg_advance;
406 	piter->__pg_advance = 1;
407 
408 	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
409 		piter->sg_pgoffset -= sg_page_count(piter->sg);
410 		piter->sg = sg_next(piter->sg);
411 		if (--piter->__nents == 0)
412 			return (false);
413 		if (piter->sg == NULL)
414 			return (false);
415 	}
416 	return (true);
417 }
418 
419 /*
420  * NOTE: pgoffset is really a page index, not a byte offset.
421  */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
	      unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		/*
		 * Nominal case.  Note subtract 1 from starting page index
		 * for initial _sg_iter_next() call: the iterator always
		 * pre-increments, so the first advance lands on pgoffset.
		 */
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/*
		 * Degenerate case: leave iter->sg NULL so that
		 * for_each_sg_page() terminates immediately.
		 */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}
444 
445 static inline struct vm_page *
446 sg_page_iter_page(struct sg_page_iter *piter)
447 {
448 	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
449 }
450 
451 static inline dma_addr_t
452 sg_page_iter_dma_address(struct sg_page_iter *spi)
453 {
454 	return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
455 }
456 
/*
 * for_each_sg_page - iterate over every page of the first @nents entries
 * using the legacy iterator (_sg_iter_init()/_sg_iter_next()).
 * @pgoffset is a starting page index, not a byte offset.
 */
#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/* for_each_sg - iterate over at most @sgmax entries of @sglist. */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
463 
464 /*
465  *
466  * XXX please review these
467  */
468 static inline size_t
469 sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
470 		      const void *buf, size_t buflen, off_t skip)
471 {
472 	off_t off;
473 	int len, curlen, curoff;
474 	struct sg_page_iter iter;
475 	struct scatterlist *sg;
476 	struct vm_page *page;
477 	char *vaddr;
478 
479 	off = 0;
480 	for_each_sg_page(sgl, &iter, nents, 0) {
481 		sg = iter.sg;
482 		curlen = sg->length;
483 		curoff = sg->offset;
484 		if (skip && curlen >= skip) {
485 			skip -= curlen;
486 			continue;
487 		}
488 		if (skip) {
489 			curlen -= skip;
490 			curoff += skip;
491 			skip = 0;
492 		}
493 		len = min(curlen, buflen - off);
494 		page = sg_page_iter_page(&iter);
495 		vaddr = (char *)kmap(page) + sg->offset;
496 		memcpy(vaddr, (const char *)buf + off, len);
497 		off += len;
498 		kunmap(page);
499 	}
500 
501 	return (off);
502 }
503 
504 
505 static inline size_t
506 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
507 		     const char *buf, size_t buflen)
508 {
509 	return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
510 }
511 
512 static inline size_t
513 sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
514 		   void *buf, size_t buflen, off_t skip)
515 {
516 	off_t off;
517 	int len, curlen, curoff;
518 	struct sg_page_iter iter;
519 	struct scatterlist *sg;
520 	struct vm_page *page;
521 	char *vaddr;
522 
523 	off = 0;
524 	for_each_sg_page(sgl, &iter, nents, 0) {
525 		sg = iter.sg;
526 		curlen = sg->length;
527 		curoff = sg->offset;
528 		if (skip && curlen >= skip) {
529 			skip -= curlen;
530 			continue;
531 		}
532 		if (skip) {
533 			curlen -= skip;
534 			curoff += skip;
535 			skip = 0;
536 		}
537 		len = min(curlen, buflen - off);
538 		page = sg_page_iter_page(&iter);
539 		vaddr = (char *)kmap(page) + sg->offset;
540 		memcpy((char *)buf + off, vaddr, len);
541 		off += len;
542 		kunmap(page);
543 	}
544 
545 	return (off);
546 }
547 
548 static inline size_t
549 sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
550 		  char *buf, size_t buflen)
551 {
552 
553 	return (sg_pcopy_to_buffer(sgl, nents, buf, buflen, 0));
554 }
555 
556 #endif	/* _LINUX_SCATTERLIST_H_ */
557