1 /*
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7  * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
8  * Copyright (c) 2017 François Tigeot <ftigeot@wolfpond.org>
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice unmodified, this list of conditions, and the following
16  *    disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #ifndef	_LINUX_SCATTERLIST_H_
34 #define	_LINUX_SCATTERLIST_H_
35 
36 #include <linux/highmem.h>
37 #include <linux/string.h>
38 #include <linux/types.h>
39 #include <linux/bug.h>
40 #include <linux/mm.h>
41 #include <asm/io.h>
42 
43 /*
44  * SG table design.
45  *
 * If flags bit 1 (SG_CHAIN) is set, then the sg field contains a pointer
 * to the next sg table list.  Otherwise the next entry is at sg + 1.  This
 * can be determined using the sg_is_chain() function.
 *
 * If flags bit 0 (SG_END) is set, then this sg entry is the last element
 * in a list, which can be determined using the sg_is_last() function.
52  *
53  * See sg_next().
54  *
55  */
56 
/*
 * One entry of a scatter/gather list.  A normal entry describes a run of
 * bytes within a page; a chain entry (SG_CHAIN set) instead links to the
 * next chunk of the list via sl_un.sg.
 */
struct scatterlist {
	union {
		struct vm_page		*page;	/* backing page (normal entry) */
		struct scatterlist	*sg;	/* next chunk (chain entry) */
	} sl_un;
	unsigned long	offset;		/* byte offset of data within first page */
	uint32_t	length;		/* length of data in bytes */
	dma_addr_t	dma_address;	/* bus address set up by DMA mapping */
	uint32_t	flags;		/* SG_END and/or SG_CHAIN */
};
67 
/*
 * Header for a (possibly chained) scatterlist allocation; managed by
 * sg_alloc_table()/sg_free_table().
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
73 
/*
 * Page-granular iteration state over a scatterlist.  Two iterator
 * flavors share this struct: _sg_iter_init()/_sg_iter_next() use
 * 'maxents', while __sg_page_iter_start()/__sg_page_iter_next() use
 * '__nents' and '__pg_advance'.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* current sg entry */
	unsigned int		sg_pgoffset;	/* page index within *sg */
	unsigned int		maxents;	/* entries left (_sg_iter_* flavor) */
	unsigned int		__nents;	/* entries left (__sg_page_iter_* flavor) */
	int			__pg_advance;	/* pages to advance on next step */
};
81 
82 
83 /*
84  * Maximum number of entries that will be allocated in one piece, if
85  * a list larger than this is required then chaining will be utilized.
86  */
87 #define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))
88 
/* Accessors for the fields of a scatterlist entry. */
#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

/* scatterlist 'flags' bits. */
#define	SG_END		0x01	/* entry terminates the list */
#define	SG_CHAIN	0x02	/* entry is a link to another list chunk */
96 
97 static inline void
98 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
99     unsigned int offset)
100 {
101 	sg_page(sg) = page;
102 	sg_dma_len(sg) = len;
103 	sg->offset = offset;
104 	if (offset > PAGE_SIZE)
105 		panic("sg_set_page: Invalid offset %d\n", offset);
106 }
107 
108 static inline void
109 sg_init_table(struct scatterlist *sg, unsigned int nents)
110 {
111 	bzero(sg, sizeof(*sg) * nents);
112 	sg[nents - 1].flags = SG_END;
113 }
114 
115 static inline struct scatterlist *
116 sg_next(struct scatterlist *sg)
117 {
118 	if (sg->flags & SG_END)
119 		return (NULL);
120 	sg++;
121 	if (sg->flags & SG_CHAIN)
122 		sg = sg_scatternext(sg);
123 	return (sg);
124 }
125 
/*
 * Physical address of the data described by an sg entry: the backing
 * page's physical address plus the in-page byte offset.
 */
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}
131 
132 /**
133  * sg_chain - Chain two sglists together
134  * @prv:        First scatterlist
135  * @prv_nents:  Number of entries in prv
136  * @sgl:        Second scatterlist
137  *
138  * Description:
139  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
140  *
141  **/
142 static inline void
143 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
144 					struct scatterlist *sgl)
145 {
146 /*
147  * offset and length are unused for chain entry.  Clear them.
148  */
149 	struct scatterlist *sg = &prv[prv_nents - 1];
150 
151 	sg->offset = 0;
152 	sg->length = 0;
153 
154 	/*
155 	 * Indicate a link pointer, and set the link to the second list.
156 	 */
157 	sg->flags = SG_CHAIN;
158 	sg->sl_un.sg = sgl;
159 }
160 
161 /**
162  * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry to mark as the end of the scatterlist
164  *
165  * Description:
166  *   Marks the passed in sg entry as the termination point for the sg
167  *   table. A call to sg_next() on this entry will return NULL.
168  *
169  **/
170 static inline void sg_mark_end(struct scatterlist *sg)
171 {
172         sg->flags = SG_END;
173 }
174 
175 /**
176  * __sg_free_table - Free a previously mapped sg table
177  * @table:      The sg table header to use
178  * @max_ents:   The maximum number of entries per single scatterlist
179  *
180  *  Description:
181  *    Free an sg table previously allocated and setup with
182  *    __sg_alloc_table().  The @max_ents value must be identical to
183  *    that previously used with __sg_alloc_table().
184  *
185  **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	/* Freeing an unallocated (or already-freed) table is a no-op. */
	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			/* Grab the chain link before freeing this chunk. */
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}
221 
222 /**
223  * sg_free_table - Free a previously allocated sg table
224  * @table:      The mapped sg table header
225  *
226  **/
227 static inline void
228 sg_free_table(struct sg_table *table)
229 {
230 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
231 }
232 
233 /**
234  * __sg_alloc_table - Allocate and initialize an sg table with given allocator
235  * @table:      The sg table header to use
236  * @nents:      Number of entries in sg list
237  * @max_ents:   The maximum number of entries the allocator returns per call
238  * @gfp_mask:   GFP allocation mask
239  *
240  * Description:
241  *   This function returns a @table @nents long. The allocator is
242  *   defined to return scatterlist chunks of maximum size @max_ents.
243  *   Thus if @nents is bigger than @max_ents, the scatterlists will be
244  *   chained in units of @max_ents.
245  *
246  * Notes:
247  *   If this function returns non-0 (eg failure), the caller must call
248  *   __sg_free_table() to cleanup any leftover allocations.
249  *
250  **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * When chaining, the last slot of each chunk holds the
		 * link to the next chunk, so a full chunk contributes
		 * only max_ents - 1 usable entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
312 
313 /**
314  * sg_alloc_table - Allocate and initialize an sg table
315  * @table:      The sg table header to use
316  * @nents:      Number of entries in sg list
317  * @gfp_mask:   GFP allocation mask
318  *
319  *  Description:
320  *    Allocate and initialize an sg table. If @nents@ is larger than
321  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
322  *
323  **/
324 
325 static inline int
326 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
327 {
328 	int ret;
329 
330 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
331 		gfp_mask);
332 	if (unlikely(ret))
333 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
334 
335 	return ret;
336 }
337 
338 static inline int
339 sg_nents(struct scatterlist *sg)
340 {
341 	int nents;
342 	for (nents = 0; sg; sg = sg_next(sg))
343 		nents++;
344 	return nents;
345 }
346 
347 static inline void
348 __sg_page_iter_start(struct sg_page_iter *piter,
349 			  struct scatterlist *sglist, unsigned int nents,
350 			  unsigned long pgoffset)
351 {
352 	piter->__pg_advance = 0;
353 	piter->__nents = nents;
354 
355 	piter->sg = sglist;
356 	piter->sg_pgoffset = pgoffset;
357 }
358 
359 /*
360  * Iterate pages in sg list.
361  */
362 static inline void
363 _sg_iter_next(struct sg_page_iter *iter)
364 {
365 	struct scatterlist *sg;
366 	unsigned int pgcount;
367 
368 	sg = iter->sg;
369 	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
370 
371 	++iter->sg_pgoffset;
372 	while (iter->sg_pgoffset >= pgcount) {
373 		iter->sg_pgoffset -= pgcount;
374 		sg = sg_next(sg);
375 		--iter->maxents;
376 		if (sg == NULL || iter->maxents == 0)
377 			break;
378 		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
379 	}
380 	iter->sg = sg;
381 }
382 
383 static inline int
384 sg_page_count(struct scatterlist *sg)
385 {
386 	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
387 }
388 
/*
 * Advance a __sg_page_iter_start()-style iterator to the next page.
 * Returns false once the list or the entry budget (__nents) is
 * exhausted, true while a valid page remains.
 */
static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	if (piter->__nents == 0)
		return (false);
	if (piter->sg == NULL)
		return (false);

	/* __pg_advance is 0 on the first call so the starting page is
	 * visited; afterwards each call advances by one page. */
	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	/* Step to following entries until the page index lands in one. */
	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (--piter->__nents == 0)
			return (false);
		if (piter->sg == NULL)
			return (false);
	}
	return (true);
}
410 
411 /*
412  * NOTE: pgoffset is really a page index, not a byte offset.
413  */
414 static inline void
415 _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
416 	      unsigned int nents, unsigned long pgoffset)
417 {
418 	if (nents) {
419 		/*
420 		 * Nominal case.  Note subtract 1 from starting page index
421 		 * for initial _sg_iter_next() call.
422 		 */
423 		iter->sg = sgl;
424 		iter->sg_pgoffset = pgoffset - 1;
425 		iter->maxents = nents;
426 		_sg_iter_next(iter);
427 	} else {
428 		/*
429 		 * Degenerate case
430 		 */
431 		iter->sg = NULL;
432 		iter->sg_pgoffset = 0;
433 		iter->maxents = 0;
434 	}
435 }
436 
/*
 * Return the vm_page the iterator currently points at: the
 * sg_pgoffset'th page of the current sg entry.
 */
static inline struct vm_page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}
442 
443 static inline dma_addr_t
444 sg_page_iter_dma_address(struct sg_page_iter *spi)
445 {
446 	return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
447 }
448 
/*
 * Iterate over every page of an sg list; 'iter' is a struct
 * sg_page_iter *.  NOTE: pgoffset is a starting page index, not a
 * byte offset (see _sg_iter_init()).
 */
#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/* Iterate over the first 'sgmax' entries of an sg list. */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
455 
456 /*
457  *
458  * XXX please review these
459  */
460 static inline size_t
461 sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
462 		      const void *buf, size_t buflen, off_t skip)
463 {
464 	off_t off;
465 	int len, curlen, curoff;
466 	struct sg_page_iter iter;
467 	struct scatterlist *sg;
468 	struct vm_page *page;
469 	char *vaddr;
470 
471 	off = 0;
472 	for_each_sg_page(sgl, &iter, nents, 0) {
473 		sg = iter.sg;
474 		curlen = sg->length;
475 		curoff = sg->offset;
476 		if (skip && curlen >= skip) {
477 			skip -= curlen;
478 			continue;
479 		}
480 		if (skip) {
481 			curlen -= skip;
482 			curoff += skip;
483 			skip = 0;
484 		}
485 		len = min(curlen, buflen - off);
486 		page = sg_page_iter_page(&iter);
487 		vaddr = (char *)kmap(page) + sg->offset;
488 		memcpy(vaddr, (const char *)buf + off, len);
489 		off += len;
490 		kunmap(page);
491 	}
492 
493 	return (off);
494 }
495 
496 
497 static inline size_t
498 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
499 		     const char *buf, size_t buflen)
500 {
501 	return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
502 }
503 
504 static inline size_t
505 sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
506 		   void *buf, size_t buflen, off_t skip)
507 {
508 	off_t off;
509 	int len, curlen, curoff;
510 	struct sg_page_iter iter;
511 	struct scatterlist *sg;
512 	struct vm_page *page;
513 	char *vaddr;
514 
515 	off = 0;
516 	for_each_sg_page(sgl, &iter, nents, 0) {
517 		sg = iter.sg;
518 		curlen = sg->length;
519 		curoff = sg->offset;
520 		if (skip && curlen >= skip) {
521 			skip -= curlen;
522 			continue;
523 		}
524 		if (skip) {
525 			curlen -= skip;
526 			curoff += skip;
527 			skip = 0;
528 		}
529 		len = min(curlen, buflen - off);
530 		page = sg_page_iter_page(&iter);
531 		vaddr = (char *)kmap(page) + sg->offset;
532 		memcpy((char *)buf + off, vaddr, len);
533 		off += len;
534 		kunmap(page);
535 	}
536 
537 	return (off);
538 }
539 
540 static inline size_t
541 sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
542 		  char *buf, size_t buflen)
543 {
544 
545 	return (sg_pcopy_to_buffer(sgl, nents, buf, buflen, 0));
546 }
547 
548 static inline bool
549 sg_is_last(struct scatterlist *sg)
550 {
551 	return (sg->flags & SG_END);
552 }
553 
554 static inline bool
555 sg_is_chain(struct scatterlist *sg)
556 {
557 	return (sg->flags & SG_CHAIN);
558 }
559 
560 static inline struct scatterlist *
561 sg_chain_ptr(struct scatterlist *sg)
562 {
563 	return sg->sl_un.sg;
564 }
565 
566 #endif	/* _LINUX_SCATTERLIST_H_ */
567