/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
 * Copyright (c) 2017-2020 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <asm/io.h>

/*
 * SG table design.
 *
 * If the SG_CHAIN flag is set, then the sg field contains a pointer to the
 * next scatterlist array. Otherwise the next entry is at sg + 1. This can
 * be tested with the sg_is_chain() function.
 *
 * If the SG_END flag is set, then this sg entry is the last element in the
 * list. This can be tested with the sg_is_last() function.
 *
 * See sg_next().
 */

struct scatterlist {
	union {
		struct page		*page;
		struct scatterlist	*sg;
	} sl_un;
	unsigned long	offset;
	uint32_t	length;
	dma_addr_t	dma_address;
	uint32_t	flags;
};

struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};

struct sg_page_iter {
	struct scatterlist	*sg;
	unsigned int		sg_pgoffset;	/* page index */
	unsigned int		maxents;
	unsigned int		__nents;
	int			__pg_advance;
};

#define SCATTERLIST_MAX_SEGMENT	(UINT_MAX & LINUX_PAGE_MASK)

/*
 * Maximum number of entries that will be allocated in one piece.  If a
 * list larger than this is required, chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

#define	SG_END		0x01
#define	SG_CHAIN	0x02

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_page(sg) = page;
	sg_dma_len(sg) = len;
	sg->offset = offset;
	if (offset > PAGE_SIZE)
		panic("sg_set_page: Invalid offset %u\n", offset);
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}
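
/*
 * Example (illustrative sketch, not part of the original header): build a
 * two-entry list on caller-provided storage.  The helper name and its
 * parameters are hypothetical and exist only to demonstrate the
 * sg_init_table()/sg_set_page() pairing.
 */
static inline void
sg_example_fill_pair(struct scatterlist *sgl, struct page *first,
    struct page *second, unsigned int second_len)
{
	/* Zero both entries and mark sgl[1] as the end of the list. */
	sg_init_table(sgl, 2);

	/* First entry covers a whole page, the second a partial one. */
	sg_set_page(&sgl[0], first, PAGE_SIZE, 0);
	sg_set_page(&sgl[1], second, second_len, 0);
}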

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return ((struct vm_page *)sg_page(sg))->phys_addr + sg->offset;
}

/**
 * sg_chain - Chain two sglists together
 * @prv:        First scatterlist
 * @prv_nents:  Number of entries in prv
 * @sgl:        Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
					struct scatterlist *sgl)
{
	/*
	 * offset and length are unused for chain entry.  Clear them.
	 */
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;

	/*
	 * Indicate a link pointer, and set the link to the second list.
	 */
	sg->flags = SG_CHAIN;
	sg->sl_un.sg = sgl;
}
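
/*
 * Example (illustrative sketch, hypothetical helper): build a list that
 * spans two fixed-size arrays.  The last slot of the first array is given
 * up to hold the chain link, so it must not carry data of its own.
 */
static inline void
sg_example_init_chained(struct scatterlist *first, unsigned int first_ents,
    struct scatterlist *second, unsigned int second_ents)
{
	/* Terminate each array individually. */
	sg_init_table(first, first_ents);
	sg_init_table(second, second_ents);

	/* Turn the last entry of "first" into a link to "second". */
	sg_chain(first, first_ents, second);
}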

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->flags = SG_END;
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}

int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
		gfp_mask);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return ret;
}
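
/*
 * Example (illustrative sketch, hypothetical helper): allocate a table,
 * hand the entries to a caller-supplied fill callback, and release the
 * table again if the callback fails.  On success the caller owns the
 * table and frees it later with sg_free_table().
 */
static inline int
sg_example_table_setup(struct sg_table *table, unsigned int nents,
    gfp_t gfp_mask, int (*fill)(struct scatterlist *, unsigned int, void *),
    void *arg)
{
	int error;

	error = sg_alloc_table(table, nents, gfp_mask);
	if (error)
		return (error);

	error = fill(table->sgl, table->nents, arg);
	if (error)
		sg_free_table(table);
	return (error);
}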

static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}

static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

/*
 * Iterate pages in sg list.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

static inline int
sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	if (piter->__nents == 0)
		return (false);
	if (piter->sg == NULL)
		return (false);

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (--piter->__nents == 0)
			return (false);
		if (piter->sg == NULL)
			return (false);
	}
	return (true);
}

/*
 * NOTE: pgoffset is really a page index, not a byte offset.
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
	      unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		/*
		 * Nominal case.  Note subtract 1 from starting page index
		 * for initial _sg_iter_next() call.
		 */
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/*
		 * Degenerate case
		 */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}

static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
	return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
}

#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
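
/*
 * Example (illustrative sketch, hypothetical helper): tally the byte and
 * page totals of a list with the two iteration macros above.  Note that
 * the last argument of for_each_sg_page() is a starting page index, not a
 * byte offset.
 */
static inline void
sg_example_count(struct scatterlist *sgl, unsigned int nents,
    size_t *bytesp, unsigned int *pagesp)
{
	struct scatterlist *sg;
	struct sg_page_iter iter;
	unsigned int i;

	*bytesp = 0;
	*pagesp = 0;

	/* Walk the entries themselves. */
	for_each_sg(sgl, sg, nents, i)
		*bytesp += sg->length;

	/* Walk the backing pages, one iteration per page. */
	for_each_sg_page(sgl, &iter, nents, 0)
		(*pagesp)++;
}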

/*
 * XXX please review these
 */
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
		      const void *buf, size_t buflen, off_t skip);

static inline size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
		     const char *buf, size_t buflen)
{
	return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
}

size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		   void *buf, size_t buflen, off_t skip);

static inline size_t
sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		  char *buf, size_t buflen)
{
	return (sg_pcopy_to_buffer(sgl, nents, buf, buflen, 0));
}
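
/*
 * Example (illustrative sketch, hypothetical helper): zero "length" bytes
 * of a list by repeatedly pushing a small stack buffer through
 * sg_pcopy_from_buffer(), advancing the byte offset ("skip") each round.
 * This assumes the copy routine returns the number of bytes it actually
 * copied, as the Linux version of this API does.
 */
static inline size_t
sg_example_fill_zero(struct scatterlist *sgl, unsigned int nents,
    size_t length)
{
	char zeroes[64];
	size_t done, chunk, copied;

	memset(zeroes, 0, sizeof(zeroes));

	for (done = 0; done < length; done += copied) {
		chunk = length - done;
		if (chunk > sizeof(zeroes))
			chunk = sizeof(zeroes);
		copied = sg_pcopy_from_buffer(sgl, nents, zeroes, chunk, done);
		if (copied == 0)
			break;	/* ran past the end of the list */
	}
	return (done);
}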

static inline bool
sg_is_last(struct scatterlist *sg)
{
	return (sg->flags & SG_END);
}

static inline bool
sg_is_chain(struct scatterlist *sg)
{
	return (sg->flags & SG_CHAIN);
}

static inline struct scatterlist *
sg_chain_ptr(struct scatterlist *sg)
{
	return sg->sl_un.sg;
}
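
/*
 * Example (illustrative sketch, hypothetical helper): open-coded walk of a
 * possibly chained list using the predicates above.  This mirrors what
 * sg_next() does internally and is shown only to illustrate how SG_CHAIN
 * links and the SG_END terminator interact.
 */
static inline unsigned int
sg_example_count_entries(struct scatterlist *sg)
{
	unsigned int n = 0;

	while (sg != NULL) {
		if (sg_is_chain(sg)) {
			/* Chain entries carry no data; follow the link. */
			sg = sg_chain_ptr(sg);
			continue;
		}
		n++;
		if (sg_is_last(sg))
			break;
		sg++;
	}
	return (n);
}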

static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size, gfp_t gfp_mask)
{
	kprintf("sg_alloc_table_from_pages: Not implemented\n");
	return -EINVAL;
}

int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask);

#endif	/* _LINUX_SCATTERLIST_H_ */