1 /*
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7  * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
8  * Copyright (c) 2017-2020 François Tigeot <ftigeot@wolfpond.org>
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice unmodified, this list of conditions, and the following
16  *    disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #ifndef	_LINUX_SCATTERLIST_H_
34 #define	_LINUX_SCATTERLIST_H_
35 
36 #include <linux/string.h>
37 #include <linux/types.h>
38 #include <linux/bug.h>
39 #include <linux/mm.h>
40 #include <asm/io.h>
41 
42 /*
43  * SG table design.
44  *
45  * If flags bit 0 is set, then the sg field contains a pointer to the next sg
46  * table list. Otherwise the next entry is at sg + 1, can be determined using
47  * the sg_is_chain() function.
48  *
49  * If flags bit 1 is set, then this sg entry is the last element in a list,
50  * can be determined using the sg_is_last() function.
51  *
52  * See sg_next().
53  *
54  */
55 
/*
 * One segment of a scatter/gather list.  A normal entry describes a span
 * of bytes inside a single page; a chain entry (SG_CHAIN set) instead
 * links to the next chunk of entries.
 */
struct scatterlist {
	union {
		struct page		*page;	/* backing page (normal entry) */
		struct scatterlist	*sg;	/* next chunk (chain entry) */
	} sl_un;
	unsigned long	offset;		/* byte offset of data within the page */
	uint32_t	length;		/* length of the segment in bytes */
	dma_addr_t	dma_address;	/* bus address, valid after dma mapping */
	uint32_t	flags;		/* SG_END and/or SG_CHAIN */
};
66 
/*
 * Header for a (possibly chained) scatterlist allocation.
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
72 
/*
 * Cursor for page-at-a-time iteration over a scatterlist.
 *
 * Two iteration schemes share this structure: _sg_iter_init()/_sg_iter_next()
 * use sg/sg_pgoffset/maxents, while __sg_page_iter_start()/
 * __sg_page_iter_next() use sg/sg_pgoffset/__nents/__pg_advance.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* current sg entry */
	unsigned int		sg_pgoffset;	/* page index within current entry */
	unsigned int		maxents;	/* entries left (_sg_iter_* scheme) */
	unsigned int		__nents;	/* entries left (__sg_page_iter_* scheme) */
	int			__pg_advance;	/* pages to advance on next step */
};
80 
81 
82 /*
83  * Maximum number of entries that will be allocated in one piece, if
84  * a list larger than this is required then chaining will be utilized.
85  */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

/* Field accessors; dma_address/length are meaningful once dma-mapped */
#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

/* flags bits: note SG_END is bit 0 and SG_CHAIN is bit 1 */
#define	SG_END		0x01
#define	SG_CHAIN	0x02
95 
96 static inline void
97 sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
98     unsigned int offset)
99 {
100 	sg_page(sg) = page;
101 	sg_dma_len(sg) = len;
102 	sg->offset = offset;
103 	if (offset > PAGE_SIZE)
104 		panic("sg_set_page: Invalid offset %d\n", offset);
105 }
106 
107 static inline void
108 sg_init_table(struct scatterlist *sg, unsigned int nents)
109 {
110 	bzero(sg, sizeof(*sg) * nents);
111 	sg[nents - 1].flags = SG_END;
112 }
113 
114 static inline struct scatterlist *
115 sg_next(struct scatterlist *sg)
116 {
117 	if (sg->flags & SG_END)
118 		return (NULL);
119 	sg++;
120 	if (sg->flags & SG_CHAIN)
121 		sg = sg_scatternext(sg);
122 	return (sg);
123 }
124 
/*
 * sg_phys - Physical address of the segment's data: the backing
 * vm_page's physical address plus the segment's byte offset.
 */
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return ((struct vm_page *)sg_page(sg))->phys_addr + sg->offset;
}
130 
131 /**
132  * sg_chain - Chain two sglists together
133  * @prv:        First scatterlist
134  * @prv_nents:  Number of entries in prv
135  * @sgl:        Second scatterlist
136  *
137  * Description:
138  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
139  *
140  **/
141 static inline void
142 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
143 					struct scatterlist *sgl)
144 {
145 /*
146  * offset and length are unused for chain entry.  Clear them.
147  */
148 	struct scatterlist *sg = &prv[prv_nents - 1];
149 
150 	sg->offset = 0;
151 	sg->length = 0;
152 
153 	/*
154 	 * Indicate a link pointer, and set the link to the second list.
155 	 */
156 	sg->flags = SG_CHAIN;
157 	sg->sl_un.sg = sgl;
158 }
159 
160 /**
161  * sg_mark_end - Mark the end of the scatterlist
162  * @sg:          SG entryScatterlist
163  *
164  * Description:
165  *   Marks the passed in sg entry as the termination point for the sg
166  *   table. A call to sg_next() on this entry will return NULL.
167  *
168  **/
169 static inline void sg_mark_end(struct scatterlist *sg)
170 {
171         sg->flags = SG_END;
172 }
173 
174 /**
175  * __sg_free_table - Free a previously mapped sg table
176  * @table:      The sg table header to use
177  * @max_ents:   The maximum number of entries per single scatterlist
178  *
179  *  Description:
180  *    Free an sg table previously allocated and setup with
181  *    __sg_alloc_table().  The @max_ents value must be identical to
182  *    that previously used with __sg_alloc_table().
183  *
184  **/
185 void __sg_free_table(struct sg_table *table, unsigned int max_ents);
186 
187 /**
188  * sg_free_table - Free a previously allocated sg table
189  * @table:      The mapped sg table header
190  *
191  **/
192 static inline void
193 sg_free_table(struct sg_table *table)
194 {
195 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
196 }
197 
/*
 * Allocate an sg table of @nents entries with at most @max_ents entries
 * per chunk, chaining chunks as needed.  Returns 0 on success, non-zero
 * on failure (see sg_alloc_table()).  Implemented out of line.
 */
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask);
200 
201 /**
202  * sg_alloc_table - Allocate and initialize an sg table
203  * @table:      The sg table header to use
204  * @nents:      Number of entries in sg list
205  * @gfp_mask:   GFP allocation mask
206  *
207  *  Description:
208  *    Allocate and initialize an sg table. If @nents@ is larger than
209  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
210  *
211  **/
212 
213 static inline int
214 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
215 {
216 	int ret;
217 
218 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
219 		gfp_mask);
220 	if (unlikely(ret))
221 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
222 
223 	return ret;
224 }
225 
226 static inline int
227 sg_nents(struct scatterlist *sg)
228 {
229 	int nents;
230 	for (nents = 0; sg; sg = sg_next(sg))
231 		nents++;
232 	return nents;
233 }
234 
235 static inline void
236 __sg_page_iter_start(struct sg_page_iter *piter,
237 			  struct scatterlist *sglist, unsigned int nents,
238 			  unsigned long pgoffset)
239 {
240 	piter->__pg_advance = 0;
241 	piter->__nents = nents;
242 
243 	piter->sg = sglist;
244 	piter->sg_pgoffset = pgoffset;
245 }
246 
247 /*
248  * Iterate pages in sg list.
249  */
250 static inline void
251 _sg_iter_next(struct sg_page_iter *iter)
252 {
253 	struct scatterlist *sg;
254 	unsigned int pgcount;
255 
256 	sg = iter->sg;
257 	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
258 
259 	++iter->sg_pgoffset;
260 	while (iter->sg_pgoffset >= pgcount) {
261 		iter->sg_pgoffset -= pgcount;
262 		sg = sg_next(sg);
263 		--iter->maxents;
264 		if (sg == NULL || iter->maxents == 0)
265 			break;
266 		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
267 	}
268 	iter->sg = sg;
269 }
270 
271 static inline int
272 sg_page_count(struct scatterlist *sg)
273 {
274 	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
275 }
276 
277 static inline bool
278 __sg_page_iter_next(struct sg_page_iter *piter)
279 {
280 	if (piter->__nents == 0)
281 		return (false);
282 	if (piter->sg == NULL)
283 		return (false);
284 
285 	piter->sg_pgoffset += piter->__pg_advance;
286 	piter->__pg_advance = 1;
287 
288 	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
289 		piter->sg_pgoffset -= sg_page_count(piter->sg);
290 		piter->sg = sg_next(piter->sg);
291 		if (--piter->__nents == 0)
292 			return (false);
293 		if (piter->sg == NULL)
294 			return (false);
295 	}
296 	return (true);
297 }
298 
299 /*
300  * NOTE: pgoffset is really a page index, not a byte offset.
301  */
302 static inline void
303 _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
304 	      unsigned int nents, unsigned long pgoffset)
305 {
306 	if (nents) {
307 		/*
308 		 * Nominal case.  Note subtract 1 from starting page index
309 		 * for initial _sg_iter_next() call.
310 		 */
311 		iter->sg = sgl;
312 		iter->sg_pgoffset = pgoffset - 1;
313 		iter->maxents = nents;
314 		_sg_iter_next(iter);
315 	} else {
316 		/*
317 		 * Degenerate case
318 		 */
319 		iter->sg = NULL;
320 		iter->sg_pgoffset = 0;
321 		iter->maxents = 0;
322 	}
323 }
324 
/*
 * sg_page_iter_page - The page the iterator currently points at: the
 * sg_pgoffset'th page of the current sg entry.
 */
static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}
330 
331 static inline dma_addr_t
332 sg_page_iter_dma_address(struct sg_page_iter *spi)
333 {
334 	return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
335 }
336 
/*
 * Iterate over every page covered by @sgl (at most @nents entries),
 * using @iter as the cursor; @pgoffset is the starting page index.
 */
#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/*
 * Iterate over the sg entries themselves.  The loop runs exactly
 * @sgmax times; the caller must ensure @sgmax does not exceed the
 * number of entries in the list (sg becomes NULL past the end).
 */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
343 
344 /*
345  *
346  * XXX please review these
347  */
348 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
349 		      const void *buf, size_t buflen, off_t skip);
350 
351 static inline size_t
352 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
353 		     const char *buf, size_t buflen)
354 {
355 	return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
356 }
357 
/*
 * Copy @buflen bytes out of the sg list into @buf after skipping the
 * first @skip bytes of the list.  Returns a size_t — presumably the
 * number of bytes copied; confirm against the out-of-line implementation.
 */
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		   void *buf, size_t buflen, off_t skip);

/*
 * sg_copy_to_buffer - Copy @buflen bytes from the start of the sg list
 * into @buf (no skip).
 */
static inline size_t
sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		  char *buf, size_t buflen)
{

	return (sg_pcopy_to_buffer(sgl, nents, buf, buflen, 0));
}
368 
369 static inline bool
370 sg_is_last(struct scatterlist *sg)
371 {
372 	return (sg->flags & SG_END);
373 }
374 
375 static inline bool
376 sg_is_chain(struct scatterlist *sg)
377 {
378 	return (sg->flags & SG_CHAIN);
379 }
380 
381 static inline struct scatterlist *
382 sg_chain_ptr(struct scatterlist *sg)
383 {
384 	return sg->sl_un.sg;
385 }
386 
/*
 * sg_alloc_table_from_pages - Build an sg table directly from an array
 * of pages.  Not implemented in this compat layer: always logs a
 * message and fails with -EINVAL.
 */
static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size, gfp_t gfp_mask)
{
	kprintf("sg_alloc_table_from_pages: Not implemented\n");
	return -EINVAL;
}
395 
396 #endif	/* _LINUX_SCATTERLIST_H_ */
397