1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7  * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef	_LINUX_SCATTERLIST_H_
33 #define	_LINUX_SCATTERLIST_H_
34 
35 #include <linux/string.h>
36 #include <linux/types.h>
37 #include <linux/bug.h>
38 #include <linux/mm.h>
39 #include <asm/io.h>
40 
/*
 * SG table design.
 *
 * If the SG_CHAIN flag (bit 1) is set in an entry, then its sg field
 * contains a pointer to the next scatterlist chunk; otherwise the next
 * entry is simply at sg + 1.
 *
 * If the SG_END flag (bit 0) is set, then this sg entry is the last
 * element in a list, which can be determined using the sg_next() return
 * value (NULL).
 *
 * See sg_next().
 *
 */
54 
/*
 * One scatter/gather segment.  An entry normally describes a page-backed
 * buffer; a chain entry instead carries a pointer to the next chunk of
 * the list (the union discriminates via the SG_CHAIN flag).
 */
struct scatterlist {
	union {
		struct vm_page		*page;	/* backing page (normal entry) */
		struct scatterlist	*sg;	/* next chunk (SG_CHAIN entry) */
	} sl_un;
	unsigned long	offset;		/* byte offset into the page */
	uint32_t	length;		/* byte length of the segment */
	dma_addr_t	dma_address;	/* bus address after dma mapping */
	uint32_t	flags;		/* SG_END and/or SG_CHAIN */
};
65 
/*
 * Header for a (possibly chained) scatterlist allocated via
 * sg_alloc_table() and released via sg_free_table().
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
71 
/*
 * Page-granular iterator state.  Two iterator families share this
 * structure: __sg_page_iter_start()/__sg_page_iter_next() use the
 * __nents/__pg_advance fields, while _sg_iter_init()/_sg_iter_next()
 * use maxents instead.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* current entry, NULL when done */
	unsigned int		sg_pgoffset;	/* page index */
	unsigned int		maxents;	/* entries left (_sg_iter_* family) */
	unsigned int		__nents;	/* entries left (__sg_page_iter_* family) */
	int			__pg_advance;	/* 0 before first next() call, 1 after */
};
79 
80 
/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

/* Field accessors used by both this header and its consumers. */
#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

/* Entry flags: note these are NOT the same bit assignments as Linux. */
#define	SG_END		0x01	/* last element of the whole list */
#define	SG_CHAIN	0x02	/* entry is a link to the next chunk */
94 
95 static inline void
96 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
97     unsigned int offset)
98 {
99 	sg_page(sg) = page;
100 	sg_dma_len(sg) = len;
101 	sg->offset = offset;
102 	if (offset > PAGE_SIZE)
103 		panic("sg_set_page: Invalid offset %d\n", offset);
104 }
105 
/*
 * sg_set_buf() is compiled out: it needs a working virt_to_page() for
 * arbitrary kernel virtual addresses, which this port does not provide.
 * Kept for reference against the Linux original.
 */
#if 0
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}
#endif
114 
115 static inline void
116 sg_init_table(struct scatterlist *sg, unsigned int nents)
117 {
118 	bzero(sg, sizeof(*sg) * nents);
119 	sg[nents - 1].flags = SG_END;
120 }
121 
122 static inline struct scatterlist *
123 sg_next(struct scatterlist *sg)
124 {
125 	if (sg->flags & SG_END)
126 		return (NULL);
127 	sg++;
128 	if (sg->flags & SG_CHAIN)
129 		sg = sg_scatternext(sg);
130 	return (sg);
131 }
132 
133 static inline vm_paddr_t
134 sg_phys(struct scatterlist *sg)
135 {
136 	return sg_page(sg)->phys_addr + sg->offset;
137 }
138 
139 /**
140  * sg_chain - Chain two sglists together
141  * @prv:        First scatterlist
142  * @prv_nents:  Number of entries in prv
143  * @sgl:        Second scatterlist
144  *
145  * Description:
146  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
147  *
148  **/
149 static inline void
150 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
151 					struct scatterlist *sgl)
152 {
153 /*
154  * offset and length are unused for chain entry.  Clear them.
155  */
156 	struct scatterlist *sg = &prv[prv_nents - 1];
157 
158 	sg->offset = 0;
159 	sg->length = 0;
160 
161 	/*
162 	 * Indicate a link pointer, and set the link to the second list.
163 	 */
164 	sg->flags = SG_CHAIN;
165 	sg->sl_un.sg = sgl;
166 }
167 
168 /**
169  * sg_mark_end - Mark the end of the scatterlist
170  * @sg:          SG entryScatterlist
171  *
172  * Description:
173  *   Marks the passed in sg entry as the termination point for the sg
174  *   table. A call to sg_next() on this entry will return NULL.
175  *
176  **/
177 static inline void sg_mark_end(struct scatterlist *sg)
178 {
179         sg->flags = SG_END;
180 }
181 
182 /**
183  * __sg_free_table - Free a previously mapped sg table
184  * @table:      The sg table header to use
185  * @max_ents:   The maximum number of entries per single scatterlist
186  *
187  *  Description:
188  *    Free an sg table previously allocated and setup with
189  *    __sg_alloc_table().  The @max_ents value must be identical to
190  *    that previously used with __sg_alloc_table().
191  *
192  **/
193 static inline void
194 __sg_free_table(struct sg_table *table, unsigned int max_ents)
195 {
196 	struct scatterlist *sgl, *next;
197 
198 	if (unlikely(!table->sgl))
199 		return;
200 
201 	sgl = table->sgl;
202 	while (table->orig_nents) {
203 		unsigned int alloc_size = table->orig_nents;
204 		unsigned int sg_size;
205 
206 		/*
207 		 * If we have more than max_ents segments left,
208 		 * then assign 'next' to the sg table after the current one.
209 		 * sg_size is then one less than alloc size, since the last
210 		 * element is the chain pointer.
211 		 */
212 		if (alloc_size > max_ents) {
213 			next = sgl[max_ents - 1].sl_un.sg;
214 			alloc_size = max_ents;
215 			sg_size = alloc_size - 1;
216 		} else {
217 			sg_size = alloc_size;
218 			next = NULL;
219 		}
220 
221 		table->orig_nents -= sg_size;
222 		kfree(sgl);
223 		sgl = next;
224 	}
225 
226 	table->sgl = NULL;
227 }
228 
229 /**
230  * sg_free_table - Free a previously allocated sg table
231  * @table:      The mapped sg table header
232  *
233  **/
234 static inline void
235 sg_free_table(struct sg_table *table)
236 {
237 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
238 }
239 
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	/* An empty table is a caller error, not an allocation failure. */
	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;	/* chunk allocated on the previous loop pass, if any */
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * A full chunk reserves its last slot for the chain
		 * pointer to the next chunk, so its payload capacity
		 * (sg_size) is one less than its slot count (alloc_size).
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
		/*
		 * Adjust entry count to reflect that the last
		 * entry of the previous table won't be used for
		 * linkage.  Without this, sg_kfree() may get
		 * confused.
		 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		/* Keep nents in lockstep with orig_nents until dma mapping. */
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		* If no more entries after this one, mark the end
		*/
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
319 
320 /**
321  * sg_alloc_table - Allocate and initialize an sg table
322  * @table:      The sg table header to use
323  * @nents:      Number of entries in sg list
324  * @gfp_mask:   GFP allocation mask
325  *
326  *  Description:
327  *    Allocate and initialize an sg table. If @nents@ is larger than
328  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
329  *
330  **/
331 
332 static inline int
333 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
334 {
335 	int ret;
336 
337 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
338 		gfp_mask);
339 	if (unlikely(ret))
340 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
341 
342 	return ret;
343 }
344 
345 static inline int
346 sg_nents(struct scatterlist *sg)
347 {
348 	int nents;
349 	for (nents = 0; sg; sg = sg_next(sg))
350 		nents++;
351 	return nents;
352 }
353 
354 static inline void
355 __sg_page_iter_start(struct sg_page_iter *piter,
356 			  struct scatterlist *sglist, unsigned int nents,
357 			  unsigned long pgoffset)
358 {
359 	piter->__pg_advance = 0;
360 	piter->__nents = nents;
361 
362 	piter->sg = sglist;
363 	piter->sg_pgoffset = pgoffset;
364 }
365 
366 /*
367  * Iterate pages in sg list.
368  */
369 static inline void
370 _sg_iter_next(struct sg_page_iter *iter)
371 {
372 	struct scatterlist *sg;
373 	unsigned int pgcount;
374 
375 	sg = iter->sg;
376 	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
377 
378 	++iter->sg_pgoffset;
379 	while (iter->sg_pgoffset >= pgcount) {
380 		iter->sg_pgoffset -= pgcount;
381 		sg = sg_next(sg);
382 		--iter->maxents;
383 		if (sg == NULL || iter->maxents == 0)
384 			break;
385 		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
386 	}
387 	iter->sg = sg;
388 }
389 
390 static inline int
391 sg_page_count(struct scatterlist *sg)
392 {
393 	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
394 }
395 
396 static inline bool
397 __sg_page_iter_next(struct sg_page_iter *piter)
398 {
399 	if (piter->__nents == 0)
400 		return (false);
401 	if (piter->sg == NULL)
402 		return (false);
403 
404 	piter->sg_pgoffset += piter->__pg_advance;
405 	piter->__pg_advance = 1;
406 
407 	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
408 		piter->sg_pgoffset -= sg_page_count(piter->sg);
409 		piter->sg = sg_next(piter->sg);
410 		if (--piter->__nents == 0)
411 			return (false);
412 		if (piter->sg == NULL)
413 			return (false);
414 	}
415 	return (true);
416 }
417 
418 /*
419  * NOTE: pgoffset is really a page index, not a byte offset.
420  */
421 static inline void
422 _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
423 	      unsigned int nents, unsigned long pgoffset)
424 {
425 	if (nents) {
426 		/*
427 		 * Nominal case.  Note subtract 1 from starting page index
428 		 * for initial _sg_iter_next() call.
429 		 */
430 		iter->sg = sgl;
431 		iter->sg_pgoffset = pgoffset - 1;
432 		iter->maxents = nents;
433 		_sg_iter_next(iter);
434 	} else {
435 		/*
436 		 * Degenerate case
437 		 */
438 		iter->sg = NULL;
439 		iter->sg_pgoffset = 0;
440 		iter->maxents = 0;
441 	}
442 }
443 
444 static inline struct vm_page *
445 sg_page_iter_page(struct sg_page_iter *piter)
446 {
447 	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
448 }
449 
450 static inline dma_addr_t
451 sg_page_iter_dma_address(struct sg_page_iter *spi)
452 {
453 	return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
454 }
455 
/*
 * Iterate over pages: iter is a struct sg_page_iter *; terminates when
 * _sg_iter_next() leaves iter->sg NULL.
 */
#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/*
 * Iterate over entries.  NOTE(review): only bounded by sgmax; assumes
 * the list holds at least sgmax entries (sg_next() may return NULL
 * earlier) — matches the Linux convention, callers must pass a valid
 * count.
 */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
462 
463 #endif	/* _LINUX_SCATTERLIST_H_ */
464