1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #ifndef	_LINUX_SCATTERLIST_H_
32 #define	_LINUX_SCATTERLIST_H_
33 
34 /*
35  * SG table design.
36  *
37  * If flags bit 0 is set, then the sg field contains a pointer to the next sg
38  * table list. Otherwise the next entry is at sg + 1, can be determined using
39  * the sg_is_chain() function.
40  *
41  * If flags bit 1 is set, then this sg entry is the last element in a list,
42  * can be determined using the sg_is_last() function.
43  *
44  * See sg_next().
45  *
46  */
47 
/*
 * One scatter/gather entry.  A normal entry describes a chunk of memory
 * (page + offset + length); a chain entry (flags & SG_CHAIN) instead
 * carries a pointer to the next scatterlist array in sl_un.sg.
 */
struct scatterlist {
	union {
		struct vm_page		*page;	/* backing page (normal entry) */
		struct scatterlist	*sg;	/* next array (chain entry) */
	} sl_un;
	dma_addr_t	address;	/* DMA address of the segment */
	unsigned long	offset;		/* byte offset into the page */
	uint32_t	length;		/* segment length in bytes */
	uint32_t	flags;		/* SG_END / SG_CHAIN */
};
58 
/*
 * Header for a (possibly chained) scatterlist.  Allocated and torn down
 * with sg_alloc_table()/sg_free_table().
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
64 
/*
 * Page-granular iterator state used by for_each_sg_page().
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* current entry; NULL when done */
	unsigned int		sg_pgoffset;	/* page index within *sg */
	unsigned int		maxents;	/* sg entries left to visit */
};
70 
71 /*
72  * Maximum number of entries that will be allocated in one piece, if
73  * a list larger than this is required then chaining will be utilized.
74  */
75 #define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))
76 
77 #define	sg_dma_address(sg)	(sg)->address
78 #define	sg_dma_len(sg)		(sg)->length
79 #define	sg_page(sg)		(sg)->sl_un.page
80 #define	sg_scatternext(sg)	(sg)->sl_un.sg
81 
82 #define	SG_END		0x01
83 #define	SG_CHAIN	0x02
84 
85 static inline void
86 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
87     unsigned int offset)
88 {
89 	sg_page(sg) = page;
90 	sg_dma_len(sg) = len;
91 	sg->offset = offset;
92 	if (offset > PAGE_SIZE)
93 		panic("sg_set_page: Invalid offset %d\n", offset);
94 }
95 
#if 0
/*
 * sg_set_buf - point an sg entry at an arbitrary kernel buffer.
 * Disabled: presumably virt_to_page() is not available/appropriate in
 * this compat layer -- confirm before enabling.
 */
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}
#endif
104 
105 static inline void
106 sg_init_table(struct scatterlist *sg, unsigned int nents)
107 {
108 	bzero(sg, sizeof(*sg) * nents);
109 	sg[nents - 1].flags = SG_END;
110 }
111 
112 static inline struct scatterlist *
113 sg_next(struct scatterlist *sg)
114 {
115 	if (sg->flags & SG_END)
116 		return (NULL);
117 	sg++;
118 	if (sg->flags & SG_CHAIN)
119 		sg = sg_scatternext(sg);
120 	return (sg);
121 }
122 
#if 0
/*
 * sg_phys - physical address of the fragment described by @sg.
 * Disabled: presumably vm_page's phys_addr field/semantics differ in
 * this environment -- confirm before enabling.
 */
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}
#endif
130 
131 /**
132  * sg_chain - Chain two sglists together
133  * @prv:        First scatterlist
134  * @prv_nents:  Number of entries in prv
135  * @sgl:        Second scatterlist
136  *
137  * Description:
138  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
139  *
140  **/
141 static inline void
142 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
143 					struct scatterlist *sgl)
144 {
145 /*
146  * offset and length are unused for chain entry.  Clear them.
147  */
148 	struct scatterlist *sg = &prv[prv_nents - 1];
149 
150 	sg->offset = 0;
151 	sg->length = 0;
152 
153 	/*
154 	 * Indicate a link pointer, and set the link to the second list.
155 	 */
156 	sg->flags = SG_CHAIN;
157 	sg->sl_un.sg = sgl;
158 }
159 
160 /**
161  * sg_mark_end - Mark the end of the scatterlist
162  * @sg:          SG entryScatterlist
163  *
164  * Description:
165  *   Marks the passed in sg entry as the termination point for the sg
166  *   table. A call to sg_next() on this entry will return NULL.
167  *
168  **/
169 static inline void sg_mark_end(struct scatterlist *sg)
170 {
171         sg->flags = SG_END;
172 }
173 
174 /**
175  * __sg_free_table - Free a previously mapped sg table
176  * @table:      The sg table header to use
177  * @max_ents:   The maximum number of entries per single scatterlist
178  *
179  *  Description:
180  *    Free an sg table previously allocated and setup with
181  *    __sg_alloc_table().  The @max_ents value must be identical to
182  *    that previously used with __sg_alloc_table().
183  *
184  **/
185 static inline void
186 __sg_free_table(struct sg_table *table, unsigned int max_ents)
187 {
188 	struct scatterlist *sgl, *next;
189 
190 	if (unlikely(!table->sgl))
191 		return;
192 
193 	sgl = table->sgl;
194 	while (table->orig_nents) {
195 		unsigned int alloc_size = table->orig_nents;
196 		unsigned int sg_size;
197 
198 		/*
199 		 * If we have more than max_ents segments left,
200 		 * then assign 'next' to the sg table after the current one.
201 		 * sg_size is then one less than alloc size, since the last
202 		 * element is the chain pointer.
203 		 */
204 		if (alloc_size > max_ents) {
205 			next = sgl[max_ents - 1].sl_un.sg;
206 			alloc_size = max_ents;
207 			sg_size = alloc_size - 1;
208 		} else {
209 			sg_size = alloc_size;
210 			next = NULL;
211 		}
212 
213 		table->orig_nents -= sg_size;
214 		kfree(sgl);
215 		sgl = next;
216 	}
217 
218 	table->sgl = NULL;
219 }
220 
221 /**
222  * sg_free_table - Free a previously allocated sg table
223  * @table:      The mapped sg table header
224  *
225  **/
226 static inline void
227 sg_free_table(struct sg_table *table)
228 {
229 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
230 }
231 
232 /**
233  * __sg_alloc_table - Allocate and initialize an sg table with given allocator
234  * @table:      The sg table header to use
235  * @nents:      Number of entries in sg list
236  * @max_ents:   The maximum number of entries the allocator returns per call
237  * @gfp_mask:   GFP allocation mask
238  *
239  * Description:
240  *   This function returns a @table @nents long. The allocator is
241  *   defined to return scatterlist chunks of maximum size @max_ents.
242  *   Thus if @nents is bigger than @max_ents, the scatterlists will be
243  *   chained in units of @max_ents.
244  *
245  * Notes:
246  *   If this function returns non-0 (eg failure), the caller must call
247  *   __sg_free_table() to cleanup any leftover allocations.
248  *
249  **/
250 static inline int
251 __sg_alloc_table(struct sg_table *table, unsigned int nents,
252 		unsigned int max_ents, gfp_t gfp_mask)
253 {
254 	struct scatterlist *sg, *prv;
255 	unsigned int left;
256 
257 	memset(table, 0, sizeof(*table));
258 
259 	if (nents == 0)
260 		return -EINVAL;
261 	left = nents;
262 	prv = NULL;
263 	do {
264 		unsigned int sg_size, alloc_size = left;
265 
266 		if (alloc_size > max_ents) {
267 			alloc_size = max_ents;
268 			sg_size = alloc_size - 1;
269 		} else
270 			sg_size = alloc_size;
271 
272 		left -= sg_size;
273 
274 		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
275 		if (unlikely(!sg)) {
276 		/*
277 		 * Adjust entry count to reflect that the last
278 		 * entry of the previous table won't be used for
279 		 * linkage.  Without this, sg_kfree() may get
280 		 * confused.
281 		 */
282 			if (prv)
283 				table->nents = ++table->orig_nents;
284 
285 			return -ENOMEM;
286 		}
287 
288 		sg_init_table(sg, alloc_size);
289 		table->nents = table->orig_nents += sg_size;
290 
291 		/*
292 		 * If this is the first mapping, assign the sg table header.
293 		 * If this is not the first mapping, chain previous part.
294 		 */
295 		if (prv)
296 			sg_chain(prv, max_ents, sg);
297 		else
298 			table->sgl = sg;
299 
300 		/*
301 		* If no more entries after this one, mark the end
302 		*/
303 		if (!left)
304 			sg_mark_end(&sg[sg_size - 1]);
305 
306 		prv = sg;
307 	} while (left);
308 
309 	return 0;
310 }
311 
312 /**
313  * sg_alloc_table - Allocate and initialize an sg table
314  * @table:      The sg table header to use
315  * @nents:      Number of entries in sg list
316  * @gfp_mask:   GFP allocation mask
317  *
318  *  Description:
319  *    Allocate and initialize an sg table. If @nents@ is larger than
320  *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
321  *
322  **/
323 
324 static inline int
325 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
326 {
327 	int ret;
328 
329 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
330 		gfp_mask);
331 	if (unlikely(ret))
332 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
333 
334 	return ret;
335 }
336 
337 /*
338  * Iterate pages in sg list.
339  */
340 static inline void
341 _sg_iter_next(struct sg_page_iter *iter)
342 {
343 	struct scatterlist *sg;
344 	unsigned int pgcount;
345 
346 	sg = iter->sg;
347 	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
348 
349 	++iter->sg_pgoffset;
350 	while (iter->sg_pgoffset >= pgcount) {
351 		iter->sg_pgoffset -= pgcount;
352 		sg = sg_next(sg);
353 		--iter->maxents;
354 		if (sg == NULL || iter->maxents == 0)
355 			break;
356 		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
357 	}
358 	iter->sg = sg;
359 }
360 
361 /*
362  * NOTE: pgoffset is really a page index, not a byte offset.
363  */
364 static inline void
365 _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
366 	      unsigned int nents, unsigned long pgoffset)
367 {
368 	if (nents) {
369 		/*
370 		 * Nominal case.  Note subtract 1 from starting page index
371 		 * for initial _sg_iter_next() call.
372 		 */
373 		iter->sg = sgl;
374 		iter->sg_pgoffset = pgoffset - 1;
375 		iter->maxents = nents;
376 		_sg_iter_next(iter);
377 	} else {
378 		/*
379 		 * Degenerate case
380 		 */
381 		iter->sg = NULL;
382 		iter->sg_pgoffset = 0;
383 		iter->maxents = 0;
384 	}
385 }
386 
387 static inline dma_addr_t
388 sg_page_iter_dma_address(struct sg_page_iter *spi)
389 {
390 	return spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT);
391 }
392 
/*
 * for_each_sg_page - iterate page by page over up to @nents entries of
 * @sgl, starting at page index @pgoffset; @iter is a struct sg_page_iter *.
 */
#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/*
 * for_each_sg - iterate entry by entry over @sgmax entries of @sglist;
 * @_itr is an integer loop counter.  Does not stop early on list end,
 * so @sgmax must not exceed the list length.
 */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
399 
400 #endif	/* _LINUX_SCATTERLIST_H_ */
401