1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
23  * Copyright (c) 2019 by Delphix. All rights reserved.
24  */
25 
26 /*
 * See abd.c for a general overview of the ARC buffered data (ABD).
28  *
29  * Linear buffers act exactly like normal buffers and are always mapped into the
30  * kernel's virtual memory space, while scattered ABD data chunks are allocated
31  * as physical pages and then mapped in only while they are actually being
32  * accessed through one of the abd_* library functions. Using scattered ABDs
33  * provides several benefits:
34  *
35  *  (1) They avoid use of kmem_*, preventing performance problems where running
36  *      kmem_reap on very large memory systems never finishes and causes
37  *      constant TLB shootdowns.
38  *
39  *  (2) Fragmentation is less of an issue since when we are at the limit of
40  *      allocatable space, we won't have to search around for a long free
41  *      hole in the VA space for large ARC allocations. Each chunk is mapped in
42  *      individually, so even if we are using HIGHMEM (see next point) we
43  *      wouldn't need to worry about finding a contiguous address range.
44  *
45  *  (3) If we are not using HIGHMEM, then all physical memory is always
46  *      mapped into the kernel's address space, so we also avoid the map /
47  *      unmap costs on each ABD access.
48  *
49  * If we are not using HIGHMEM, scattered buffers which have only one chunk
50  * can be treated as linear buffers, because they are contiguous in the
51  * kernel's virtual address space.  See abd_alloc_chunks() for details.
52  */
53 
54 #include <sys/abd_impl.h>
55 #include <sys/param.h>
56 #include <sys/zio.h>
57 #include <sys/arc.h>
58 #include <sys/zfs_context.h>
59 #include <sys/zfs_znode.h>
60 #ifdef _KERNEL
61 #include <linux/kmap_compat.h>
62 #include <linux/scatterlist.h>
63 #else
64 #define	MAX_ORDER	1
65 #endif
66 
67 typedef struct abd_stats {
68 	kstat_named_t abdstat_struct_size;
69 	kstat_named_t abdstat_linear_cnt;
70 	kstat_named_t abdstat_linear_data_size;
71 	kstat_named_t abdstat_scatter_cnt;
72 	kstat_named_t abdstat_scatter_data_size;
73 	kstat_named_t abdstat_scatter_chunk_waste;
74 	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
75 	kstat_named_t abdstat_scatter_page_multi_chunk;
76 	kstat_named_t abdstat_scatter_page_multi_zone;
77 	kstat_named_t abdstat_scatter_page_alloc_retry;
78 	kstat_named_t abdstat_scatter_sg_table_retry;
79 } abd_stats_t;
80 
81 static abd_stats_t abd_stats = {
82 	/* Amount of memory occupied by all of the abd_t struct allocations */
83 	{ "struct_size",			KSTAT_DATA_UINT64 },
84 	/*
85 	 * The number of linear ABDs which are currently allocated, excluding
86 	 * ABDs which don't own their data (for instance the ones which were
87 	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
88 	 * ABD takes ownership of its buf then it will become tracked.
89 	 */
90 	{ "linear_cnt",				KSTAT_DATA_UINT64 },
91 	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
92 	{ "linear_data_size",			KSTAT_DATA_UINT64 },
93 	/*
94 	 * The number of scatter ABDs which are currently allocated, excluding
95 	 * ABDs which don't own their data (for instance the ones which were
96 	 * allocated through abd_get_offset()).
97 	 */
98 	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
99 	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
100 	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
101 	/*
102 	 * The amount of space wasted at the end of the last chunk across all
103 	 * scatter ABDs tracked by scatter_cnt.
104 	 */
105 	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
106 	/*
107 	 * The number of compound allocations of a given order.  These
108 	 * allocations are spread over all currently allocated ABDs, and
109 	 * act as a measure of memory fragmentation.
110 	 */
111 	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
112 	/*
113 	 * The number of scatter ABDs which contain multiple chunks.
114 	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
116 	 */
117 	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
118 	/*
119 	 * The number of scatter ABDs which are split across memory zones.
120 	 * ABDs are preferentially allocated using pages from a single zone.
121 	 */
122 	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
123 	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate a scatter ABD.
126 	 */
127 	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
128 	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
131 	 */
132 	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
133 };
134 
135 struct {
136 	wmsum_t abdstat_struct_size;
137 	wmsum_t abdstat_linear_cnt;
138 	wmsum_t abdstat_linear_data_size;
139 	wmsum_t abdstat_scatter_cnt;
140 	wmsum_t abdstat_scatter_data_size;
141 	wmsum_t abdstat_scatter_chunk_waste;
142 	wmsum_t abdstat_scatter_orders[MAX_ORDER];
143 	wmsum_t abdstat_scatter_page_multi_chunk;
144 	wmsum_t abdstat_scatter_page_multi_zone;
145 	wmsum_t abdstat_scatter_page_alloc_retry;
146 	wmsum_t abdstat_scatter_sg_table_retry;
147 } abd_sums;
148 
149 #define	abd_for_each_sg(abd, sg, n, i)	\
150 	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
151 
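/*
 * zfs_abd_scatter_max_order is the largest compound page order
 * abd_alloc_chunks() will attempt to use when populating a scatter ABD.
 * It is clamped to MAX_ORDER - 1 and is exported as a module parameter
 * at the bottom of this file.
 */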
152 unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
153 
154 /*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABDs.  Smaller allocations will use linear ABDs, which use
 * zio_[data_]buf_alloc().
158  *
 * Scatter ABDs use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. a 2KB scatter allocation wastes
 * half of each page).  Using linear ABDs for small allocations means that
 * they will be put on slabs which contain many allocations.  This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8 of each page; one buf per slab = wasting 15/16 of
 * the slab).
169  *
170  * Spill blocks are typically 512B and are heavily used on systems running
171  * selinux with the default dnode size and the `xattr=sa` property set.
172  *
173  * By default we use linear allocations for 512B and 1KB, and scatter
174  * allocations for larger (1.5KB and up).
175  */
176 int zfs_abd_scatter_min_size = 512 * 3;
177 
178 /*
179  * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
180  * just a single zero'd page. This allows us to conserve memory by
181  * only using a single zero page for the scatterlist.
182  */
183 abd_t *abd_zero_scatter = NULL;
184 
185 struct page;
186 /*
187  * abd_zero_page we will be an allocated zero'd PAGESIZE buffer, which is
188  * assigned to set each of the pages of abd_zero_scatter.
189  */
190 static struct page *abd_zero_page = NULL;
191 
192 static kmem_cache_t *abd_cache = NULL;
193 static kstat_t *abd_ksp;
194 
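/*
 * Scatter ABDs are backed by whole pages, so the number of chunks needed
 * for a given byte size is simply the size rounded up to a multiple of
 * PAGESIZE: e.g. with 4K pages, a 9K request needs
 * P2ROUNDUP(9K, 4K) / 4K = 3 chunks.
 */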
195 static uint_t
196 abd_chunkcnt_for_bytes(size_t size)
197 {
198 	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
199 }
200 
201 abd_t *
202 abd_alloc_struct_impl(size_t size)
203 {
204 	/*
205 	 * In Linux we do not use the size passed in during ABD
206 	 * allocation, so we just ignore it.
207 	 */
208 	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
209 	ASSERT3P(abd, !=, NULL);
210 	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
211 
212 	return (abd);
213 }
214 
215 void
216 abd_free_struct_impl(abd_t *abd)
217 {
218 	kmem_cache_free(abd_cache, abd);
219 	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
220 }
221 
222 #ifdef _KERNEL
223 /*
224  * Mark zfs data pages so they can be excluded from kernel crash dumps
225  */
226 #ifdef _LP64
227 #define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E
228 
229 static inline void
230 abd_mark_zfs_page(struct page *page)
231 {
232 	get_page(page);
233 	SetPagePrivate(page);
234 	set_page_private(page, ABD_FILE_CACHE_PAGE);
235 }
236 
237 static inline void
238 abd_unmark_zfs_page(struct page *page)
239 {
240 	set_page_private(page, 0UL);
241 	ClearPagePrivate(page);
242 	put_page(page);
243 }
244 #else
245 #define	abd_mark_zfs_page(page)
246 #define	abd_unmark_zfs_page(page)
247 #endif /* _LP64 */
248 
249 #ifndef CONFIG_HIGHMEM
250 
251 #ifndef __GFP_RECLAIM
252 #define	__GFP_RECLAIM		__GFP_WAIT
253 #endif
254 
255 /*
256  * The goal is to minimize fragmentation by preferentially populating ABDs
257  * with higher order compound pages from a single zone.  Allocation size is
258  * progressively decreased until it can be satisfied without performing
259  * reclaim or compaction.  When necessary this function will degenerate to
260  * allocating individual pages and allowing reclaim to satisfy allocations.
261  */
262 void
263 abd_alloc_chunks(abd_t *abd, size_t size)
264 {
265 	struct list_head pages;
266 	struct sg_table table;
267 	struct scatterlist *sg;
268 	struct page *page, *tmp_page = NULL;
269 	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
270 	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
271 	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
272 	int nr_pages = abd_chunkcnt_for_bytes(size);
273 	int chunks = 0, zones = 0;
274 	size_t remaining_size;
275 	int nid = NUMA_NO_NODE;
276 	int alloc_pages = 0;
277 
278 	INIT_LIST_HEAD(&pages);
279 
280 	while (alloc_pages < nr_pages) {
281 		unsigned chunk_pages;
282 		int order;
283 
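		/*
		 * Request the largest power-of-2 chunk that does not exceed
		 * what is still needed: e.g. with 13 pages outstanding and
		 * max_order >= 3, highbit64(13) - 1 == 3, so an order-3
		 * (8-page) compound page is tried first, with smaller
		 * orders used for the remainder.
		 */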
284 		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
285 		chunk_pages = (1U << order);
286 
287 		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
288 		if (page == NULL) {
289 			if (order == 0) {
290 				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
291 				schedule_timeout_interruptible(1);
292 			} else {
293 				max_order = MAX(0, order - 1);
294 			}
295 			continue;
296 		}
297 
298 		list_add_tail(&page->lru, &pages);
299 
300 		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
301 			zones++;
302 
303 		nid = page_to_nid(page);
304 		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
305 		chunks++;
306 		alloc_pages += chunk_pages;
307 	}
308 
309 	ASSERT3S(alloc_pages, ==, nr_pages);
310 
311 	while (sg_alloc_table(&table, chunks, gfp)) {
312 		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
313 		schedule_timeout_interruptible(1);
314 	}
315 
316 	sg = table.sgl;
317 	remaining_size = size;
318 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
319 		size_t sg_size = MIN(PAGESIZE << compound_order(page),
320 		    remaining_size);
321 		sg_set_page(sg, page, sg_size, 0);
322 		abd_mark_zfs_page(page);
323 		remaining_size -= sg_size;
324 
325 		sg = sg_next(sg);
326 		list_del(&page->lru);
327 	}
328 
329 	/*
330 	 * These conditions ensure that a possible transformation to a linear
331 	 * ABD would be valid.
332 	 */
333 	ASSERT(!PageHighMem(sg_page(table.sgl)));
334 	ASSERT0(ABD_SCATTER(abd).abd_offset);
335 
336 	if (table.nents == 1) {
337 		/*
338 		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer.  All single-page (4K) ABDs can be
		 * represented this way.  Some multi-page ABDs can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages).  This is often the
		 * case for 2-page (8K) ABDs.
345 		 *
346 		 * Representing a single-entry scatter ABD as a linear ABD
347 		 * has the performance advantage of avoiding the copy (and
348 		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
349 		 * A performance increase of around 5% has been observed for
350 		 * ARC-cached reads (of small blocks which can take advantage
351 		 * of this).
352 		 *
353 		 * Note that this optimization is only possible because the
354 		 * pages are always mapped into the kernel's address space.
355 		 * This is not the case for highmem pages, so the
		 * optimization cannot be made there.
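		 *
		 * abd_free_linear_page() performs the inverse transformation
		 * when an ABD allocated this way is freed.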
357 		 */
358 		abd->abd_flags |= ABD_FLAG_LINEAR;
359 		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
360 		abd->abd_u.abd_linear.abd_sgl = table.sgl;
361 		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
362 	} else if (table.nents > 1) {
363 		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
364 		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
365 
366 		if (zones) {
367 			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
368 			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
369 		}
370 
371 		ABD_SCATTER(abd).abd_sgl = table.sgl;
372 		ABD_SCATTER(abd).abd_nents = table.nents;
373 	}
374 }
375 #else
376 
377 /*
378  * Allocate N individual pages to construct a scatter ABD.  This function
379  * makes no attempt to request contiguous pages and requires the minimal
380  * number of kernel interfaces.  It's designed for maximum compatibility.
381  */
382 void
383 abd_alloc_chunks(abd_t *abd, size_t size)
384 {
385 	struct scatterlist *sg = NULL;
386 	struct sg_table table;
387 	struct page *page;
388 	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
389 	int nr_pages = abd_chunkcnt_for_bytes(size);
390 	int i = 0;
391 
392 	while (sg_alloc_table(&table, nr_pages, gfp)) {
393 		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
394 		schedule_timeout_interruptible(1);
395 	}
396 
397 	ASSERT3U(table.nents, ==, nr_pages);
398 	ABD_SCATTER(abd).abd_sgl = table.sgl;
399 	ABD_SCATTER(abd).abd_nents = nr_pages;
400 
401 	abd_for_each_sg(abd, sg, nr_pages, i) {
402 		while ((page = __page_cache_alloc(gfp)) == NULL) {
403 			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
404 			schedule_timeout_interruptible(1);
405 		}
406 
407 		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
408 		sg_set_page(sg, page, PAGESIZE, 0);
409 		abd_mark_zfs_page(page);
410 	}
411 
412 	if (nr_pages > 1) {
413 		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
414 		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
415 	}
416 }
417 #endif /* !CONFIG_HIGHMEM */
418 
419 /*
420  * This must be called if any of the sg_table allocation functions
421  * are called.
422  */
423 static void
424 abd_free_sg_table(abd_t *abd)
425 {
426 	struct sg_table table;
427 
428 	table.sgl = ABD_SCATTER(abd).abd_sgl;
429 	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
430 	sg_free_table(&table);
431 }
432 
433 void
434 abd_free_chunks(abd_t *abd)
435 {
436 	struct scatterlist *sg = NULL;
437 	struct page *page;
438 	int nr_pages = ABD_SCATTER(abd).abd_nents;
439 	int order, i = 0;
440 
441 	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
442 		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);
443 
444 	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
445 		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
446 
447 	abd_for_each_sg(abd, sg, nr_pages, i) {
448 		page = sg_page(sg);
449 		abd_unmark_zfs_page(page);
450 		order = compound_order(page);
451 		__free_pages(page, order);
452 		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
453 		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
454 	}
455 	abd_free_sg_table(abd);
456 }
457 
458 /*
459  * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
460  * the scatterlist will be set to the zero'd out buffer abd_zero_page.
461  */
462 static void
463 abd_alloc_zero_scatter(void)
464 {
465 	struct scatterlist *sg = NULL;
466 	struct sg_table table;
467 	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
468 	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
469 	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
470 	int i = 0;
471 
472 	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
473 		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
474 		schedule_timeout_interruptible(1);
475 	}
476 	abd_mark_zfs_page(abd_zero_page);
477 
478 	while (sg_alloc_table(&table, nr_pages, gfp)) {
479 		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
480 		schedule_timeout_interruptible(1);
481 	}
482 	ASSERT3U(table.nents, ==, nr_pages);
483 
484 	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
485 	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
486 	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
487 	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
488 	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
489 	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
490 	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
491 
492 	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
493 		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
494 	}
495 
496 	ABDSTAT_BUMP(abdstat_scatter_cnt);
497 	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
498 	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
499 }
500 
501 #else /* _KERNEL */
502 
503 #ifndef PAGE_SHIFT
504 #define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
505 #endif
506 
507 #define	zfs_kmap_atomic(chunk)		((void *)chunk)
508 #define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
509 #define	local_irq_save(flags)		do { (void)(flags); } while (0)
510 #define	local_irq_restore(flags)	do { (void)(flags); } while (0)
511 #define	nth_page(pg, i) \
512 	((struct page *)((void *)(pg) + (i) * PAGESIZE))
513 
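/*
 * In user space we emulate just enough of the kernel scatterlist API
 * (sg_init_table, sg_set_page, sg_page, sg_next) for the shared ABD code
 * to work against a plain array of page-sized buffers.
 */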
514 struct scatterlist {
515 	struct page *page;
516 	int length;
517 	int end;
518 };
519 
520 static void
521 sg_init_table(struct scatterlist *sg, int nr)
522 {
523 	memset(sg, 0, nr * sizeof (struct scatterlist));
524 	sg[nr - 1].end = 1;
525 }
526 
527 /*
528  * This must be called if any of the sg_table allocation functions
529  * are called.
530  */
531 static void
532 abd_free_sg_table(abd_t *abd)
533 {
534 	int nents = ABD_SCATTER(abd).abd_nents;
535 	vmem_free(ABD_SCATTER(abd).abd_sgl,
536 	    nents * sizeof (struct scatterlist));
537 }
538 
539 #define	for_each_sg(sgl, sg, nr, i)	\
540 	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))
541 
542 static inline void
543 sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
544     unsigned int offset)
545 {
546 	/* currently we don't use offset */
547 	ASSERT(offset == 0);
548 	sg->page = page;
549 	sg->length = len;
550 }
551 
552 static inline struct page *
553 sg_page(struct scatterlist *sg)
554 {
555 	return (sg->page);
556 }
557 
558 static inline struct scatterlist *
559 sg_next(struct scatterlist *sg)
560 {
561 	if (sg->end)
562 		return (NULL);
563 
564 	return (sg + 1);
565 }
566 
567 void
568 abd_alloc_chunks(abd_t *abd, size_t size)
569 {
570 	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
571 	struct scatterlist *sg;
572 	int i;
573 
574 	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
575 	    sizeof (struct scatterlist), KM_SLEEP);
576 	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);
577 
578 	abd_for_each_sg(abd, sg, nr_pages, i) {
579 		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
580 		sg_set_page(sg, p, PAGESIZE, 0);
581 	}
582 	ABD_SCATTER(abd).abd_nents = nr_pages;
583 }
584 
585 void
586 abd_free_chunks(abd_t *abd)
587 {
588 	int i, n = ABD_SCATTER(abd).abd_nents;
589 	struct scatterlist *sg;
590 
591 	abd_for_each_sg(abd, sg, n, i) {
592 		for (int j = 0; j < sg->length; j += PAGESIZE) {
593 			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
594 			umem_free(p, PAGESIZE);
595 		}
596 	}
597 	abd_free_sg_table(abd);
598 }
599 
600 static void
601 abd_alloc_zero_scatter(void)
602 {
603 	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
604 	struct scatterlist *sg;
605 	int i;
606 
607 	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
608 	memset(abd_zero_page, 0, PAGESIZE);
609 	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
610 	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
611 	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
612 	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
613 	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
614 	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
615 	zfs_refcount_create(&abd_zero_scatter->abd_children);
616 	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
617 	    sizeof (struct scatterlist), KM_SLEEP);
618 
619 	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);
620 
621 	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
622 		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
623 	}
624 
625 	ABDSTAT_BUMP(abdstat_scatter_cnt);
626 	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
627 	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
628 }
629 
630 #endif /* _KERNEL */
631 
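/*
 * Return B_TRUE if an ABD of this size should be allocated as a linear
 * buffer rather than a scatter ABD (see zfs_abd_scatter_min_size above).
 */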
632 boolean_t
633 abd_size_alloc_linear(size_t size)
634 {
635 	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
636 }
637 
638 void
639 abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
640 {
641 	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
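	/*
	 * A scatter ABD always occupies whole pages, so the wasted space is
	 * the round-up slack in the last chunk: e.g. a 5K ABD on 4K pages
	 * occupies 8K, wasting 3K.
	 */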
642 	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
643 	if (op == ABDSTAT_INCR) {
644 		ABDSTAT_BUMP(abdstat_scatter_cnt);
645 		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
646 		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
647 		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
648 	} else {
649 		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
650 		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
651 		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
652 		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
653 	}
654 }
655 
656 void
657 abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
658 {
659 	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
660 	if (op == ABDSTAT_INCR) {
661 		ABDSTAT_BUMP(abdstat_linear_cnt);
662 		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
663 	} else {
664 		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
665 		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
666 	}
667 }
668 
669 void
670 abd_verify_scatter(abd_t *abd)
671 {
672 	size_t n;
673 	int i = 0;
674 	struct scatterlist *sg = NULL;
675 
676 	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
677 	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
678 	    ABD_SCATTER(abd).abd_sgl->length);
679 	n = ABD_SCATTER(abd).abd_nents;
680 	abd_for_each_sg(abd, sg, n, i) {
681 		ASSERT3P(sg_page(sg), !=, NULL);
682 	}
683 }
684 
685 static void
686 abd_free_zero_scatter(void)
687 {
688 	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
689 	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
690 	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
691 
692 	abd_free_sg_table(abd_zero_scatter);
693 	abd_free_struct(abd_zero_scatter);
694 	abd_zero_scatter = NULL;
695 	ASSERT3P(abd_zero_page, !=, NULL);
696 #if defined(_KERNEL)
697 	abd_unmark_zfs_page(abd_zero_page);
698 	__free_page(abd_zero_page);
699 #else
700 	umem_free(abd_zero_page, PAGESIZE);
701 #endif /* _KERNEL */
702 }
703 
704 static int
705 abd_kstats_update(kstat_t *ksp, int rw)
706 {
707 	abd_stats_t *as = ksp->ks_data;
708 
709 	if (rw == KSTAT_WRITE)
710 		return (EACCES);
711 	as->abdstat_struct_size.value.ui64 =
712 	    wmsum_value(&abd_sums.abdstat_struct_size);
713 	as->abdstat_linear_cnt.value.ui64 =
714 	    wmsum_value(&abd_sums.abdstat_linear_cnt);
715 	as->abdstat_linear_data_size.value.ui64 =
716 	    wmsum_value(&abd_sums.abdstat_linear_data_size);
717 	as->abdstat_scatter_cnt.value.ui64 =
718 	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
719 	as->abdstat_scatter_data_size.value.ui64 =
720 	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
721 	as->abdstat_scatter_chunk_waste.value.ui64 =
722 	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
723 	for (int i = 0; i < MAX_ORDER; i++) {
724 		as->abdstat_scatter_orders[i].value.ui64 =
725 		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
726 	}
727 	as->abdstat_scatter_page_multi_chunk.value.ui64 =
728 	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
729 	as->abdstat_scatter_page_multi_zone.value.ui64 =
730 	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
731 	as->abdstat_scatter_page_alloc_retry.value.ui64 =
732 	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
733 	as->abdstat_scatter_sg_table_retry.value.ui64 =
734 	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
735 	return (0);
736 }
737 
738 void
739 abd_init(void)
740 {
741 	int i;
742 
743 	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
744 	    0, NULL, NULL, NULL, NULL, NULL, 0);
745 
746 	wmsum_init(&abd_sums.abdstat_struct_size, 0);
747 	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
748 	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
749 	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
750 	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
751 	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
752 	for (i = 0; i < MAX_ORDER; i++)
753 		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
754 	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
755 	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
756 	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
757 	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
758 
759 	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
760 	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
761 	if (abd_ksp != NULL) {
762 		for (i = 0; i < MAX_ORDER; i++) {
763 			snprintf(abd_stats.abdstat_scatter_orders[i].name,
764 			    KSTAT_STRLEN, "scatter_order_%d", i);
765 			abd_stats.abdstat_scatter_orders[i].data_type =
766 			    KSTAT_DATA_UINT64;
767 		}
768 		abd_ksp->ks_data = &abd_stats;
769 		abd_ksp->ks_update = abd_kstats_update;
770 		kstat_install(abd_ksp);
771 	}
772 
773 	abd_alloc_zero_scatter();
774 }
775 
776 void
777 abd_fini(void)
778 {
779 	abd_free_zero_scatter();
780 
781 	if (abd_ksp != NULL) {
782 		kstat_delete(abd_ksp);
783 		abd_ksp = NULL;
784 	}
785 
786 	wmsum_fini(&abd_sums.abdstat_struct_size);
787 	wmsum_fini(&abd_sums.abdstat_linear_cnt);
788 	wmsum_fini(&abd_sums.abdstat_linear_data_size);
789 	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
790 	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
791 	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
792 	for (int i = 0; i < MAX_ORDER; i++)
793 		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
794 	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
795 	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
796 	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
797 	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
798 
799 	if (abd_cache) {
800 		kmem_cache_destroy(abd_cache);
801 		abd_cache = NULL;
802 	}
803 }
804 
805 void
806 abd_free_linear_page(abd_t *abd)
807 {
808 	/* Transform it back into a scatter ABD for freeing */
809 	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
810 	abd->abd_flags &= ~ABD_FLAG_LINEAR;
811 	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
812 	ABD_SCATTER(abd).abd_nents = 1;
813 	ABD_SCATTER(abd).abd_offset = 0;
814 	ABD_SCATTER(abd).abd_sgl = sg;
815 	abd_free_chunks(abd);
816 
817 	abd_update_scatter_stats(abd, ABDSTAT_DECR);
818 }
819 
820 /*
821  * If we're going to use this ABD for doing I/O using the block layer, the
822  * consumer of the ABD data doesn't care if it's scattered or not, and we don't
823  * plan to store this ABD in memory for a long period of time, we should
824  * allocate the ABD type that requires the least data copying to do the I/O.
825  *
826  * On Linux the optimal thing to do would be to use abd_get_offset() and
827  * construct a new ABD which shares the original pages thereby eliminating
828  * the copy.  But for the moment a new linear ABD is allocated until this
829  * performance optimization can be implemented.
830  */
831 abd_t *
832 abd_alloc_for_io(size_t size, boolean_t is_metadata)
833 {
834 	return (abd_alloc(size, is_metadata));
835 }
836 
837 abd_t *
838 abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
839     size_t size)
840 {
841 	int i = 0;
842 	struct scatterlist *sg = NULL;
843 
844 	abd_verify(sabd);
845 	ASSERT3U(off, <=, sabd->abd_size);
846 
847 	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
848 
849 	if (abd == NULL)
850 		abd = abd_alloc_struct(0);
851 
852 	/*
853 	 * Even if this buf is filesystem metadata, we only track that
854 	 * if we own the underlying data buffer, which is not true in
855 	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
856 	 */
857 
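	/*
	 * Walk the scatterlist until we find the entry containing
	 * new_offset; that entry becomes the head of the child ABD's
	 * scatterlist and new_offset becomes its offset into that entry.
	 */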
858 	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
859 		if (new_offset < sg->length)
860 			break;
861 		new_offset -= sg->length;
862 	}
863 
864 	ABD_SCATTER(abd).abd_sgl = sg;
865 	ABD_SCATTER(abd).abd_offset = new_offset;
866 	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
867 
868 	return (abd);
869 }
870 
871 /*
872  * Initialize the abd_iter.
873  */
874 void
875 abd_iter_init(struct abd_iter *aiter, abd_t *abd)
876 {
877 	ASSERT(!abd_is_gang(abd));
878 	abd_verify(abd);
879 	aiter->iter_abd = abd;
880 	aiter->iter_mapaddr = NULL;
881 	aiter->iter_mapsize = 0;
882 	aiter->iter_pos = 0;
883 	if (abd_is_linear(abd)) {
884 		aiter->iter_offset = 0;
885 		aiter->iter_sg = NULL;
886 	} else {
887 		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
888 		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
889 	}
890 }
891 
892 /*
 * This is just a helper function to check whether we have exhausted the
 * abd_iter and reached the end of the ABD.
895  */
896 boolean_t
897 abd_iter_at_end(struct abd_iter *aiter)
898 {
899 	return (aiter->iter_pos == aiter->iter_abd->abd_size);
900 }
901 
902 /*
 * Advance the iterator by a certain amount. Cannot be called while a chunk is
 * in use. This can be safely called when the aiter has already been
 * exhausted, in which case it does nothing.
906  */
907 void
908 abd_iter_advance(struct abd_iter *aiter, size_t amount)
909 {
910 	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
911 	ASSERT0(aiter->iter_mapsize);
912 
913 	/* There's nothing left to advance to, so do nothing */
914 	if (abd_iter_at_end(aiter))
915 		return;
916 
917 	aiter->iter_pos += amount;
918 	aiter->iter_offset += amount;
919 	if (!abd_is_linear(aiter->iter_abd)) {
920 		while (aiter->iter_offset >= aiter->iter_sg->length) {
921 			aiter->iter_offset -= aiter->iter_sg->length;
922 			aiter->iter_sg = sg_next(aiter->iter_sg);
923 			if (aiter->iter_sg == NULL) {
924 				ASSERT0(aiter->iter_offset);
925 				break;
926 			}
927 		}
928 	}
929 }
930 
931 /*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case it does nothing.
934  */
935 void
936 abd_iter_map(struct abd_iter *aiter)
937 {
938 	void *paddr;
939 	size_t offset = 0;
940 
941 	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
942 	ASSERT0(aiter->iter_mapsize);
943 
944 	/* There's nothing left to iterate over, so do nothing */
945 	if (abd_iter_at_end(aiter))
946 		return;
947 
948 	if (abd_is_linear(aiter->iter_abd)) {
949 		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
950 		offset = aiter->iter_offset;
951 		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
952 		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
953 	} else {
954 		offset = aiter->iter_offset;
955 		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
956 		    aiter->iter_abd->abd_size - aiter->iter_pos);
957 
958 		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
959 	}
960 
961 	aiter->iter_mapaddr = (char *)paddr + offset;
962 }
963 
964 /*
 * Unmap the current chunk from aiter. This can be safely called when the
 * aiter has already been exhausted, in which case it does nothing.
967  */
968 void
969 abd_iter_unmap(struct abd_iter *aiter)
970 {
971 	/* There's nothing left to unmap, so do nothing */
972 	if (abd_iter_at_end(aiter))
973 		return;
974 
975 	if (!abd_is_linear(aiter->iter_abd)) {
976 		/* LINTED E_FUNC_SET_NOT_USED */
977 		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
978 	}
979 
980 	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
981 	ASSERT3U(aiter->iter_mapsize, >, 0);
982 
983 	aiter->iter_mapaddr = NULL;
984 	aiter->iter_mapsize = 0;
985 }
986 
987 void
988 abd_cache_reap_now(void)
989 {
990 }
991 
992 #if defined(_KERNEL)
993 /*
994  * bio_nr_pages for ABD.
995  * @off is the offset in @abd
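 *
 * The return value is the number of pages spanned by the byte range:
 * e.g. with 4K pages, pos = 100 and size = 8000 cover bytes 100..8099,
 * so ((100 + 8000 + 4095) >> PAGE_SHIFT) - (100 >> PAGE_SHIFT) = 2 pages.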
996  */
997 unsigned long
998 abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
999 {
1000 	unsigned long pos;
1001 
1002 	if (abd_is_gang(abd)) {
1003 		unsigned long count = 0;
1004 
1005 		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1006 		    cabd != NULL && size != 0;
1007 		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1008 			ASSERT3U(off, <, cabd->abd_size);
1009 			int mysize = MIN(size, cabd->abd_size - off);
1010 			count += abd_nr_pages_off(cabd, mysize, off);
1011 			size -= mysize;
1012 			off = 0;
1013 		}
1014 		return (count);
1015 	}
1016 
1017 	if (abd_is_linear(abd))
1018 		pos = (unsigned long)abd_to_buf(abd) + off;
1019 	else
1020 		pos = ABD_SCATTER(abd).abd_offset + off;
1021 
1022 	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
1023 	    (pos >> PAGE_SHIFT));
1024 }
1025 
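/*
 * Add the pages backing the linear buffer @buf_ptr to @bio one page at a
 * time, handling both vmalloc'd and physically contiguous buffers.  The
 * number of bytes which could not be added is returned (0 on success).
 */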
1026 static unsigned int
1027 bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
1028 {
1029 	unsigned int offset, size, i;
1030 	struct page *page;
1031 
1032 	offset = offset_in_page(buf_ptr);
1033 	for (i = 0; i < bio->bi_max_vecs; i++) {
1034 		size = PAGE_SIZE - offset;
1035 
1036 		if (bio_size <= 0)
1037 			break;
1038 
1039 		if (size > bio_size)
1040 			size = bio_size;
1041 
1042 		if (is_vmalloc_addr(buf_ptr))
1043 			page = vmalloc_to_page(buf_ptr);
1044 		else
1045 			page = virt_to_page(buf_ptr);
1046 
1047 		/*
1048 		 * Some network related block device uses tcp_sendpage, which
1049 		 * doesn't behave well when using 0-count page, this is a
1050 		 * safety net to catch them.
1051 		 */
1052 		ASSERT3S(page_count(page), >, 0);
1053 
1054 		if (bio_add_page(bio, page, size, offset) != size)
1055 			break;
1056 
1057 		buf_ptr += size;
1058 		bio_size -= size;
1059 		offset = 0;
1060 	}
1061 
1062 	return (bio_size);
1063 }
1064 
1065 /*
1066  * bio_map for gang ABD.
1067  */
1068 static unsigned int
1069 abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
1070     unsigned int io_size, size_t off)
1071 {
1072 	ASSERT(abd_is_gang(abd));
1073 
1074 	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
1075 	    cabd != NULL;
1076 	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
1077 		ASSERT3U(off, <, cabd->abd_size);
1078 		int size = MIN(io_size, cabd->abd_size - off);
1079 		int remainder = abd_bio_map_off(bio, cabd, size, off);
1080 		io_size -= (size - remainder);
1081 		if (io_size == 0 || remainder > 0)
1082 			return (io_size);
1083 		off = 0;
1084 	}
1085 	ASSERT0(io_size);
1086 	return (io_size);
1087 }
1088 
1089 /*
1090  * bio_map for ABD.
1091  * @off is the offset in @abd
1092  * Remaining IO size is returned
1093  */
1094 unsigned int
1095 abd_bio_map_off(struct bio *bio, abd_t *abd,
1096     unsigned int io_size, size_t off)
1097 {
1098 	struct abd_iter aiter;
1099 
1100 	ASSERT3U(io_size, <=, abd->abd_size - off);
1101 	if (abd_is_linear(abd))
1102 		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
1103 
1104 	ASSERT(!abd_is_linear(abd));
1105 	if (abd_is_gang(abd))
1106 		return (abd_gang_bio_map_off(bio, abd, io_size, off));
1107 
1108 	abd_iter_init(&aiter, abd);
1109 	abd_iter_advance(&aiter, off);
1110 
1111 	for (int i = 0; i < bio->bi_max_vecs; i++) {
1112 		struct page *pg;
1113 		size_t len, sgoff, pgoff;
1114 		struct scatterlist *sg;
1115 
1116 		if (io_size <= 0)
1117 			break;
1118 
1119 		sg = aiter.iter_sg;
1120 		sgoff = aiter.iter_offset;
1121 		pgoff = sgoff & (PAGESIZE - 1);
1122 		len = MIN(io_size, PAGESIZE - pgoff);
1123 		ASSERT(len > 0);
1124 
1125 		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
1126 		if (bio_add_page(bio, pg, len, pgoff) != len)
1127 			break;
1128 
1129 		io_size -= len;
1130 		abd_iter_advance(&aiter, len);
1131 	}
1132 
1133 	return (io_size);
1134 }
1135 
1136 /* Tunable Parameters */
1137 module_param(zfs_abd_scatter_enabled, int, 0644);
1138 MODULE_PARM_DESC(zfs_abd_scatter_enabled,
1139 	"Toggle whether ABD allocations must be linear.");
1140 module_param(zfs_abd_scatter_min_size, int, 0644);
1141 MODULE_PARM_DESC(zfs_abd_scatter_min_size,
1142 	"Minimum size of scatter allocations.");
1143 /* CSTYLED */
1144 module_param(zfs_abd_scatter_max_order, uint, 0644);
1145 MODULE_PARM_DESC(zfs_abd_scatter_max_order,
1146 	"Maximum order allocation used for a scatter ABD.");
1147 #endif
1148