/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation:
 * when we are at the limit of allocatable space, equal-size chunks let us
 * quickly reclaim enough space for a new large allocation (assuming it is
 * also scattered).
 *
 * ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
        kstat_named_t abdstat_struct_size;
        kstat_named_t abdstat_scatter_cnt;
        kstat_named_t abdstat_scatter_data_size;
        kstat_named_t abdstat_scatter_chunk_waste;
        kstat_named_t abdstat_linear_cnt;
        kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
        /* Amount of memory occupied by all of the abd_t struct allocations */
        { "struct_size",                KSTAT_DATA_UINT64 },
        /*
         * The number of scatter ABDs which are currently allocated, excluding
         * ABDs which don't own their data (for instance the ones which were
         * allocated through abd_get_offset()).
         */
        { "scatter_cnt",                KSTAT_DATA_UINT64 },
        /* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
        { "scatter_data_size",          KSTAT_DATA_UINT64 },
        /*
         * The amount of space wasted at the end of the last chunk across all
         * scatter ABDs tracked by scatter_cnt.
         */
        { "scatter_chunk_waste",        KSTAT_DATA_UINT64 },
        /*
         * The number of linear ABDs which are currently allocated, excluding
         * ABDs which don't own their data (for instance the ones which were
         * allocated through abd_get_offset() and abd_get_from_buf()). If an
         * ABD takes ownership of its buf then it will become tracked.
         */
        { "linear_cnt",                 KSTAT_DATA_UINT64 },
        /* Amount of data stored in all linear ABDs tracked by linear_cnt */
        { "linear_data_size",           KSTAT_DATA_UINT64 },
};

struct {
        wmsum_t abdstat_struct_size;
        wmsum_t abdstat_scatter_cnt;
        wmsum_t abdstat_scatter_data_size;
        wmsum_t abdstat_scatter_chunk_waste;
        wmsum_t abdstat_linear_cnt;
        wmsum_t abdstat_linear_data_size;
} abd_sums;

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABDs for.  Smaller allocations will use linear ABDs, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABDs use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. a 2KB scatter allocation
 * wastes half of each page).  Using linear ABDs for small allocations
 * means that they will be put on slabs which contain many allocations.
 *
 * Linear ABDs for multi-page allocations are easier to use, and in some
 * cases they allow us to avoid buffer copying.  But allocating and
 * especially freeing multi-page linear ABDs is expensive due to KVA
 * mapping and unmapping, and over time it causes KVA fragmentation.
 */
size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;
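
/*
 * Illustrative example (not part of the build): assuming 4 KB pages and
 * zfs_abd_scatter_enabled nonzero, a 2 KB request allocated as scatter
 * would occupy one full page and waste the other 2 KB, while a linear
 * allocation comes from a 2 KB kmem slab that packs several buffers per
 * page.  With the default minimum above, abd_size_alloc_linear() (defined
 * below) picks:
 *
 *      abd_size_alloc_linear(2048)             -> B_TRUE  (linear)
 *      abd_size_alloc_linear(PAGE_SIZE)        -> B_TRUE  (linear)
 *      abd_size_alloc_linear(PAGE_SIZE + 1)    -> B_FALSE (scatter)
 */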

#if defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
    &zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_scatter_min_size, CTLFLAG_RWTUN,
    &zfs_abd_scatter_min_size, 0, "Minimum size of scatter allocations.");
#endif

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
 * just a single zeroed page-sized buffer.  This allows us to conserve
 * memory by only using a single zero buffer for the scatter chunks.
 */
abd_t *abd_zero_scatter = NULL;
static char *abd_zero_buf = NULL;

static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
        return ((size + PAGE_MASK) >> PAGE_SHIFT);
}

static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
        ASSERT(!abd_is_linear(abd));
        return (abd_chunkcnt_for_bytes(
            ABD_SCATTER(abd).abd_offset + abd->abd_size));
}

boolean_t
abd_size_alloc_linear(size_t size)
{
        return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
        uint_t n = abd_scatter_chunkcnt(abd);
        ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
        int waste = (n << PAGE_SHIFT) - abd->abd_size;
        if (op == ABDSTAT_INCR) {
                ABDSTAT_BUMP(abdstat_scatter_cnt);
                ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
                ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
                arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
        } else {
                ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
                ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
                ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
                arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
        }
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
        ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
        if (op == ABDSTAT_INCR) {
                ABDSTAT_BUMP(abdstat_linear_cnt);
                ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
        } else {
                ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
                ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
        }
}
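
/*
 * Worked example (illustrative only): a 6 KB scatter ABD with abd_offset 0
 * on a system with 4 KB pages needs abd_chunkcnt_for_bytes(6144) == 2
 * chunks, so abd_update_scatter_stats() above accounts for
 * (2 << PAGE_SHIFT) - 6144 == 2048 bytes of trailing waste in
 * abdstat_scatter_chunk_waste (and charges the same amount to the ARC via
 * ARC_SPACE_ABD_CHUNK_WASTE).
 */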

void
abd_verify_scatter(abd_t *abd)
{
        uint_t i, n;

        /*
         * There are no linear-page ABDs on FreeBSD, so it is an error
         * if the ABD has been marked as a linear page.
         */
        ASSERT(!abd_is_linear_page(abd));
        ASSERT3U(ABD_SCATTER(abd).abd_offset, <, PAGE_SIZE);
        n = abd_scatter_chunkcnt(abd);
        for (i = 0; i < n; i++) {
                ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
        }
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
        uint_t i, n;

        n = abd_chunkcnt_for_bytes(size);
        for (i = 0; i < n; i++) {
                ABD_SCATTER(abd).abd_chunks[i] =
                    kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
        }
}

void
abd_free_chunks(abd_t *abd)
{
        uint_t i, n;

        n = abd_scatter_chunkcnt(abd);
        for (i = 0; i < n; i++) {
                kmem_cache_free(abd_chunk_cache,
                    ABD_SCATTER(abd).abd_chunks[i]);
        }
}

abd_t *
abd_alloc_struct_impl(size_t size)
{
        uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
        /*
         * In the event we are allocating a gang ABD, the size passed in
         * will be 0.  We must make sure to set abd_size to the size of an
         * ABD struct as opposed to an ABD scatter with 0 chunks.  The gang
         * ABD struct allocation accounts for an additional 24 bytes over
         * a scatter ABD with 0 chunks.
         */
        size_t abd_size = MAX(sizeof (abd_t),
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
        abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
        ASSERT3P(abd, !=, NULL);
        ABDSTAT_INCR(abdstat_struct_size, abd_size);

        return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
        uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
            abd_scatter_chunkcnt(abd);
        ssize_t size = MAX(sizeof (abd_t),
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
        kmem_free(abd, size);
        ABDSTAT_INCR(abdstat_struct_size, -size);
}
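
/*
 * Sizing note (illustrative, not part of the build): abd_chunks is a
 * trailing array, so the struct is sized with offsetof() to hold exactly
 * chunkcnt pointers.  For example, with 4 KB pages and 8-byte pointers, a
 * 6 KB scatter ABD allocates just enough space for two chunk pointers:
 *
 *      offsetof(abd_t, abd_u.abd_scatter.abd_chunks[2])
 *
 * The MAX() against sizeof (abd_t) keeps a chunkless allocation (a gang
 * ABD, where size == 0) large enough for the gang member of abd_u, per
 * the comment in abd_alloc_struct_impl() above.
 */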

/*
 * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each chunk in
 * the scatterlist is set to the shared zeroed buffer abd_zero_buf.
 */
static void
abd_alloc_zero_scatter(void)
{
        uint_t i, n;

        n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
        abd_zero_buf = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
        /* The chunk cache does not zero chunks; clear the shared buffer. */
        memset(abd_zero_buf, 0, PAGE_SIZE);
        abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);

        abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
        abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;

        ABD_SCATTER(abd_zero_scatter).abd_offset = 0;

        for (i = 0; i < n; i++) {
                ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
                    abd_zero_buf;
        }

        ABDSTAT_BUMP(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, PAGE_SIZE);
}

static void
abd_free_zero_scatter(void)
{
        ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGE_SIZE);

        abd_free_struct(abd_zero_scatter);
        abd_zero_scatter = NULL;
        kmem_cache_free(abd_chunk_cache, abd_zero_buf);
}

static int
abd_kstats_update(kstat_t *ksp, int rw)
{
        abd_stats_t *as = ksp->ks_data;

        if (rw == KSTAT_WRITE)
                return (EACCES);
        as->abdstat_struct_size.value.ui64 =
            wmsum_value(&abd_sums.abdstat_struct_size);
        as->abdstat_scatter_cnt.value.ui64 =
            wmsum_value(&abd_sums.abdstat_scatter_cnt);
        as->abdstat_scatter_data_size.value.ui64 =
            wmsum_value(&abd_sums.abdstat_scatter_data_size);
        as->abdstat_scatter_chunk_waste.value.ui64 =
            wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
        as->abdstat_linear_cnt.value.ui64 =
            wmsum_value(&abd_sums.abdstat_linear_cnt);
        as->abdstat_linear_data_size.value.ui64 =
            wmsum_value(&abd_sums.abdstat_linear_data_size);
        return (0);
}

void
abd_init(void)
{
        abd_chunk_cache = kmem_cache_create("abd_chunk", PAGE_SIZE, 0,
            NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);

        wmsum_init(&abd_sums.abdstat_struct_size, 0);
        wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
        wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
        wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
        wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
        wmsum_init(&abd_sums.abdstat_linear_data_size, 0);

        abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
            sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
        if (abd_ksp != NULL) {
                abd_ksp->ks_data = &abd_stats;
                abd_ksp->ks_update = abd_kstats_update;
                kstat_install(abd_ksp);
        }

        abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
        abd_free_zero_scatter();

        if (abd_ksp != NULL) {
                kstat_delete(abd_ksp);
                abd_ksp = NULL;
        }

        wmsum_fini(&abd_sums.abdstat_struct_size);
        wmsum_fini(&abd_sums.abdstat_scatter_cnt);
        wmsum_fini(&abd_sums.abdstat_scatter_data_size);
        wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
        wmsum_fini(&abd_sums.abdstat_linear_cnt);
        wmsum_fini(&abd_sums.abdstat_linear_data_size);

        kmem_cache_destroy(abd_chunk_cache);
        abd_chunk_cache = NULL;
}

void
abd_free_linear_page(abd_t *abd)
{
        /*
         * FreeBSD does not have linear-page ABDs, so this
         * should never be called.
         */
        VERIFY(0);
}
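
/*
 * Observation note (illustrative only): abd_zero_scatter maps every chunk
 * of a SPA_MAXBLOCKSIZE (16 MB) ABD onto the same zeroed page, so with
 * 4 KB pages its 4096 chunk pointers are backed by a single page of real
 * memory.  The counters maintained above are typically visible from
 * userland on FreeBSD as:
 *
 *      sysctl kstat.zfs.misc.abdstats
 */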

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we
 * don't plan to store this ABD in memory for a long period of time, then
 * we should allocate the ABD type that requires the least data copying to
 * do the I/O.
 *
 * Currently this means linear ABDs; however, if ldi_strategy() can ever
 * issue I/Os using a scatter/gather list, we should switch to that and
 * replace this call with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
        return (abd_alloc_linear(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
        abd_verify(sabd);
        ASSERT3U(off, <=, sabd->abd_size);

        size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
        size_t chunkcnt = abd_chunkcnt_for_bytes(
            (new_offset & PAGE_MASK) + size);

        ASSERT3U(chunkcnt, <=, abd_scatter_chunkcnt(sabd));

        /*
         * If an abd struct is provided, it is only the minimum size.  If we
         * need additional chunks, we need to allocate a new struct.
         */
        if (abd != NULL &&
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]) >
            sizeof (abd_t)) {
                abd = NULL;
        }

        if (abd == NULL)
                abd = abd_alloc_struct(chunkcnt << PAGE_SHIFT);

        /*
         * Even if this buf is filesystem metadata, we only track that
         * if we own the underlying data buffer, which is not true in
         * this case.  Therefore, we don't ever use ABD_FLAG_META here.
         */

        ABD_SCATTER(abd).abd_offset = new_offset & PAGE_MASK;

        /* Copy the scatterlist starting at the correct offset */
        (void) memcpy(&ABD_SCATTER(abd).abd_chunks,
            &ABD_SCATTER(sabd).abd_chunks[new_offset >> PAGE_SHIFT],
            chunkcnt * sizeof (void *));

        return (abd);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
        ASSERT(!abd_is_gang(abd));
        abd_verify(abd);
        aiter->iter_abd = abd;
        aiter->iter_pos = 0;
        aiter->iter_mapaddr = NULL;
        aiter->iter_mapsize = 0;
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
        return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount.  Cannot be called when a chunk
 * is in use.  This can be safely called when the iterator has already been
 * exhausted, in which case it does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
        ASSERT3P(aiter->iter_mapaddr, ==, NULL);
        ASSERT0(aiter->iter_mapsize);

        /* There's nothing left to advance to, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        aiter->iter_pos += amount;
}
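
/*
 * Typical traversal (a sketch, not part of the build): callers such as
 * abd_iterate_func() in abd.c walk an ABD one mapped chunk at a time,
 * saving the mapped size before unmapping since abd_iter_unmap() resets
 * iter_mapsize to 0:
 *
 *      struct abd_iter aiter;
 *      size_t len;
 *
 *      abd_iter_init(&aiter, abd);
 *      while (!abd_iter_at_end(&aiter)) {
 *              abd_iter_map(&aiter);
 *              len = aiter.iter_mapsize;
 *              ... consume len bytes at aiter.iter_mapaddr ...
 *              abd_iter_unmap(&aiter);
 *              abd_iter_advance(&aiter, len);
 *      }
 */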

/*
 * Map the current chunk into aiter.  This can be safely called when the
 * iterator has already been exhausted, in which case it does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
        void *paddr;

        ASSERT3P(aiter->iter_mapaddr, ==, NULL);
        ASSERT0(aiter->iter_mapsize);

        /* There's nothing left to iterate over, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        abd_t *abd = aiter->iter_abd;
        size_t offset = aiter->iter_pos;
        if (abd_is_linear(abd)) {
                aiter->iter_mapsize = abd->abd_size - offset;
                paddr = ABD_LINEAR_BUF(abd);
        } else {
                offset += ABD_SCATTER(abd).abd_offset;
                paddr = ABD_SCATTER(abd).abd_chunks[offset >> PAGE_SHIFT];
                offset &= PAGE_MASK;
                aiter->iter_mapsize = MIN(PAGE_SIZE - offset,
                    abd->abd_size - aiter->iter_pos);
        }
        aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter.  This can be safely called when the
 * iterator has already been exhausted, in which case it does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
        if (!abd_iter_at_end(aiter)) {
                ASSERT3P(aiter->iter_mapaddr, !=, NULL);
                ASSERT3U(aiter->iter_mapsize, >, 0);
        }

        aiter->iter_mapaddr = NULL;
        aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
        kmem_cache_reap_soon(abd_chunk_cache);
}
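
/*
 * Worked example for abd_iter_map() (illustrative only): with 4 KB pages,
 * a scatter ABD with abd_offset == 512 and iter_pos == 7000 computes
 * offset = 7512, so the mapping uses chunk 7512 >> PAGE_SHIFT == 1 at
 * in-page offset 7512 & PAGE_MASK == 3416, and iter_mapsize is at most
 * PAGE_SIZE - 3416 == 680 bytes before the next chunk boundary.
 */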