/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the ARC buffered data (ABD).
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation:
 * when we are at the limit of allocatable space, equal-size chunks allow us
 * to quickly reclaim enough space for a new large allocation (assuming the
 * new allocation is also scattered).
 *
 * ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
        kstat_named_t abdstat_struct_size;
        kstat_named_t abdstat_scatter_cnt;
        kstat_named_t abdstat_scatter_data_size;
        kstat_named_t abdstat_scatter_chunk_waste;
        kstat_named_t abdstat_linear_cnt;
        kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
        /* Amount of memory occupied by all of the abd_t struct allocations */
        { "struct_size", KSTAT_DATA_UINT64 },
        /*
         * The number of scatter ABDs which are currently allocated, excluding
         * ABDs which don't own their data (for instance the ones which were
         * allocated through abd_get_offset()).
         */
        { "scatter_cnt", KSTAT_DATA_UINT64 },
        /* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
        { "scatter_data_size", KSTAT_DATA_UINT64 },
        /*
         * The amount of space wasted at the end of the last chunk across all
         * scatter ABDs tracked by scatter_cnt.
         */
        { "scatter_chunk_waste", KSTAT_DATA_UINT64 },
        /*
         * The number of linear ABDs which are currently allocated, excluding
         * ABDs which don't own their data (for instance the ones which were
         * allocated through abd_get_offset() and abd_get_from_buf()). If an
         * ABD takes ownership of its buf then it will become tracked.
         */
        { "linear_cnt", KSTAT_DATA_UINT64 },
        /* Amount of data stored in all linear ABDs tracked by linear_cnt */
        { "linear_data_size", KSTAT_DATA_UINT64 },
};

/*
 * The size of the chunks ABD allocates. Because the sizes allocated from the
 * kmem_cache can't change, this tunable can only be modified at boot. Changing
 * it at runtime would cause ABD iteration to work incorrectly for ABDs which
 * were allocated with the old size, so a safeguard has been put in place which
 * will cause the machine to panic if you change it and try to access the data
 * within a scattered ABD.
 */
size_t zfs_abd_chunk_size = 4096;

#if defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
    &zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN,
    &zfs_abd_chunk_size, 0, "The size of the chunks ABD allocates");
#endif
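
/*
 * Usage sketch for the tunables above (the values shown are illustrative
 * only, not recommendations): abd_chunk_size is CTLFLAG_RDTUN, so it can
 * only be set from the boot loader, while abd_scatter_enabled is
 * CTLFLAG_RWTUN and may also be flipped at runtime:
 *
 *	# /boot/loader.conf
 *	vfs.zfs.abd_chunk_size="8192"
 *
 *	# at runtime
 *	sysctl vfs.zfs.abd_scatter_enabled=0
 */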

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks all point to a
 * single zeroed buffer of zfs_abd_chunk_size bytes. This allows us to
 * conserve memory by only using a single zero buffer for the scatter chunks.
 */
abd_t *abd_zero_scatter = NULL;
static char *abd_zero_buf = NULL;

static void
abd_free_chunk(void *c)
{
        kmem_cache_free(abd_chunk_cache, c);
}

static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
        return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}

static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
        ASSERT(!abd_is_linear(abd));
        return (abd_chunkcnt_for_bytes(
            ABD_SCATTER(abd).abd_offset + abd->abd_size));
}

boolean_t
abd_size_alloc_linear(size_t size)
{
        return (size <= zfs_abd_chunk_size ? B_TRUE : B_FALSE);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
        uint_t n = abd_scatter_chunkcnt(abd);
        ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
        int waste = n * zfs_abd_chunk_size - abd->abd_size;
        if (op == ABDSTAT_INCR) {
                ABDSTAT_BUMP(abdstat_scatter_cnt);
                ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
                ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
                arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
        } else {
                ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
                ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
                ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
                arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
        }
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
        ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
        if (op == ABDSTAT_INCR) {
                ABDSTAT_BUMP(abdstat_linear_cnt);
                ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
        } else {
                ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
                ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
        }
}
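
/*
 * Worked example of the accounting above, using the default 4096-byte
 * chunk size: a 6000-byte scatter ABD occupies
 * P2ROUNDUP(6000, 4096) / 4096 = 2 chunks, so waste = 2 * 4096 - 6000 =
 * 2192 bytes is charged to abdstat_scatter_chunk_waste (and to the ARC
 * via ARC_SPACE_ABD_CHUNK_WASTE) until the ABD is freed.
 */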

void
abd_verify_scatter(abd_t *abd)
{
        uint_t i, n;

        /*
         * There are no scatter linear pages in FreeBSD, so it is an error
         * if the ABD has been marked as a linear page.
         */
        ASSERT(!abd_is_linear_page(abd));
        ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
            zfs_abd_chunk_size);
        n = abd_scatter_chunkcnt(abd);
        for (i = 0; i < n; i++) {
                ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
        }
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
        uint_t i, n;

        n = abd_chunkcnt_for_bytes(size);
        for (i = 0; i < n; i++) {
                void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
                ASSERT3P(c, !=, NULL);
                ABD_SCATTER(abd).abd_chunks[i] = c;
        }
        ABD_SCATTER(abd).abd_chunk_size = zfs_abd_chunk_size;
}

void
abd_free_chunks(abd_t *abd)
{
        uint_t i, n;

        n = abd_scatter_chunkcnt(abd);
        for (i = 0; i < n; i++) {
                abd_free_chunk(ABD_SCATTER(abd).abd_chunks[i]);
        }
}

abd_t *
abd_alloc_struct(size_t size)
{
        uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
        /*
         * In the event we are allocating a gang ABD, the size passed in
         * will be 0. We must make sure to set abd_size to the size of an
         * ABD struct as opposed to an ABD scatter with 0 chunks. The gang
         * ABD struct allocation accounts for an additional 24 bytes over
         * a scatter ABD with 0 chunks.
         */
        size_t abd_size = MAX(sizeof (abd_t),
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
        abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
        ASSERT3P(abd, !=, NULL);
        list_link_init(&abd->abd_gang_link);
        mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
        ABDSTAT_INCR(abdstat_struct_size, abd_size);

        return (abd);
}

void
abd_free_struct(abd_t *abd)
{
        uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
            abd_scatter_chunkcnt(abd);
        ssize_t size = MAX(sizeof (abd_t),
            offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
        mutex_destroy(&abd->abd_mtx);
        ASSERT(!list_link_active(&abd->abd_gang_link));
        kmem_free(abd, size);
        ABDSTAT_INCR(abdstat_struct_size, -size);
}
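
/*
 * Sizing example for the two functions above, with 4096-byte chunks: a
 * 16K scatter ABD needs 4 chunk pointers, so abd_alloc_struct(16384)
 * sizes the allocation as MAX(sizeof (abd_t),
 * offsetof(abd_t, abd_u.abd_scatter.abd_chunks[4])); a gang ABD passes
 * size 0, making chunkcnt 0, so the MAX() falls back to sizeof (abd_t).
 * abd_free_struct() recomputes the same size from the ABD's type, so the
 * two computations must stay in sync.
 */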

/*
 * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each chunk in
 * the scatterlist will be set to abd_zero_buf.
 */
static void
abd_alloc_zero_scatter(void)
{
        uint_t i, n;

        n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
        abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
        abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);

        abd_zero_scatter->abd_flags = ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
        abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
        abd_zero_scatter->abd_parent = NULL;
        zfs_refcount_create(&abd_zero_scatter->abd_children);

        ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
        ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
            zfs_abd_chunk_size;

        for (i = 0; i < n; i++) {
                ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
                    abd_zero_buf;
        }

        ABDSTAT_BUMP(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, zfs_abd_chunk_size);
}

static void
abd_free_zero_scatter(void)
{
        zfs_refcount_destroy(&abd_zero_scatter->abd_children);
        ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, -(int)zfs_abd_chunk_size);

        abd_free_struct(abd_zero_scatter);
        abd_zero_scatter = NULL;
        kmem_free(abd_zero_buf, zfs_abd_chunk_size);
}
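
/*
 * Footprint note: every chunk pointer in abd_zero_scatter aliases the
 * same abd_zero_buf, so presenting SPA_MAXBLOCKSIZE bytes of readable
 * zeros costs a single zfs_abd_chunk_size buffer plus the chunk-pointer
 * array. That is also why only zfs_abd_chunk_size, not SPA_MAXBLOCKSIZE,
 * is added to abdstat_scatter_data_size above.
 */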

void
abd_init(void)
{
        abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
            NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);

        abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
            sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
        if (abd_ksp != NULL) {
                abd_ksp->ks_data = &abd_stats;
                kstat_install(abd_ksp);
        }

        abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
        abd_free_zero_scatter();

        if (abd_ksp != NULL) {
                kstat_delete(abd_ksp);
                abd_ksp = NULL;
        }

        kmem_cache_destroy(abd_chunk_cache);
        abd_chunk_cache = NULL;
}

void
abd_free_linear_page(abd_t *abd)
{
        /*
         * FreeBSD does not have scatter linear pages, so this should
         * never be called.
         */
        VERIFY(0);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs, however if ldi_strategy() can ever issue I/Os
 * using a scatter/gather list we should switch to that and replace this call
 * with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
        return (abd_alloc_linear(size, is_metadata));
}

/*
 * This is just a helper function for abd_get_offset_scatter() to allocate a
 * scatter ABD using the chunkcnt calculated from the offset within the
 * parent ABD.
 */
static abd_t *
abd_alloc_scatter_offset_chunkcnt(size_t chunkcnt)
{
        size_t abd_size = offsetof(abd_t,
            abd_u.abd_scatter.abd_chunks[chunkcnt]);
        abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
        ASSERT3P(abd, !=, NULL);
        list_link_init(&abd->abd_gang_link);
        mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
        ABDSTAT_INCR(abdstat_struct_size, abd_size);

        return (abd);
}

abd_t *
abd_get_offset_scatter(abd_t *sabd, size_t off)
{
        abd_t *abd = NULL;

        abd_verify(sabd);
        ASSERT3U(off, <=, sabd->abd_size);

        size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
        uint_t chunkcnt = abd_scatter_chunkcnt(sabd) -
            (new_offset / zfs_abd_chunk_size);

        abd = abd_alloc_scatter_offset_chunkcnt(chunkcnt);

        /*
         * Even if this buf is filesystem metadata, we only track that
         * if we own the underlying data buffer, which is not true in
         * this case. Therefore, we don't ever use ABD_FLAG_META here.
         */
        abd->abd_flags = 0;

        ABD_SCATTER(abd).abd_offset = new_offset % zfs_abd_chunk_size;
        ABD_SCATTER(abd).abd_chunk_size = zfs_abd_chunk_size;

        /* Copy the scatterlist starting at the correct offset */
        (void) memcpy(&ABD_SCATTER(abd).abd_chunks,
            &ABD_SCATTER(sabd).abd_chunks[new_offset /
            zfs_abd_chunk_size],
            chunkcnt * sizeof (void *));

        return (abd);
}
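
/*
 * Worked example of the offset math above, with 4096-byte chunks: taking
 * offset 6000 into a 16K scatter ABD whose abd_offset is 0 gives
 * new_offset = 6000, so the child shares the parent's chunk pointers
 * from index 6000 / 4096 = 1 onward (chunkcnt = 4 - 1 = 3) and starts at
 * abd_offset = 6000 % 4096 = 1904 within its first chunk. The same
 * modular arithmetic drives abd_iter_scatter_chunk_offset() and
 * abd_iter_scatter_chunk_index() below.
 */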

static inline size_t
abd_iter_scatter_chunk_offset(struct abd_iter *aiter)
{
        ASSERT(!abd_is_linear(aiter->iter_abd));
        return ((ABD_SCATTER(aiter->iter_abd).abd_offset +
            aiter->iter_pos) % zfs_abd_chunk_size);
}

static inline size_t
abd_iter_scatter_chunk_index(struct abd_iter *aiter)
{
        ASSERT(!abd_is_linear(aiter->iter_abd));
        return ((ABD_SCATTER(aiter->iter_abd).abd_offset +
            aiter->iter_pos) / zfs_abd_chunk_size);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
        ASSERT(!abd_is_gang(abd));
        abd_verify(abd);
        aiter->iter_abd = abd;
        aiter->iter_pos = 0;
        aiter->iter_mapaddr = NULL;
        aiter->iter_mapsize = 0;
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
        return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can safely be called when the aiter has already been
 * exhausted, in which case it does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
        ASSERT3P(aiter->iter_mapaddr, ==, NULL);
        ASSERT0(aiter->iter_mapsize);

        /* There's nothing left to advance to, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        aiter->iter_pos += amount;
}

/*
 * Map the current chunk into aiter. This can safely be called when the aiter
 * has already been exhausted, in which case it does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
        void *paddr;
        size_t offset = 0;

        ASSERT3P(aiter->iter_mapaddr, ==, NULL);
        ASSERT0(aiter->iter_mapsize);

        /* Panic if someone has changed zfs_abd_chunk_size */
        IMPLY(!abd_is_linear(aiter->iter_abd), zfs_abd_chunk_size ==
            ABD_SCATTER(aiter->iter_abd).abd_chunk_size);

        /* There's nothing left to iterate over, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        if (abd_is_linear(aiter->iter_abd)) {
                offset = aiter->iter_pos;
                aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
                paddr = ABD_LINEAR_BUF(aiter->iter_abd);
        } else {
                size_t index = abd_iter_scatter_chunk_index(aiter);
                offset = abd_iter_scatter_chunk_offset(aiter);
                aiter->iter_mapsize = MIN(zfs_abd_chunk_size - offset,
                    aiter->iter_abd->abd_size - aiter->iter_pos);
                paddr = ABD_SCATTER(aiter->iter_abd).abd_chunks[index];
        }
        aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can safely be called when the
 * aiter has already been exhausted, in which case it does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
        /* There's nothing left to unmap, so do nothing */
        if (abd_iter_at_end(aiter))
                return;

        ASSERT3P(aiter->iter_mapaddr, !=, NULL);
        ASSERT3U(aiter->iter_mapsize, >, 0);

        aiter->iter_mapaddr = NULL;
        aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
        kmem_cache_reap_soon(abd_chunk_cache);
}
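
/*
 * A minimal sketch of the iterator protocol implemented above, assuming a
 * caller shaped like abd_iterate_func() in abd.c ("process" is a
 * hypothetical callback, not part of this file):
 *
 *	struct abd_iter aiter;
 *	abd_iter_init(&aiter, abd);
 *	while (!abd_iter_at_end(&aiter)) {
 *		abd_iter_map(&aiter);
 *		process(aiter.iter_mapaddr, aiter.iter_mapsize);
 *		size_t len = aiter.iter_mapsize;
 *		abd_iter_unmap(&aiter);
 *		abd_iter_advance(&aiter, len);
 *	}
 *
 * Note that abd_iter_advance() asserts that no chunk is mapped, so the
 * unmap must come first (and the mapped length must be saved before it).
 */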