1 /* 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org> 5 * Copyright (c) 2011-2022 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Matthew Dillon <dillon@dragonflybsd.org> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 /* 38 * This module implements the hammer2 helper thread API, including 39 * the frontend/backend XOP API. 40 */ 41 #include "hammer2.h" 42 43 #define H2XOPDESCRIPTOR(label) \ 44 hammer2_xop_desc_t hammer2_##label##_desc = { \ 45 .storage_func = hammer2_xop_##label, \ 46 .id = #label \ 47 } 48 49 H2XOPDESCRIPTOR(ipcluster); 50 H2XOPDESCRIPTOR(readdir); 51 H2XOPDESCRIPTOR(nresolve); 52 H2XOPDESCRIPTOR(unlink); 53 H2XOPDESCRIPTOR(nrename); 54 H2XOPDESCRIPTOR(scanlhc); 55 H2XOPDESCRIPTOR(scanall); 56 H2XOPDESCRIPTOR(lookup); 57 H2XOPDESCRIPTOR(delete); 58 H2XOPDESCRIPTOR(inode_mkdirent); 59 H2XOPDESCRIPTOR(inode_create); 60 H2XOPDESCRIPTOR(inode_create_det); 61 H2XOPDESCRIPTOR(inode_create_ins); 62 H2XOPDESCRIPTOR(inode_destroy); 63 H2XOPDESCRIPTOR(inode_chain_sync); 64 H2XOPDESCRIPTOR(inode_unlinkall); 65 H2XOPDESCRIPTOR(inode_connect); 66 H2XOPDESCRIPTOR(inode_flush); 67 H2XOPDESCRIPTOR(strategy_read); 68 H2XOPDESCRIPTOR(strategy_write); 69 70 //struct objcache *cache_xops; 71 static struct thread dummy_td; 72 struct thread *curthread = &dummy_td; 73 74 /* 75 * Set flags and wakeup any waiters. 76 * 77 * WARNING! During teardown (thr) can disappear the instant our cmpset 78 * succeeds. 79 */ 80 void 81 hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags) 82 { 83 uint32_t oflags; 84 uint32_t nflags; 85 86 for (;;) { 87 oflags = thr->flags; 88 cpu_ccfence(); 89 nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING; 90 91 if (oflags & HAMMER2_THREAD_WAITING) { 92 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) { 93 wakeup(&thr->flags); 94 break; 95 } 96 } else { 97 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) 98 break; 99 } 100 } 101 } 102 103 /* 104 * Set and clear flags and wakeup any waiters. 105 * 106 * WARNING! During teardown (thr) can disappear the instant our cmpset 107 * succeeds. 108 */ 109 void 110 hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags) 111 { 112 uint32_t oflags; 113 uint32_t nflags; 114 115 for (;;) { 116 oflags = thr->flags; 117 cpu_ccfence(); 118 nflags = (oflags | posflags) & 119 ~(negflags | HAMMER2_THREAD_WAITING); 120 if (oflags & HAMMER2_THREAD_WAITING) { 121 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) { 122 wakeup(&thr->flags); 123 break; 124 } 125 } else { 126 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) 127 break; 128 } 129 } 130 } 131 132 /* 133 * Wait until all the bits in flags are set. 134 * 135 * WARNING! During teardown (thr) can disappear the instant our cmpset 136 * succeeds. 137 */ 138 void 139 hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags) 140 { 141 uint32_t oflags; 142 uint32_t nflags; 143 144 for (;;) { 145 oflags = thr->flags; 146 cpu_ccfence(); 147 if ((oflags & flags) == flags) 148 break; 149 nflags = oflags | HAMMER2_THREAD_WAITING; 150 tsleep_interlock(&thr->flags, 0); 151 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) { 152 tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60); 153 } 154 } 155 } 156 157 /* 158 * Wait until any of the bits in flags are set, with timeout. 159 * 160 * WARNING! During teardown (thr) can disappear the instant our cmpset 161 * succeeds. 162 */ 163 int 164 hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo) 165 { 166 uint32_t oflags; 167 uint32_t nflags; 168 int error; 169 170 error = 0; 171 for (;;) { 172 oflags = thr->flags; 173 cpu_ccfence(); 174 if (oflags & flags) 175 break; 176 nflags = oflags | HAMMER2_THREAD_WAITING; 177 tsleep_interlock(&thr->flags, 0); 178 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) { 179 error = tsleep(&thr->flags, PINTERLOCKED, 180 "h2twait", timo); 181 } 182 if (error == ETIMEDOUT) { 183 error = HAMMER2_ERROR_ETIMEDOUT; 184 break; 185 } 186 } 187 return error; 188 } 189 190 /* 191 * Wait until the bits in flags are clear. 192 * 193 * WARNING! During teardown (thr) can disappear the instant our cmpset 194 * succeeds. 195 */ 196 void 197 hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags) 198 { 199 uint32_t oflags; 200 uint32_t nflags; 201 202 for (;;) { 203 oflags = thr->flags; 204 cpu_ccfence(); 205 if ((oflags & flags) == 0) 206 break; 207 nflags = oflags | HAMMER2_THREAD_WAITING; 208 tsleep_interlock(&thr->flags, 0); 209 if (atomic_cmpset_int(&thr->flags, oflags, nflags)) { 210 tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60); 211 } 212 } 213 } 214 215 /* 216 * Initialize the supplied thread structure, starting the specified 217 * thread. 218 * 219 * NOTE: thr structure can be retained across mounts and unmounts for this 220 * pmp, so make sure the flags are in a sane state. 221 */ 222 void 223 hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp, 224 hammer2_dev_t *hmp, 225 const char *id, int clindex, int repidx, 226 void (*func)(void *arg)) 227 { 228 thr->pmp = pmp; /* xop helpers */ 229 thr->hmp = hmp; /* bulkfree */ 230 thr->clindex = clindex; 231 thr->repidx = repidx; 232 TAILQ_INIT(&thr->xopq); 233 atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP | 234 HAMMER2_THREAD_STOPPED | 235 HAMMER2_THREAD_FREEZE | 236 HAMMER2_THREAD_FROZEN); 237 if (thr->scratch == NULL) 238 thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO); 239 #if 0 240 if (repidx >= 0) { 241 lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus, 242 "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx); 243 } else if (pmp) { 244 lwkt_create(func, thr, &thr->td, NULL, 0, -1, 245 "%s-%s", id, pmp->pfs_names[clindex]); 246 } else { 247 lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id); 248 } 249 #else 250 thr->td = &dummy_td; 251 #endif 252 } 253 254 /* 255 * Terminate a thread. This function will silently return if the thread 256 * was never initialized or has already been deleted. 257 * 258 * This is accomplished by setting the STOP flag and waiting for the td 259 * structure to become NULL. 260 */ 261 void 262 hammer2_thr_delete(hammer2_thread_t *thr) 263 { 264 if (thr->td == NULL) 265 return; 266 hammer2_thr_signal(thr, HAMMER2_THREAD_STOP); 267 /* Don't wait, there's no such thread in makefs */ 268 //hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED); 269 thr->pmp = NULL; 270 if (thr->scratch) { 271 kfree(thr->scratch, M_HAMMER2); 272 thr->scratch = NULL; 273 } 274 KKASSERT(TAILQ_EMPTY(&thr->xopq)); 275 } 276 277 /* 278 * Asynchronous remaster request. Ask the synchronization thread to 279 * start over soon (as if it were frozen and unfrozen, but without waiting). 280 * The thread always recalculates mastership relationships when restarting. 281 */ 282 void 283 hammer2_thr_remaster(hammer2_thread_t *thr) 284 { 285 if (thr->td == NULL) 286 return; 287 hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER); 288 } 289 290 void 291 hammer2_thr_freeze_async(hammer2_thread_t *thr) 292 { 293 hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE); 294 } 295 296 void 297 hammer2_thr_freeze(hammer2_thread_t *thr) 298 { 299 if (thr->td == NULL) 300 return; 301 hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE); 302 hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN); 303 } 304 305 void 306 hammer2_thr_unfreeze(hammer2_thread_t *thr) 307 { 308 if (thr->td == NULL) 309 return; 310 hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE); 311 hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN); 312 } 313 314 int 315 hammer2_thr_break(hammer2_thread_t *thr) 316 { 317 if (thr->flags & (HAMMER2_THREAD_STOP | 318 HAMMER2_THREAD_REMASTER | 319 HAMMER2_THREAD_FREEZE)) { 320 return 1; 321 } 322 return 0; 323 } 324 325 /**************************************************************************** 326 * HAMMER2 XOPS API * 327 ****************************************************************************/ 328 329 /* 330 * Allocate or reallocate XOP FIFO. This doesn't exist in sys/vfs/hammer2 331 * where XOP is handled by dedicated kernel threads and when FIFO stalls 332 * threads wait for frontend to collect results. 333 */ 334 static void 335 hammer2_xop_fifo_alloc(hammer2_xop_fifo_t *fifo, size_t nmemb) 336 { 337 size_t size; 338 339 /* Assert nmemb requirements. */ 340 KKASSERT((nmemb & (nmemb - 1)) == 0); 341 KKASSERT(nmemb >= HAMMER2_XOPFIFO); 342 343 /* malloc or realloc fifo array. */ 344 size = nmemb * sizeof(hammer2_chain_t *); 345 if (!fifo->array) 346 fifo->array = kmalloc(size, M_HAMMER2, M_WAITOK | M_ZERO); 347 else 348 fifo->array = krealloc(fifo->array, size, M_HAMMER2, 349 M_WAITOK | M_ZERO); 350 KKASSERT(fifo->array); 351 352 /* malloc or realloc fifo errors. */ 353 size = nmemb * sizeof(int); 354 if (!fifo->errors) 355 fifo->errors = kmalloc(size, M_HAMMER2, M_WAITOK | M_ZERO); 356 else 357 fifo->errors = krealloc(fifo->errors, size, M_HAMMER2, 358 M_WAITOK | M_ZERO); 359 KKASSERT(fifo->errors); 360 } 361 362 /* 363 * Allocate a XOP request. 364 * 365 * Once allocated a XOP request can be started, collected, and retired, 366 * and can be retired early if desired. 367 * 368 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get(). 369 */ 370 void * 371 hammer2_xop_alloc(hammer2_inode_t *ip, int flags) 372 { 373 hammer2_xop_t *xop; 374 375 xop = ecalloc(1, sizeof(*xop)); 376 KKASSERT(xop->head.cluster.array[0].chain == NULL); 377 378 xop->head.ip1 = ip; 379 xop->head.desc = NULL; 380 xop->head.flags = flags; 381 xop->head.state = 0; 382 xop->head.error = 0; 383 xop->head.collect_key = 0; 384 xop->head.focus_dio = NULL; 385 386 if (flags & HAMMER2_XOP_MODIFYING) 387 xop->head.mtid = hammer2_trans_sub(ip->pmp); 388 else 389 xop->head.mtid = 0; 390 391 xop->head.cluster.nchains = ip->cluster.nchains; 392 xop->head.cluster.pmp = ip->pmp; 393 xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED; 394 395 /* 396 * run_mask - Active thread (or frontend) associated with XOP 397 */ 398 xop->head.run_mask = HAMMER2_XOPMASK_VOP; 399 400 hammer2_xop_fifo_t *fifo = &xop->head.collect[0]; 401 xop->head.fifo_size = HAMMER2_XOPFIFO; 402 hammer2_xop_fifo_alloc(fifo, xop->head.fifo_size); 403 404 hammer2_inode_ref(ip); 405 406 return xop; 407 } 408 409 void 410 hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len) 411 { 412 xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO); 413 xop->name1_len = name_len; 414 bcopy(name, xop->name1, name_len); 415 } 416 417 void 418 hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len) 419 { 420 xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO); 421 xop->name2_len = name_len; 422 bcopy(name, xop->name2, name_len); 423 } 424 425 size_t 426 hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum) 427 { 428 const size_t name_len = 18; 429 430 xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO); 431 xop->name1_len = name_len; 432 ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum); 433 434 return name_len; 435 } 436 437 438 void 439 hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2) 440 { 441 xop->ip2 = ip2; 442 hammer2_inode_ref(ip2); 443 } 444 445 void 446 hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3) 447 { 448 xop->ip3 = ip3; 449 hammer2_inode_ref(ip3); 450 } 451 452 void 453 hammer2_xop_setip4(hammer2_xop_head_t *xop, hammer2_inode_t *ip4) 454 { 455 xop->ip4 = ip4; 456 hammer2_inode_ref(ip4); 457 } 458 459 void 460 hammer2_xop_reinit(hammer2_xop_head_t *xop) 461 { 462 xop->state = 0; 463 xop->error = 0; 464 xop->collect_key = 0; 465 xop->run_mask = HAMMER2_XOPMASK_VOP; 466 } 467 468 /* 469 * A mounted PFS needs Xops threads to support frontend operations. 470 */ 471 void 472 hammer2_xop_helper_create(hammer2_pfs_t *pmp) 473 { 474 int i; 475 int j; 476 477 lockmgr(&pmp->lock, LK_EXCLUSIVE); 478 pmp->has_xop_threads = 1; 479 480 pmp->xop_groups = kmalloc(hammer2_xop_nthreads * 481 sizeof(hammer2_xop_group_t), 482 M_HAMMER2, M_WAITOK | M_ZERO); 483 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) { 484 for (j = 0; j < hammer2_xop_nthreads; ++j) { 485 if (pmp->xop_groups[j].thrs[i].td) 486 continue; 487 hammer2_thr_create(&pmp->xop_groups[j].thrs[i], 488 pmp, NULL, 489 "h2xop", i, j, 490 hammer2_primary_xops_thread); 491 } 492 } 493 lockmgr(&pmp->lock, LK_RELEASE); 494 } 495 496 void 497 hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp) 498 { 499 int i; 500 int j; 501 502 if (pmp->xop_groups == NULL) { 503 KKASSERT(pmp->has_xop_threads == 0); 504 return; 505 } 506 507 for (i = 0; i < pmp->pfs_nmasters; ++i) { 508 for (j = 0; j < hammer2_xop_nthreads; ++j) { 509 if (pmp->xop_groups[j].thrs[i].td) 510 hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]); 511 } 512 } 513 pmp->has_xop_threads = 0; 514 kfree(pmp->xop_groups, M_HAMMER2); 515 pmp->xop_groups = NULL; 516 } 517 518 /* 519 * Start a XOP request, queueing it to all nodes in the cluster to 520 * execute the cluster op. 521 * 522 * XXX optimize single-target case. 523 */ 524 void 525 hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc, 526 int notidx) 527 { 528 hammer2_inode_t *ip1; 529 hammer2_pfs_t *pmp; 530 hammer2_thread_t *thr; 531 int i; 532 int ng; 533 int nchains; 534 535 ip1 = xop->ip1; 536 pmp = ip1->pmp; 537 if (pmp->has_xop_threads == 0) 538 hammer2_xop_helper_create(pmp); 539 540 /* 541 * The sequencer assigns a worker thread to the XOP. 542 * 543 * (1) The worker threads are partitioned into two sets, one for 544 * NON-STRATEGY XOPs, and the other for STRATEGY XOPs. This 545 * guarantees that strategy calls will always be able to make 546 * progress and will not deadlock against non-strategy calls. 547 * 548 * (2) If clustered, non-strategy operations to the same inode must 549 * be serialized. This is to avoid confusion when issuing 550 * modifying operations because a XOP completes the instant a 551 * quorum is reached. 552 * 553 * TODO - RENAME fails here because it is potentially modifying 554 * three different inodes, but we triple-lock the inodes 555 * involved so it shouldn't create a sequencing schism. 556 */ 557 if (xop->flags & HAMMER2_XOP_STRATEGY) { 558 /* 559 * Use worker space 0 associated with the current cpu 560 * for strategy ops. 561 */ 562 /* 563 hammer2_xop_strategy_t *xopst; 564 u_int which; 565 566 xopst = &((hammer2_xop_t *)xop)->xop_strategy; 567 which = ((unsigned int)ip1->ihash + 568 ((unsigned int)xopst->lbase >> HAMMER2_PBUFRADIX)) % 569 hammer2_xop_sgroups; 570 ng = mycpu->gd_cpuid % hammer2_xop_mod + 571 hammer2_xop_mod * which; 572 */ 573 ng = 0; 574 } else if (hammer2_spread_workers == 0 && ip1->cluster.nchains == 1) { 575 /* 576 * For now try to keep the work on the same cpu to reduce 577 * IPI overhead. Several threads are assigned to each cpu, 578 * don't be very smart and select the one to use based on 579 * the inode hash. 580 */ 581 /* 582 u_int which; 583 584 which = (unsigned int)ip1->ihash % hammer2_xop_xgroups; 585 ng = mycpu->gd_cpuid % hammer2_xop_mod + 586 (which * hammer2_xop_mod) + 587 hammer2_xop_xbase; 588 */ 589 ng = 0; 590 } else { 591 /* 592 * Hash based on inode only, must serialize inode to same 593 * thread regardless of current cpu. 594 */ 595 /* 596 ng = (unsigned int)ip1->ihash % 597 (hammer2_xop_mod * hammer2_xop_xgroups) + 598 hammer2_xop_xbase; 599 */ 600 ng = 0; 601 } 602 xop->desc = desc; 603 604 /* 605 * The instant xop is queued another thread can pick it off. In the 606 * case of asynchronous ops, another thread might even finish and 607 * deallocate it. 608 */ 609 hammer2_spin_ex(&pmp->xop_spin); 610 nchains = ip1->cluster.nchains; 611 for (i = 0; i < nchains; ++i) { 612 /* 613 * XXX ip1->cluster.array* not stable here. This temporary 614 * hack fixes basic issues in target XOPs which need to 615 * obtain a starting chain from the inode but does not 616 * address possible races against inode updates which 617 * might NULL-out a chain. 618 */ 619 if (i != notidx && ip1->cluster.array[i].chain) { 620 thr = &pmp->xop_groups[ng].thrs[i]; 621 atomic_set_64(&xop->run_mask, 1LLU << i); 622 atomic_set_64(&xop->chk_mask, 1LLU << i); 623 xop->collect[i].thr = thr; 624 TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry); 625 } 626 } 627 hammer2_spin_unex(&pmp->xop_spin); 628 /* xop can become invalid at this point */ 629 630 /* 631 * Each thread has its own xopq 632 */ 633 for (i = 0; i < nchains; ++i) { 634 if (i != notidx) { 635 thr = &pmp->xop_groups[ng].thrs[i]; 636 hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ); 637 hammer2_primary_xops_thread(thr); 638 } 639 } 640 } 641 642 void 643 hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc) 644 { 645 hammer2_xop_start_except(xop, desc, -1); 646 } 647 648 /* 649 * Retire a XOP. Used by both the VOP frontend and by the XOP backend. 650 */ 651 void 652 hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask) 653 { 654 hammer2_chain_t *chain; 655 uint64_t nmask; 656 int i; 657 658 /* 659 * Remove the frontend collector or remove a backend feeder. 660 * 661 * When removing the frontend we must wakeup any backend feeders 662 * who are waiting for FIFO space. 663 * 664 * When removing the last backend feeder we must wakeup any waiting 665 * frontend. 666 */ 667 KKASSERT(xop->run_mask & mask); 668 nmask = atomic_fetchadd_64(&xop->run_mask, 669 -mask + HAMMER2_XOPMASK_FEED); 670 671 /* 672 * More than one entity left 673 */ 674 if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) { 675 /* 676 * Frontend terminating, wakeup any backends waiting on 677 * fifo full. 678 * 679 * NOTE!!! The xop can get ripped out from under us at 680 * this point, so do not reference it again. 681 * The wakeup(xop) doesn't touch the xop and 682 * is ok. 683 */ 684 if (mask == HAMMER2_XOPMASK_VOP) { 685 if (nmask & HAMMER2_XOPMASK_FIFOW) 686 wakeup(xop); 687 } 688 689 /* 690 * Wakeup frontend if the last backend is terminating. 691 */ 692 nmask -= mask; 693 if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) { 694 if (nmask & HAMMER2_XOPMASK_WAIT) 695 wakeup(xop); 696 } 697 698 return; 699 } 700 /* else nobody else left, we can ignore FIFOW */ 701 702 /* 703 * All collectors are gone, we can cleanup and dispose of the XOP. 704 * Note that this can wind up being a frontend OR a backend. 705 * Pending chains are locked shared and not owned by any thread. 706 * 707 * Cleanup the collection cluster. 708 */ 709 for (i = 0; i < xop->cluster.nchains; ++i) { 710 xop->cluster.array[i].flags = 0; 711 chain = xop->cluster.array[i].chain; 712 if (chain) { 713 xop->cluster.array[i].chain = NULL; 714 hammer2_chain_drop_unhold(chain); 715 } 716 } 717 718 /* 719 * Cleanup the fifos. Since we are the only entity left on this 720 * xop we don't have to worry about fifo flow control, and one 721 * lfence() will do the job. 722 */ 723 cpu_lfence(); 724 mask = xop->chk_mask; 725 for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) { 726 hammer2_xop_fifo_t *fifo = &xop->collect[i]; 727 while (fifo->ri != fifo->wi) { 728 chain = fifo->array[fifo->ri & fifo_mask(xop)]; 729 if (chain) 730 hammer2_chain_drop_unhold(chain); 731 ++fifo->ri; 732 } 733 mask &= ~(1U << i); 734 } 735 736 /* 737 * The inode is only held at this point, simply drop it. 738 */ 739 if (xop->ip1) { 740 hammer2_inode_drop(xop->ip1); 741 xop->ip1 = NULL; 742 } 743 if (xop->ip2) { 744 hammer2_inode_drop(xop->ip2); 745 xop->ip2 = NULL; 746 } 747 if (xop->ip3) { 748 hammer2_inode_drop(xop->ip3); 749 xop->ip3 = NULL; 750 } 751 if (xop->ip4) { 752 hammer2_inode_drop(xop->ip4); 753 xop->ip4 = NULL; 754 } 755 if (xop->name1) { 756 kfree(xop->name1, M_HAMMER2); 757 xop->name1 = NULL; 758 xop->name1_len = 0; 759 } 760 if (xop->name2) { 761 kfree(xop->name2, M_HAMMER2); 762 xop->name2 = NULL; 763 xop->name2_len = 0; 764 } 765 766 free(xop); 767 } 768 769 /* 770 * (Backend) Returns non-zero if the frontend is still attached. 771 */ 772 int 773 hammer2_xop_active(hammer2_xop_head_t *xop) 774 { 775 if (xop->run_mask & HAMMER2_XOPMASK_VOP) 776 return 1; 777 else 778 return 0; 779 } 780 781 /* 782 * (Backend) Feed chain data through the cluster validator and back to 783 * the frontend. Chains are fed from multiple nodes concurrently 784 * and pipelined via per-node FIFOs in the XOP. 785 * 786 * The chain must be locked (either shared or exclusive). The caller may 787 * unlock and drop the chain on return. This function will add an extra 788 * ref and hold the chain's data for the pass-back. 789 * 790 * No xop lock is needed because we are only manipulating fields under 791 * our direct control. 792 * 793 * Returns 0 on success and a hammer2 error code if sync is permanently 794 * lost. The caller retains a ref on the chain but by convention 795 * the lock is typically inherited by the xop (caller loses lock). 796 * 797 * Returns non-zero on error. In this situation the caller retains a 798 * ref on the chain but loses the lock (we unlock here). 799 */ 800 int 801 hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain, 802 int clindex, int error) 803 { 804 hammer2_xop_fifo_t *fifo; 805 uint64_t mask; 806 807 /* 808 * Early termination (typicaly of xop_readir) 809 */ 810 if (hammer2_xop_active(xop) == 0) { 811 error = HAMMER2_ERROR_ABORTED; 812 goto done; 813 } 814 815 /* 816 * Multi-threaded entry into the XOP collector. We own the 817 * fifo->wi for our clindex. 818 */ 819 fifo = &xop->collect[clindex]; 820 821 if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) 822 lwkt_yield(); 823 while (fifo->ri == fifo->wi - xop->fifo_size) { 824 atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL); 825 mask = xop->run_mask; 826 if ((mask & HAMMER2_XOPMASK_VOP) == 0) { 827 error = HAMMER2_ERROR_ABORTED; 828 goto done; 829 } 830 xop->fifo_size *= 2; 831 hammer2_xop_fifo_alloc(fifo, xop->fifo_size); 832 } 833 atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL); 834 if (chain) 835 hammer2_chain_ref_hold(chain); 836 if (error == 0 && chain) 837 error = chain->error; 838 fifo->errors[fifo->wi & fifo_mask(xop)] = error; 839 fifo->array[fifo->wi & fifo_mask(xop)] = chain; 840 cpu_sfence(); 841 ++fifo->wi; 842 843 mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED); 844 if (mask & HAMMER2_XOPMASK_WAIT) { 845 atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT); 846 wakeup(xop); 847 } 848 error = 0; 849 850 /* 851 * Cleanup. If no error 852 * occurred the fifo inherits the lock and gains an additional ref. 853 * 854 * The caller's ref remains in both cases. 855 */ 856 done: 857 return error; 858 } 859 860 /* 861 * (Frontend) collect a response from a running cluster op. 862 * 863 * Responses are fed from all appropriate nodes concurrently 864 * and collected into a cohesive response >= collect_key. 865 * 866 * The collector will return the instant quorum or other requirements 867 * are met, even if some nodes get behind or become non-responsive. 868 * 869 * HAMMER2_XOP_COLLECT_NOWAIT - Used to 'poll' a completed collection, 870 * usually called synchronously from the 871 * node XOPs for the strategy code to 872 * fake the frontend collection and complete 873 * the BIO as soon as possible. 874 * 875 * Returns 0 on success plus a filled out xop->cluster structure. 876 * Return ENOENT on normal termination. 877 * Otherwise return an error. 878 * 879 * WARNING! If the xop returns a cluster with a non-NULL focus, note that 880 * none of the chains in the cluster (or the focus) are either 881 * locked or I/O synchronized with the cpu. hammer2_xop_gdata() 882 * and hammer2_xop_pdata() must be used to safely access the focus 883 * chain's content. 884 * 885 * The frontend can make certain assumptions based on higher-level 886 * locking done by the frontend, but data integrity absolutely 887 * requires using the gdata/pdata API. 888 */ 889 int 890 hammer2_xop_collect(hammer2_xop_head_t *xop, int flags) 891 { 892 hammer2_xop_fifo_t *fifo; 893 hammer2_chain_t *chain; 894 hammer2_key_t lokey; 895 uint64_t mask; 896 int error; 897 int keynull; 898 int adv; /* advance the element */ 899 int i; 900 901 loop: 902 /* 903 * First loop tries to advance pieces of the cluster which 904 * are out of sync. 905 */ 906 lokey = HAMMER2_KEY_MAX; 907 keynull = HAMMER2_CHECK_NULL; 908 mask = xop->run_mask; 909 cpu_lfence(); 910 911 for (i = 0; i < xop->cluster.nchains; ++i) { 912 chain = xop->cluster.array[i].chain; 913 if (chain == NULL) { 914 adv = 1; 915 } else if (chain->bref.key < xop->collect_key) { 916 adv = 1; 917 } else { 918 keynull &= ~HAMMER2_CHECK_NULL; 919 if (lokey > chain->bref.key) 920 lokey = chain->bref.key; 921 adv = 0; 922 } 923 if (adv == 0) 924 continue; 925 926 /* 927 * Advance element if possible, advanced element may be NULL. 928 */ 929 if (chain) 930 hammer2_chain_drop_unhold(chain); 931 932 fifo = &xop->collect[i]; 933 if (fifo->ri != fifo->wi) { 934 cpu_lfence(); 935 chain = fifo->array[fifo->ri & fifo_mask(xop)]; 936 error = fifo->errors[fifo->ri & fifo_mask(xop)]; 937 ++fifo->ri; 938 xop->cluster.array[i].chain = chain; 939 xop->cluster.array[i].error = error; 940 if (chain == NULL) { 941 /* XXX */ 942 xop->cluster.array[i].flags |= 943 HAMMER2_CITEM_NULL; 944 } 945 if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) { 946 if (fifo->flags & HAMMER2_XOP_FIFO_STALL) { 947 atomic_clear_int(&fifo->flags, 948 HAMMER2_XOP_FIFO_STALL); 949 wakeup(xop); 950 lwkt_yield(); 951 } 952 } 953 --i; /* loop on same index */ 954 } else { 955 /* 956 * Retain CITEM_NULL flag. If set just repeat EOF. 957 * If not, the NULL,0 combination indicates an 958 * operation in-progress. 959 */ 960 xop->cluster.array[i].chain = NULL; 961 /* retain any CITEM_NULL setting */ 962 } 963 } 964 965 /* 966 * Determine whether the lowest collected key meets clustering 967 * requirements. Returns HAMMER2_ERROR_*: 968 * 969 * 0 - key valid, cluster can be returned. 970 * 971 * ENOENT - normal end of scan, return ENOENT. 972 * 973 * ESRCH - sufficient elements collected, quorum agreement 974 * that lokey is not a valid element and should be 975 * skipped. 976 * 977 * EDEADLK - sufficient elements collected, no quorum agreement 978 * (and no agreement possible). In this situation a 979 * repair is needed, for now we loop. 980 * 981 * EINPROGRESS - insufficient elements collected to resolve, wait 982 * for event and loop. 983 * 984 * EIO - IO error or CRC check error from hammer2_cluster_check() 985 */ 986 if ((flags & HAMMER2_XOP_COLLECT_WAITALL) && 987 (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) { 988 error = HAMMER2_ERROR_EINPROGRESS; 989 } else { 990 error = hammer2_cluster_check(&xop->cluster, lokey, keynull); 991 } 992 if (error == HAMMER2_ERROR_EINPROGRESS) { 993 if (flags & HAMMER2_XOP_COLLECT_NOWAIT) 994 goto done; 995 tsleep_interlock(xop, 0); 996 if (atomic_cmpset_64(&xop->run_mask, 997 mask, mask | HAMMER2_XOPMASK_WAIT)) { 998 tsleep(xop, PINTERLOCKED, "h2coll", hz*60); 999 } 1000 goto loop; 1001 } 1002 if (error == HAMMER2_ERROR_ESRCH) { 1003 if (lokey != HAMMER2_KEY_MAX) { 1004 xop->collect_key = lokey + 1; 1005 goto loop; 1006 } 1007 error = HAMMER2_ERROR_ENOENT; 1008 } 1009 if (error == HAMMER2_ERROR_EDEADLK) { 1010 kprintf("hammer2: no quorum possible lokey %016jx\n", 1011 lokey); 1012 if (lokey != HAMMER2_KEY_MAX) { 1013 xop->collect_key = lokey + 1; 1014 goto loop; 1015 } 1016 error = HAMMER2_ERROR_ENOENT; 1017 } 1018 if (lokey == HAMMER2_KEY_MAX) 1019 xop->collect_key = lokey; 1020 else 1021 xop->collect_key = lokey + 1; 1022 done: 1023 return error; 1024 } 1025 1026 /* 1027 * N x M processing threads are available to handle XOPs, N per cluster 1028 * index x M cluster nodes. 1029 * 1030 * Locate and return the next runnable xop, or NULL if no xops are 1031 * present or none of the xops are currently runnable (for various reasons). 1032 * The xop is left on the queue and serves to block other dependent xops 1033 * from being run. 1034 * 1035 * Dependent xops will not be returned. 1036 * 1037 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL. 1038 * 1039 * NOTE! Xops run concurrently for each cluster index. 1040 */ 1041 #define XOP_HASH_SIZE 16 1042 #define XOP_HASH_MASK (XOP_HASH_SIZE - 1) 1043 1044 static __inline 1045 int 1046 xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash) 1047 { 1048 uint32_t mask; 1049 int hv; 1050 1051 hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t); 1052 mask = 1U << (hv & 31); 1053 hv >>= 5; 1054 1055 return ((int)(hash[hv & XOP_HASH_MASK] & mask)); 1056 } 1057 1058 static __inline 1059 void 1060 xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash) 1061 { 1062 uint32_t mask; 1063 int hv; 1064 1065 hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t); 1066 mask = 1U << (hv & 31); 1067 hv >>= 5; 1068 1069 hash[hv & XOP_HASH_MASK] |= mask; 1070 } 1071 1072 static 1073 hammer2_xop_head_t * 1074 hammer2_xop_next(hammer2_thread_t *thr) 1075 { 1076 hammer2_pfs_t *pmp = thr->pmp; 1077 int clindex = thr->clindex; 1078 uint32_t hash[XOP_HASH_SIZE] = { 0 }; 1079 hammer2_xop_head_t *xop; 1080 1081 hammer2_spin_ex(&pmp->xop_spin); 1082 TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) { 1083 /* 1084 * Check dependency 1085 */ 1086 if (xop_testhash(thr, xop->ip1, hash) || 1087 (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) || 1088 (xop->ip3 && xop_testhash(thr, xop->ip3, hash)) || 1089 (xop->ip4 && xop_testhash(thr, xop->ip4, hash))) 1090 { 1091 continue; 1092 } 1093 xop_sethash(thr, xop->ip1, hash); 1094 if (xop->ip2) 1095 xop_sethash(thr, xop->ip2, hash); 1096 if (xop->ip3) 1097 xop_sethash(thr, xop->ip3, hash); 1098 if (xop->ip4) 1099 xop_sethash(thr, xop->ip4, hash); 1100 1101 /* 1102 * Check already running 1103 */ 1104 if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN) 1105 continue; 1106 1107 /* 1108 * Found a good one, return it. 1109 */ 1110 atomic_set_int(&xop->collect[clindex].flags, 1111 HAMMER2_XOP_FIFO_RUN); 1112 break; 1113 } 1114 hammer2_spin_unex(&pmp->xop_spin); 1115 1116 return xop; 1117 } 1118 1119 /* 1120 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN. 1121 * 1122 * NOTE! Xops run concurrently for each cluster index. 1123 */ 1124 static 1125 void 1126 hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop) 1127 { 1128 hammer2_pfs_t *pmp = thr->pmp; 1129 int clindex = thr->clindex; 1130 1131 hammer2_spin_ex(&pmp->xop_spin); 1132 TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry); 1133 atomic_clear_int(&xop->collect[clindex].flags, 1134 HAMMER2_XOP_FIFO_RUN); 1135 hammer2_spin_unex(&pmp->xop_spin); 1136 if (TAILQ_FIRST(&thr->xopq)) 1137 hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ); 1138 } 1139 1140 /* 1141 * Primary management thread for xops support. Each node has several such 1142 * threads which replicate front-end operations on cluster nodes. 1143 * 1144 * XOPS thread node operations, allowing the function to focus on a single 1145 * node in the cluster after validating the operation with the cluster. 1146 * This is primarily what prevents dead or stalled nodes from stalling 1147 * the front-end. 1148 */ 1149 void 1150 hammer2_primary_xops_thread(void *arg) 1151 { 1152 hammer2_thread_t *thr = arg; 1153 hammer2_xop_head_t *xop; 1154 uint64_t mask; 1155 uint32_t flags; 1156 uint32_t nflags; 1157 1158 mask = 1LLU << thr->clindex; 1159 1160 for (;;) { 1161 flags = thr->flags; 1162 1163 /* 1164 * Handle stop request 1165 */ 1166 if (flags & HAMMER2_THREAD_STOP) 1167 break; 1168 1169 /* 1170 * Handle freeze request 1171 */ 1172 if (flags & HAMMER2_THREAD_FREEZE) { 1173 hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN, 1174 HAMMER2_THREAD_FREEZE); 1175 continue; 1176 } 1177 1178 if (flags & HAMMER2_THREAD_UNFREEZE) { 1179 hammer2_thr_signal2(thr, 0, 1180 HAMMER2_THREAD_FROZEN | 1181 HAMMER2_THREAD_UNFREEZE); 1182 continue; 1183 } 1184 1185 /* 1186 * Force idle if frozen until unfrozen or stopped. 1187 */ 1188 if (flags & HAMMER2_THREAD_FROZEN) { 1189 hammer2_thr_wait_any(thr, 1190 HAMMER2_THREAD_UNFREEZE | 1191 HAMMER2_THREAD_STOP, 1192 0); 1193 continue; 1194 } 1195 1196 /* 1197 * Reset state on REMASTER request 1198 */ 1199 if (flags & HAMMER2_THREAD_REMASTER) { 1200 hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER); 1201 /* reset state here */ 1202 continue; 1203 } 1204 1205 /* 1206 * Process requests. Each request can be multi-queued. 1207 * 1208 * If we get behind and the frontend VOP is no longer active, 1209 * we retire the request without processing it. The callback 1210 * may also abort processing if the frontend VOP becomes 1211 * inactive. 1212 */ 1213 if (flags & HAMMER2_THREAD_XOPQ) { 1214 nflags = flags & ~HAMMER2_THREAD_XOPQ; 1215 if (!atomic_cmpset_int(&thr->flags, flags, nflags)) 1216 continue; 1217 flags = nflags; 1218 /* fall through */ 1219 } 1220 while ((xop = hammer2_xop_next(thr)) != NULL) { 1221 if (hammer2_xop_active(xop)) { 1222 xop->desc->storage_func((hammer2_xop_t *)xop, 1223 thr->scratch, 1224 thr->clindex); 1225 hammer2_xop_dequeue(thr, xop); 1226 hammer2_xop_retire(xop, mask); 1227 } else { 1228 hammer2_xop_feed(xop, NULL, thr->clindex, 1229 ECONNABORTED); 1230 hammer2_xop_dequeue(thr, xop); 1231 hammer2_xop_retire(xop, mask); 1232 } 1233 } 1234 1235 /* Don't wait, this is a XOP caller thread in makefs */ 1236 break; 1237 1238 /* 1239 * Wait for event, interlock using THREAD_WAITING and 1240 * THREAD_SIGNAL. 1241 * 1242 * For robustness poll on a 30-second interval, but nominally 1243 * expect to be woken up. 1244 */ 1245 nflags = flags | HAMMER2_THREAD_WAITING; 1246 1247 tsleep_interlock(&thr->flags, 0); 1248 if (atomic_cmpset_int(&thr->flags, flags, nflags)) { 1249 tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30); 1250 } 1251 } 1252 1253 #if 0 1254 /* 1255 * Cleanup / termination 1256 */ 1257 while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) { 1258 kprintf("hammer2_thread: aborting xop %s\n", xop->desc->id); 1259 TAILQ_REMOVE(&thr->xopq, xop, 1260 collect[thr->clindex].entry); 1261 hammer2_xop_retire(xop, mask); 1262 } 1263 #endif 1264 thr->td = NULL; 1265 hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED); 1266 /* thr structure can go invalid after this point */ 1267 } 1268