xref: /dragonfly/sys/vfs/hammer2/hammer2_admin.c (revision 9d626b29)
/*
 * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"

/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
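
/*
 * Illustrative sketch (disabled, not part of the build) of the interlock
 * pattern the wait/signal pair above relies on.  The waiter arms the
 * sleep with tsleep_interlock() before publishing WAITING via cmpset,
 * and the signaler clears WAITING in the same cmpset that sets the new
 * flags before calling wakeup(), so a wakeup issued between the
 * interlock and the tsleep() is not lost (PINTERLOCKED turns the
 * tsleep() into a no-op).  EX_DONE/EX_WAITING are hypothetical flags
 * used only for illustration.
 */
#if 0
#define EX_DONE		0x00000001
#define EX_WAITING	0x80000000

static void
example_waiter(uint32_t *flagsp)
{
	uint32_t oflags;

	for (;;) {
		oflags = *flagsp;
		cpu_ccfence();
		if (oflags & EX_DONE)			/* condition met */
			break;
		tsleep_interlock(flagsp, 0);		/* arm the sleep */
		if (atomic_cmpset_int(flagsp, oflags, oflags | EX_WAITING))
			tsleep(flagsp, PINTERLOCKED, "exwait", hz * 60);
	}
}

static void
example_signaler(uint32_t *flagsp)
{
	uint32_t oflags;

	for (;;) {
		oflags = *flagsp;
		cpu_ccfence();
		if (atomic_cmpset_int(flagsp, oflags,
				      (oflags | EX_DONE) & ~EX_WAITING)) {
			if (oflags & EX_WAITING)	/* waiter published */
				wakeup(flagsp);
			break;
		}
	}
}
#endif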

/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}

/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}

/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}
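
/*
 * Usage sketch (disabled) of the helper thread lifecycle as mount/unmount
 * code would drive it.  "my_thread_func" is a placeholder for any function
 * following the hammer2_primary_xops_thread() pattern; a real thread body
 * must eventually report STOPPED via hammer2_thr_signal() or the
 * hammer2_thr_delete() below will block.
 */
#if 0
static void
example_thread_lifecycle(hammer2_pfs_t *pmp, void (*my_thread_func)(void *))
{
	static hammer2_thread_t thr;	/* must survive the thread */

	/* start a helper for cluster index 0, no replication index */
	hammer2_thr_create(&thr, pmp, NULL, "h2demo", 0, -1, my_thread_func);

	/* pause the thread and wait for FROZEN, then resume it */
	hammer2_thr_freeze(&thr);
	hammer2_thr_unfreeze(&thr);

	/* set STOP and wait for STOPPED; frees thr.scratch */
	hammer2_thr_delete(&thr);
}
#endif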

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}

/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}
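
/*
 * Usage sketch (disabled) of the full XOP cycle as a frontend VOP would
 * drive it, with error handling reduced for illustration.  "backend_func"
 * stands in for any hammer2_xop_func_t backend; the collect loop shape
 * matches iterating ops such as readdir, which run until the collector
 * returns HAMMER2_ERROR_ENOENT.
 */
#if 0
static int
example_xop_cycle(hammer2_inode_t *ip, hammer2_xop_func_t backend_func)
{
	hammer2_xop_head_t *xop;
	int error;

	xop = hammer2_xop_alloc(ip, 0);		/* non-modifying op */
	hammer2_xop_start(xop, backend_func);	/* queue to all nodes */

	for (;;) {
		error = hammer2_xop_collect(xop, 0);
		if (error)
			break;	/* HAMMER2_ERROR_ENOENT == normal EOF */
		/* consume the validated xop->cluster element(s) here */
	}
	hammer2_xop_retire(xop, HAMMER2_XOPMASK_VOP);

	return error;
}
#endif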

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}

size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}

void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations must be segregated from non-strategy operations
	 * to avoid a deadlock.  For example, if a vfsync and a bread/bwrite
	 * were queued to the same worker thread, the locked buffer in the
	 * strategy operation can deadlock the vfsync's buffer list scan.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
	xop->func = func;

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_64(&xop->run_mask, 1LLU << i);
			atomic_set_64(&xop->chk_mask, 1LLU << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}
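
/*
 * Worked example (disabled) of the group-selection math above, assuming
 * HAMMER2_XOPGROUPS is 32 (so HAMMER2_XOPGROUPS_MASK is 31).  Non-strategy
 * ops then hash into groups 0..15 and strategy ops into groups 16..31,
 * which is what keeps a locked strategy buffer from ever sharing a worker
 * thread with a vfsync-style operation on the same inode.
 */
#if 0
static int
example_pick_group(hammer2_xop_head_t *xop, hammer2_key_t lbase,
		   int is_strategy)
{
	int ng;

	if (is_strategy) {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&lbase, sizeof(lbase)));
		ng &= HAMMER2_XOPGROUPS_MASK >> 1;	/* 0..15 */
		ng += HAMMER2_XOPGROUPS / 2;		/* 16..31 */
	} else {
		ng = (int)hammer2_icrc32(&xop->ip1, sizeof(xop->ip1));
		ng &= HAMMER2_XOPGROUPS_MASK >> 1;	/* 0..15 */
	}
	return ng;
}
#endif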

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
	hammer2_chain_t *chain;
	uint64_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 *
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * When removing the last backend feeder we must wakeup any waiting
	 * frontend.
	 */
	KKASSERT(xop->run_mask & mask);
	nmask = atomic_fetchadd_64(&xop->run_mask,
				   -mask + HAMMER2_XOPMASK_FEED);

	/*
	 * More than one entity left
	 */
	if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
		/*
		 * Frontend terminating, wakeup any backends waiting on
		 * fifo full.
		 *
		 * NOTE!!! The xop can get ripped out from under us at
		 *	   this point, so do not reference it again.
		 *	   The wakeup(xop) doesn't touch the xop and
		 *	   is ok.
		 */
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}

		/*
		 * Wakeup frontend if the last backend is terminating.
		 */
		nmask -= mask;
		if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_WAIT)
				wakeup(xop);
		}

		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos.  Since we are the only entity left on this
	 * xop we don't have to worry about fifo flow control, and one
	 * lfence() will do the job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
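
/*
 * Worked example (disabled) of the run_mask accounting used by the retire
 * and feed paths, assuming run_mask packs the per-node run bits plus
 * HAMMER2_XOPMASK_VOP, WAIT and FIFOW in the low bits with
 * HAMMER2_XOPMASK_FEED acting as an event-counter increment above them.
 * A single atomic_fetchadd_64() of (-mymask + HAMMER2_XOPMASK_FEED) both
 * clears the caller's bit and bumps the counter, so any concurrent
 * sampler of run_mask can tell the mask changed under it.
 */
#if 0
static int
example_is_last_entity(hammer2_xop_head_t *xop, uint64_t mymask)
{
	uint64_t omask;

	/* fetchadd returns the value *before* the add */
	omask = atomic_fetchadd_64(&xop->run_mask,
				   -mymask + HAMMER2_XOPMASK_FEED);
	return ((omask & HAMMER2_XOPMASK_ALLDONE) == mymask);
}
#endif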

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer2 error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint64_t mask;

	/*
	 * Early termination (typically xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];

	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask, mask,
				     mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
	if (mask & HAMMER2_XOPMASK_WAIT) {
		atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
		wakeup(xop);
	}
	error = 0;

	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}
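
/*
 * Sketch (disabled) of the free-running FIFO index convention used by the
 * feeder above and the collector below.  ri and wi increment without
 * bound; masking with HAMMER2_XOPFIFO_MASK selects the slot (so
 * HAMMER2_XOPFIFO must be a power of 2).  The fifo is empty when ri == wi
 * and full when wi has advanced a whole ring ahead of ri, which is the
 * same (ri == wi - HAMMER2_XOPFIFO) comparison the feeder stalls on.
 */
#if 0
static int
example_fifo_full(const hammer2_xop_fifo_t *fifo)
{
	return (fifo->wi - fifo->ri == HAMMER2_XOPFIFO);
}

static hammer2_chain_t *
example_fifo_peek(hammer2_xop_fifo_t *fifo)
{
	if (fifo->ri == fifo->wi)	/* empty */
		return NULL;
	return fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
}
#endif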

/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	uint64_t mask;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	mask = xop->run_mask;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				/* XXX */
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0		 - key valid, cluster can be returned.
	 *
	 * ENOENT	 - normal end of scan, return ENOENT.
	 *
	 * ESRCH	 - sufficient elements collected, quorum agreement
	 *		   that lokey is not a valid element and should be
	 *		   skipped.
	 *
	 * EDEADLK	 - sufficient elements collected, no quorum agreement
	 *		   (and no agreement possible).  In this situation a
	 *		   repair is needed, for now we loop.
	 *
	 * EINPROGRESS	 - insufficient elements collected to resolve, wait
	 *		   for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
			goto done;
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask,
				     mask, mask | HAMMER2_XOPMASK_WAIT)) {
			tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			(intmax_t)lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}
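
/*
 * Usage sketch (disabled): a single-result frontend collect, e.g. for a
 * lookup-style op, simplified for illustration.  Passing
 * HAMMER2_XOP_COLLECT_WAITALL (as flush-style callers do) forces the
 * collector to wait for every backend feeder to retire instead of
 * returning the moment quorum requirements are met.
 */
#if 0
static int
example_single_collect(hammer2_xop_head_t *xop)
{
	int error;

	error = hammer2_xop_collect(xop, HAMMER2_XOP_COLLECT_WAITALL);
	if (error == 0) {
		/* xop->cluster is filled out and validated, use it */
	} else if (error == HAMMER2_ERROR_ENOENT) {
		/* normal termination / not found */
	}
	hammer2_xop_retire(xop, HAMMER2_XOPMASK_VOP);

	return error;
}
#endif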

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)

static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}
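
/*
 * Worked example (disabled) of the bitmap addressing in xop_testhash()/
 * xop_sethash() above.  The hash is a 16 x 32 = 512-bit table: after
 * folding (ip, thr) into hv, the low 5 bits of hv select the bit within
 * a word and the next bits (masked by XOP_HASH_MASK) select the word.
 * Collisions are harmless; they merely make two unrelated xops look
 * dependent for one scan of the queue.
 */
#if 0
static void
example_hash_addressing(uint32_t *hash /* [XOP_HASH_SIZE] */, int hv)
{
	uint32_t mask = 1U << (hv & 31);	/* bit within 32-bit word */
	int word = (hv >> 5) & XOP_HASH_MASK;	/* word within table */

	hash[word] |= mask;
}
#endif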

static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS threads run the node operations, allowing each backend function to
 * focus on a single node in the cluster after the operation has been
 * validated against the cluster.  This is primarily what prevents dead
 * or stalled nodes from stalling the front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint64_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_func_t last_func = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1LLU << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_func = xop->func;
				xop->func(thr, (hammer2_xop_t *)xop);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

#if 0
	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}
#endif
	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}
1136