/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.9 2004/07/16 05:51:10 dillon Exp $
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#define THREAD_STACK	(UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>

#endif

#define	MAKE_TOKENS_SPIN
/* #define MAKE_TOKENS_YIELD */

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#ifdef _KERNEL

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

#endif

#ifdef SMP

/*
 * Determine if we own all the tokens in the token reference list.
 * Return 1 on success, 0 on failure.
 *
 * As a side effect, queue requests for tokens we want that are owned
 * by other cpus.  The magic number is used to communicate when the
 * target cpu has processed the request.  Note, however, that the
 * target cpu may not be able to assign the token to us, which is why
 * the scheduler must spin.
 */
int
lwkt_chktokens(thread_t td)
{
    globaldata_t gd = td->td_gd;	/* mycpu */
    lwkt_tokref_t refs;
    globaldata_t dgd;
    lwkt_token_t tok;
    int r = 1;

    for (refs = td->td_toks; refs; refs = refs->tr_next) {
        tok = refs->tr_tok;
        if ((dgd = tok->t_cpu) != gd) {
            cpu_mb1();
            r = 0;

            /*
             * Queue a request to the target cpu, exit the loop early if
             * we are unable to queue the IPI message.  The magic number
             * flags whether we have a pending ipi request queued or not.
             */
            if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
                refs->tr_magic = LWKT_TOKREF_MAGIC2;	/* MP synched slowreq*/
                refs->tr_reqgd = gd;
                tok->t_reqcpu = gd;	/* MP unsynchronized 'fast' req */
                if (lwkt_send_ipiq_passive(dgd, lwkt_reqtoken_remote, refs)) {
                    /* failed */
                    refs->tr_magic = LWKT_TOKREF_MAGIC1;
                    break;
                }
            }
        }
    }
    return(r);
}

#endif

/*
 * Check if we already own the token.  Return 1 on success, 0 on failure.
 */
int
lwkt_havetoken(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
        if (ref->tr_tok == tok)
            return(1);
    }
    return(0);
}

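/*
 * Illustrative usage sketch (not part of the original file): code that
 * requires a token to already be held typically asserts ownership with
 * lwkt_havetoken().  The token 'my_token' below is a hypothetical example.
 *
 *	static lwkt_token my_token;
 *
 *	KKASSERT(lwkt_havetoken(&my_token));
 */
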
int
lwkt_havetokref(lwkt_tokref_t xref)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
        if (ref == xref)
            return(1);
    }
    return(0);
}

#ifdef SMP

/*
 * Returns 1 if it is ok to give a token away, 0 if it is not.
 */
static int
lwkt_oktogiveaway_token(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;
    thread_t td;

    for (td = gd->gd_curthread; td; td = td->td_preempted) {
        for (ref = td->td_toks; ref; ref = ref->tr_next) {
            if (ref->tr_tok == tok)
                return(0);
        }
    }
    return(1);
}

#endif

/*
 * Acquire a serializing token
 */

static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_mb1();		/* order memory / we can be interrupted */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then let the scheduler deal with
     * it.  We are guaranteed to own the tokens on our thread's token
     * list when we are switched back in.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, let the scheduler deal with it.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
        /*
         * Temporarily operate on tokens synchronously.  We have to fix
         * a number of interlocks and especially the softupdates code to
         * be able to properly yield.  ZZZ
         */
#if defined(MAKE_TOKENS_SPIN)
        int x = 40000000;
        crit_enter();
        while (lwkt_chktokens(td) == 0) {
            lwkt_process_ipiq();
            lwkt_drain_token_requests();
            if (--x == 0) {
                x = 40000000;
                printf("CHKTOKEN loop %d\n", gd->gd_cpuid);
#ifdef _KERNEL
                Debugger("x");
#endif
            }
            splz();
        }
        crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
        lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
        KKASSERT(tok->t_cpu == gd);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
        while ((td = td->td_preempted) != NULL) {
            lwkt_tokref_t scan;
            for (scan = td->td_toks; scan; scan = scan->tr_next) {
                if (scan->tr_tok == tok) {
                    lwkt_yield();
                    KKASSERT(tok->t_cpu == gd);
                    goto breakout;
                }
            }
        }
breakout: ;
    }
    /* 'td' variable no longer valid due to preempt loop above */
}


/*
 * Attempt to acquire a serializing token
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_mb1();		/* order memory / we can be interrupted */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then stop now.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, stop.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
        td->td_toks = ref->tr_next;	/* remove ref */
        return(0);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
        while ((td = td->td_preempted) != NULL) {
            lwkt_tokref_t scan;
            for (scan = td->td_toks; scan; scan = scan->tr_next) {
                if (scan->tr_tok == tok) {
                    td = gd->gd_curthread;	/* our thread */
                    td->td_toks = ref->tr_next;	/* remove ref */
                    return(0);
                }
            }
        }
    }
    /* 'td' variable no longer valid */
    return(1);
}

void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    _lwkt_gettokref(ref);
}

void
lwkt_gettokref(lwkt_tokref_t ref)
{
    _lwkt_gettokref(ref);
}

int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    return(_lwkt_trytokref(ref));
}

int
lwkt_trytokref(lwkt_tokref_t ref)
{
    return(_lwkt_trytokref(ref));
}

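/*
 * Illustrative sketch (not part of the original file): a caller that cannot
 * afford to block can attempt a non-blocking acquisition with lwkt_trytoken()
 * and, on success, release the token with lwkt_reltoken().  The token
 * 'my_token' is a hypothetical example.
 *
 *	lwkt_tokref ilock;
 *
 *	if (lwkt_trytoken(&ilock, &my_token)) {
 *		... access the structure protected by my_token ...
 *		lwkt_reltoken(&ilock);
 *	} else {
 *		... fall back, the token is owned elsewhere ...
 *	}
 */
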
/*
 * Release a serializing token
 */
void
lwkt_reltoken(lwkt_tokref *_ref)
{
    lwkt_tokref *ref;
    lwkt_tokref **pref;
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    /*
     * Guard check and stack check (if in the same stack page).  We must
     * also wait for any action pending on remote cpus, which we do by
     * checking the magic number and yielding in a loop.
     */
    ref = _ref;
#ifdef INVARIANTS
    if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
	KKASSERT((char *)ref > (char *)&_ref);
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
	     ref->tr_magic == LWKT_TOKREF_MAGIC2);
#endif
    /*
     * Locate and unlink the token.  Interlock with the token's cpureq
     * to give the token away before we release it from our thread list,
     * which allows us to avoid using a critical section.
     */
    gd = mycpu;
    td = gd->gd_curthread;
    for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
        KKASSERT(ref != NULL);
    }
    tok = ref->tr_tok;
    KKASSERT(tok->t_cpu == gd);
    tok->t_cpu = tok->t_reqcpu;	/* we do not own 'tok' after this */
    *pref = ref->tr_next;	/* note: also removes giveaway interlock */

    /*
     * If we had gotten the token opportunistically and it still happens to
     * be queued to a target cpu, we have to wait for the target cpu
     * to finish processing it.  This does not happen very often and does
     * not need to be optimal.
     */
    while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
#if defined(MAKE_TOKENS_SPIN)
        crit_enter();
#ifdef SMP
        lwkt_process_ipiq();
#endif
        splz();
        crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
        lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
    }
}

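/*
 * Illustrative sketch (not part of the original file): the common blocking
 * pattern pairs lwkt_gettoken() with lwkt_reltoken(), with the tokref living
 * on the caller's stack.  The token 'my_token' is a hypothetical example.
 *
 *	lwkt_tokref ilock;
 *
 *	lwkt_gettoken(&ilock, &my_token);
 *	... the token is owned by this cpu until released ...
 *	lwkt_reltoken(&ilock);
 */
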
/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
    int i;

    for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
        lwkt_token_init(&pool_tokens[i]);
}

lwkt_token_t
lwkt_token_pool_get(void *ptraddr)
{
    int i;

    i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
    return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

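/*
 * Illustrative sketch (not part of the original file): a caller can hash an
 * object's address into the pool and serialize on the returned token without
 * embedding a token in the (possibly short-lived) object itself.  The
 * structure pointer 'obj' is a hypothetical example.
 *
 *	lwkt_tokref ilock;
 *
 *	lwkt_gettoken(&ilock, lwkt_token_pool_get(obj));
 *	... obj may be examined or modified while the pool token is held ...
 *	lwkt_reltoken(&ilock);
 */
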
#ifdef SMP

/*
 * This is the receiving side of a remote IPI requesting a token.  If we
 * cannot immediately hand the token off to another cpu we queue it.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_reqtoken_remote(void *data)
{
    lwkt_tokref_t ref = data;
    globaldata_t gd = mycpu;
    lwkt_token_t tok = ref->tr_tok;

    /*
     * We do not have to queue the token if we can give it away
     * immediately.  Otherwise we queue it to our globaldata structure.
     */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
    if (lwkt_oktogiveaway_token(tok)) {
        if (tok->t_cpu == gd)
            tok->t_cpu = ref->tr_reqgd;
        cpu_mb1();
        ref->tr_magic = LWKT_TOKREF_MAGIC1;
    } else {
        ref->tr_gdreqnext = gd->gd_tokreqbase;
        gd->gd_tokreqbase = ref;
    }
}

/*
 * Must be called from a critical section.  Satisfy all remote token
 * requests that are pending on our globaldata structure.  The request
 * does not have to be satisfied with a successful change of ownership,
 * but we do have to acknowledge that we have completed processing the
 * request by setting the magic number back to MAGIC1.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_drain_token_requests(void)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;

    while ((ref = gd->gd_tokreqbase) != NULL) {
        gd->gd_tokreqbase = ref->tr_gdreqnext;
        KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
        if (ref->tr_tok->t_cpu == gd)
            ref->tr_tok->t_cpu = ref->tr_reqgd;
        cpu_mb1();
        ref->tr_magic = LWKT_TOKREF_MAGIC1;
    }
}

#endif

/*
 * Initialize the owner and release-to cpu to the current cpu.
 */
void
lwkt_token_init(lwkt_token_t tok)
{
    tok->t_cpu = tok->t_reqcpu = mycpu;
}

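/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * owns a long-lived structure typically declares its own token and
 * initializes it once at attach or boot time.  The names below are
 * hypothetical.
 *
 *	static lwkt_token my_subsys_token;
 *
 *	void
 *	my_subsys_init(void)
 *	{
 *	    lwkt_token_init(&my_subsys_token);
 *	}
 */
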
void
lwkt_token_uninit(lwkt_token_t tok)
{
    /* empty */
}