1 /*
2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/kern/lwkt_token.c,v 1.10 2004/09/21 18:46:00 joerg Exp $
35  */
36 
37 #ifdef _KERNEL
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/queue.h>
45 #include <sys/thread2.h>
46 #include <sys/sysctl.h>
47 #include <sys/kthread.h>
48 #include <machine/cpu.h>
49 #include <sys/lock.h>
50 #include <sys/caps.h>
51 
52 #include <vm/vm.h>
53 #include <vm/vm_param.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_map.h>
58 #include <vm/vm_pager.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_zone.h>
61 
62 #include <machine/stdarg.h>
63 #include <machine/ipl.h>
64 #include <machine/smp.h>
65 
66 #define THREAD_STACK	(UPAGES * PAGE_SIZE)
67 
68 #else
69 
70 #include <sys/stdint.h>
71 #include <libcaps/thread.h>
72 #include <sys/thread.h>
73 #include <sys/msgport.h>
74 #include <sys/errno.h>
75 #include <libcaps/globaldata.h>
76 #include <machine/cpufunc.h>
77 #include <sys/thread2.h>
78 #include <sys/msgport2.h>
79 #include <stdio.h>
80 #include <stdlib.h>
81 #include <string.h>
82 #include <machine/lock.h>
83 #include <machine/cpu.h>
84 
85 #endif
86 
87 #define	MAKE_TOKENS_SPIN
88 /* #define MAKE_TOKENS_YIELD */
89 
90 #ifndef LWKT_NUM_POOL_TOKENS
91 #define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
92 #endif
93 #define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)
94 
95 #ifdef INVARIANTS
96 static int token_debug = 0;
97 #endif
98 
99 static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];
100 
101 #ifdef _KERNEL
102 
103 #ifdef INVARIANTS
104 SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
105 #endif
106 
107 #endif
108 
109 #ifdef SMP
110 
111 /*
112  * Determine if we own all the tokens in the token reference list.
113  * Return 1 on success, 0 on failure.
114  *
115  * As a side effect, queue requests for tokens we want which are owned
116  * by other cpus.  The magic number is used to communicate when the
117  * target cpu has processed the request.  Note, however, that the
118  * target cpu may not be able to assign the token to us which is why
119  * the scheduler must spin.
120  */
121 int
122 lwkt_chktokens(thread_t td)
123 {
124     globaldata_t gd = td->td_gd;	/* mycpu */
125     lwkt_tokref_t refs;
126     globaldata_t dgd;
127     lwkt_token_t tok;
128     int r = 1;
129 
130     for (refs = td->td_toks; refs; refs = refs->tr_next) {
131 	tok = refs->tr_tok;
132 	if ((dgd = tok->t_cpu) != gd) {
133 	    cpu_mb1();
134 	    r = 0;
135 
136 	    /*
137 	     * Queue a request to the target cpu, exit the loop early if
138 	     * we are unable to queue the IPI message.  The magic number
139 	     * flags whether we have a pending ipi request queued or not.
140 	     */
141 	    if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
142 		refs->tr_magic = LWKT_TOKREF_MAGIC2;	/* MP synched slowreq*/
143 		refs->tr_reqgd = gd;
144 		tok->t_reqcpu = gd;	/* MP unsynchronized 'fast' req */
145 		if (lwkt_send_ipiq_passive(dgd, lwkt_reqtoken_remote, refs)) {
146 		    /* failed */
147 		    refs->tr_magic = LWKT_TOKREF_MAGIC1;
148 		    break;
149 		}
150 	    }
151 	}
152     }
153     return(r);
154 }
155 
156 #endif
157 
158 /*
159  * Check if we already own the token.  Return 1 on success, 0 on failure.
160  */
161 int
162 lwkt_havetoken(lwkt_token_t tok)
163 {
164     globaldata_t gd = mycpu;
165     thread_t td = gd->gd_curthread;
166     lwkt_tokref_t ref;
167 
168     for (ref = td->td_toks; ref; ref = ref->tr_next) {
169         if (ref->tr_tok == tok)
170             return(1);
171     }
172     return(0);
173 }
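
/*
 * Usage sketch: lwkt_havetoken() is convenient for asserting that the
 * caller already holds a token before it touches the data the token
 * guards.  The structure and token names below are hypothetical and
 * for illustration only:
 *
 *	KKASSERT(lwkt_havetoken(&my_struct->ms_token));
 *	... modify fields protected by ms_token ...
 */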
174 
175 int
176 lwkt_havetokref(lwkt_tokref_t xref)
177 {
178     globaldata_t gd = mycpu;
179     thread_t td = gd->gd_curthread;
180     lwkt_tokref_t ref;
181 
182     for (ref = td->td_toks; ref; ref = ref->tr_next) {
183         if (ref == xref)
184             return(1);
185     }
186     return(0);
187 }
188 
189 #ifdef SMP
190 
191 /*
192  * Returns 1 if it is ok to give a token away, 0 if it is not.
193  */
194 static int
195 lwkt_oktogiveaway_token(lwkt_token_t tok)
196 {
197     globaldata_t gd = mycpu;
198     lwkt_tokref_t ref;
199     thread_t td;
200 
201     for (td = gd->gd_curthread; td; td = td->td_preempted) {
202 	for (ref = td->td_toks; ref; ref = ref->tr_next) {
203 	    if (ref->tr_tok == tok)
204 		return(0);
205 	}
206     }
207     return(1);
208 }
209 
210 #endif
211 
212 /*
213  * Acquire a serializing token
214  */
215 
216 static __inline
217 void
218 _lwkt_gettokref(lwkt_tokref_t ref)
219 {
220     lwkt_token_t tok;
221     globaldata_t gd;
222     thread_t td;
223 
224     gd = mycpu;			/* our cpu */
225     KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
226     td = gd->gd_curthread;	/* our thread */
227 
228     /*
229      * Link the request into our thread's list.  This interlocks against
230      * remote requests from other cpus and prevents the token from being
231      * given away if our cpu already owns it.  This also allows us to
232      * avoid using a critical section.
233      */
234     ref->tr_next = td->td_toks;
235     cpu_mb1();		/* order memory / we can be interrupted */
236     td->td_toks = ref;
237 
238     /*
239      * If our cpu does not own the token then let the scheduler deal with
240      * it.  We are guaranteed to own the tokens on our thread's token
241      * list when we are switched back in.
242      *
243      * Otherwise make sure the token is not held by a thread we are
244      * preempting.  If it is, let the scheduler deal with it.
245      */
246     tok = ref->tr_tok;
247 #ifdef SMP
248     if (tok->t_cpu != gd) {
249 	/*
250 	 * Temporarily operate on tokens synchronously.  We have to fix
251 	 * a number of interlocks and especially the softupdates code to
252 	 * be able to properly yield.  ZZZ
253 	 */
254 #if defined(MAKE_TOKENS_SPIN)
255 	int x = 40000000;
256 	crit_enter();
257 	while (lwkt_chktokens(td) == 0) {
258 	    lwkt_process_ipiq();
259 	    lwkt_drain_token_requests();
260 	    if (--x == 0) {
261 		x = 40000000;
262 		printf("CHKTOKEN loop %d\n", gd->gd_cpuid);
263 #ifdef _KERNEL
264 		Debugger("x");
265 #endif
266 	    }
267 	    splz();
268 	}
269 	crit_exit();
270 #elif defined(MAKE_TOKENS_YIELD)
271 	lwkt_yield();
272 #else
273 #error MAKE_TOKENS_XXX ?
274 #endif
275 	KKASSERT(tok->t_cpu == gd);
276     } else /* NOTE CONDITIONAL */
277 #endif
278     if (td->td_preempted) {
279 	while ((td = td->td_preempted) != NULL) {
280 	    lwkt_tokref_t scan;
281 	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
282 		if (scan->tr_tok == tok) {
283 		    lwkt_yield();
284 		    KKASSERT(tok->t_cpu == gd);
285 		    goto breakout;
286 		}
287 	    }
288 	}
289 breakout: ;
290     }
291     /* 'td' variable no longer valid due to preempt loop above */
292 }
293 
294 
295 /*
296  * Attempt to acquire a serializing token
297  */
298 static __inline
299 int
300 _lwkt_trytokref(lwkt_tokref_t ref)
301 {
302     lwkt_token_t tok;
303     globaldata_t gd;
304     thread_t td;
305 
306     gd = mycpu;			/* our cpu */
307     KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
308     td = gd->gd_curthread;	/* our thread */
309 
310     /*
311      * Link the request into our thread's list.  This interlocks against
312      * remote requests from other cpus and prevents the token from being
313      * given away if our cpu already owns it.  This also allows us to
314      * avoid using a critical section.
315      */
316     ref->tr_next = td->td_toks;
317     cpu_mb1();		/* order memory / we can be interrupted */
318     td->td_toks = ref;
319 
320     /*
321      * If our cpu does not own the token then stop now.
322      *
323      * Otherwise make sure the token is not held by a thread we are
324      * preempting.  If it is, stop.
325      */
326     tok = ref->tr_tok;
327 #ifdef SMP
328     if (tok->t_cpu != gd) {
329 	td->td_toks = ref->tr_next;	/* remove ref */
330 	return(0);
331     } else /* NOTE CONDITIONAL */
332 #endif
333     if (td->td_preempted) {
334 	while ((td = td->td_preempted) != NULL) {
335 	    lwkt_tokref_t scan;
336 	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
337 		if (scan->tr_tok == tok) {
338 		    td = gd->gd_curthread;	/* our thread */
339 		    td->td_toks = ref->tr_next;	/* remove ref */
340 		    return(0);
341 		}
342 	    }
343 	}
344     }
345     /* 'td' variable no longer valid */
346     return(1);
347 }
348 
349 void
350 lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
351 {
352     lwkt_tokref_init(ref, tok);
353     _lwkt_gettokref(ref);
354 }
355 
356 void
357 lwkt_gettokref(lwkt_tokref_t ref)
358 {
359     _lwkt_gettokref(ref);
360 }
361 
362 int
363 lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
364 {
365     lwkt_tokref_init(ref, tok);
366     return(_lwkt_trytokref(ref));
367 }
368 
369 int
370 lwkt_trytokref(lwkt_tokref_t ref)
371 {
372     return(_lwkt_trytokref(ref));
373 }
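
/*
 * Usage sketch: the try variants acquire the token only if that can be
 * done without spinning or yielding.  The caller supplies a tokref on
 * its own stack and releases it with lwkt_reltoken() on success.
 * 'my_token' is a hypothetical token used purely for illustration:
 *
 *	lwkt_tokref ilock;
 *
 *	if (lwkt_trytoken(&ilock, &my_token)) {
 *		... touch the serialized data ...
 *		lwkt_reltoken(&ilock);
 *	} else {
 *		... take a slower path or retry later ...
 *	}
 */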
374 
375 /*
376  * Release a serializing token
377  */
378 void
379 lwkt_reltoken(lwkt_tokref *_ref)
380 {
381     lwkt_tokref *ref;
382     lwkt_tokref **pref;
383     lwkt_token_t tok;
384     globaldata_t gd;
385     thread_t td;
386 
387     /*
388      * Guard check and stack check (if in the same stack page).  We must
389      * also wait for any action pending on remote cpus which we do by
390      * checking the magic number and yielding in a loop.
391      */
392     ref = _ref;
393 #ifdef INVARIANTS
394     if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
395 	KKASSERT((char *)ref > (char *)&_ref);
396     KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
397 	     ref->tr_magic == LWKT_TOKREF_MAGIC2);
398 #endif
399     /*
400      * Locate and unlink the token.  Interlock with the token's cpureq
401      * to give the token away before we release it from our thread list,
402      * which allows us to avoid using a critical section.
403      */
404     gd = mycpu;
405     td = gd->gd_curthread;
406     for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
407 	KKASSERT(ref != NULL);
408     }
409     tok = ref->tr_tok;
410     KKASSERT(tok->t_cpu == gd);
411     tok->t_cpu = tok->t_reqcpu;	/* we do not own 'tok' after this */
412     *pref = ref->tr_next;	/* note: also removes giveaway interlock */
413 
414     /*
415      * If we had gotten the token opportunistically and it still happens to
416      * be queued to a target cpu, we have to wait for the target cpu
417      * to finish processing it.  This does not happen very often and does
418      * not need to be optimal.
419      */
420     while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
421 #if defined(MAKE_TOKENS_SPIN)
422 	crit_enter();
423 #ifdef SMP
424 	lwkt_process_ipiq();
425 #endif
426 	splz();
427 	crit_exit();
428 #elif defined(MAKE_TOKENS_YIELD)
429 	lwkt_yield();
430 #else
431 #error MAKE_TOKENS_XXX ?
432 #endif
433     }
434 }
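
/*
 * Usage sketch: the common blocking pattern pairs lwkt_gettoken() with
 * lwkt_reltoken() in the same stack frame, with the tokref living on
 * the caller's stack.  'my_token' and the data it protects are
 * hypothetical names used only for illustration:
 *
 *	lwkt_tokref ilock;
 *
 *	lwkt_gettoken(&ilock, &my_token);
 *	... access the serialized data; if the thread blocks, the
 *	... scheduler re-acquires the tokens on its token list before
 *	... switching the thread back in
 *	lwkt_reltoken(&ilock);
 */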
435 
436 /*
437  * Pool tokens are used to provide a type-stable serializing token
438  * pointer that does not race against disappearing data structures.
439  *
440  * This routine is called in early boot just after we setup the BSP's
441  * globaldata structure.
442  */
443 void
444 lwkt_token_pool_init(void)
445 {
446     int i;
447 
448     for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
449 	lwkt_token_init(&pool_tokens[i]);
450 }
451 
452 lwkt_token_t
453 lwkt_token_pool_get(void *ptraddr)
454 {
455     int i;
456 
457     i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
458     return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
459 }
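
/*
 * Usage sketch: a caller hashes the address of the object it wants to
 * serialize on and then acquires the returned pool token like any
 * other token.  Because the pool is statically allocated, the token
 * pointer remains valid even if the object itself is freed.  'obj' is
 * a hypothetical pointer used only for illustration:
 *
 *	lwkt_tokref ilock;
 *	lwkt_token_t tok;
 *
 *	tok = lwkt_token_pool_get(obj);
 *	lwkt_gettoken(&ilock, tok);
 *	... examine or tear down *obj ...
 *	lwkt_reltoken(&ilock);
 */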
460 
461 #ifdef SMP
462 
463 /*
464  * This is the receiving side of a remote IPI requesting a token.  If we
465  * cannot immediately hand the token off to another cpu we queue it.
466  *
467  * NOTE!  we 'own' the ref structure, but we only 'own' the token if
468  * t_cpu == mycpu.
469  */
470 void
471 lwkt_reqtoken_remote(void *data)
472 {
473     lwkt_tokref_t ref = data;
474     globaldata_t gd = mycpu;
475     lwkt_token_t tok = ref->tr_tok;
476 
477     /*
478      * We do not have to queue the token if we can give it away
479      * immediately.  Otherwise we queue it to our globaldata structure.
480      */
481     KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
482     if (lwkt_oktogiveaway_token(tok)) {
483 	if (tok->t_cpu == gd)
484 	    tok->t_cpu = ref->tr_reqgd;
485 	cpu_mb1();
486 	ref->tr_magic = LWKT_TOKREF_MAGIC1;
487     } else {
488 	ref->tr_gdreqnext = gd->gd_tokreqbase;
489 	gd->gd_tokreqbase = ref;
490     }
491 }
492 
493 /*
494  * Must be called from a critical section.  Satisfy all remote token
495  * requests that are pending on our globaldata structure.  The request
496  * does not have to be satisfied with a successful change of ownership
497  * but we do have to acknowledge that we have completed processing the
498  * request by setting the magic number back to MAGIC1.
499  *
500  * NOTE!  we 'own' the ref structure, but we only 'own' the token if
501  * t_cpu == mycpu.
502  */
503 void
504 lwkt_drain_token_requests(void)
505 {
506     globaldata_t gd = mycpu;
507     lwkt_tokref_t ref;
508 
509     while ((ref = gd->gd_tokreqbase) != NULL) {
510 	gd->gd_tokreqbase = ref->tr_gdreqnext;
511 	KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
512 	if (ref->tr_tok->t_cpu == gd)
513 	    ref->tr_tok->t_cpu = ref->tr_reqgd;
514 	cpu_mb1();
515 	ref->tr_magic = LWKT_TOKREF_MAGIC1;
516     }
517 }
518 
519 #endif
520 
521 /*
522  * Initialize a token: set the owner and release-to cpu to the
523  * current cpu.
524  */
525 void
526 lwkt_token_init(lwkt_token_t tok)
527 {
528     tok->t_cpu = tok->t_reqcpu = mycpu;
529 }
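
/*
 * Usage sketch: a subsystem typically embeds a token in the structure
 * it protects and initializes it once, before the structure becomes
 * visible to other cpus.  The structure below is hypothetical:
 *
 *	struct mydata {
 *		lwkt_token	md_token;
 *		int		md_value;
 *	};
 *
 *	lwkt_token_init(&md->md_token);
 */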
530 
531 void
532 lwkt_token_uninit(lwkt_token_t tok)
533 {
534     /* empty */
535 }
536