/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.6 2004/03/08 03:03:54 dillon Exp $
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#define THREAD_STACK	(UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/cpufunc.h>
#include <machine/lock.h>

#endif

#define	MAKE_TOKENS_SPIN
/* #define MAKE_TOKENS_YIELD */

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#ifdef _KERNEL

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

#endif

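/*
 * Illustrative note (not part of the original file): on a kernel built
 * with INVARIANTS the declaration above exports the debug knob as the
 * read/write sysctl "lwkt.token_debug", which can be toggled from
 * userland, e.g.:
 *
 *      sysctl lwkt.token_debug=1
 */
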
#ifdef SMP

/*
 * Determine if we own all the tokens in the token reference list.
 * Return 1 on success, 0 on failure.
 *
 * As a side effect, queue requests for tokens we want which are owned
 * by other cpus.  The magic number is used to communicate when the
 * target cpu has processed the request.  Note, however, that the
 * target cpu may not be able to assign the token to us which is why
 * the scheduler must spin.
 */
int
lwkt_chktokens(thread_t td)
{
    globaldata_t gd = td->td_gd;	/* mycpu */
    lwkt_tokref_t refs;
    globaldata_t dgd;
    lwkt_token_t tok;
    int r = 1;

    for (refs = td->td_toks; refs; refs = refs->tr_next) {
        tok = refs->tr_tok;
        if ((dgd = tok->t_cpu) != gd) {
            cpu_mb1();
            r = 0;

            /*
             * Queue a request to the target cpu, exit the loop early if
             * we are unable to queue the IPI message.  The magic number
             * flags whether we have a pending ipi request queued or not.
             */
            if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
                refs->tr_magic = LWKT_TOKREF_MAGIC2;	/* MP synched slowreq */
                refs->tr_reqgd = gd;
                tok->t_reqcpu = gd;	/* MP unsynchronized 'fast' req */
                if (lwkt_send_ipiq_passive(dgd, lwkt_reqtoken_remote, refs)) {
                    /* failed */
                    refs->tr_magic = LWKT_TOKREF_MAGIC1;
                    break;
                }
            }
        }
    }
    return(r);
}

#endif

/*
 * Check if we already own the token.  Return 1 on success, 0 on failure.
 */
int
lwkt_havetoken(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
        if (ref->tr_tok == tok)
            return(1);
    }
    return(0);
}

int
lwkt_havetokref(lwkt_tokref_t xref)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
        if (ref == xref)
            return(1);
    }
    return(0);
}

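/*
 * Illustrative note (not part of the original file): these predicates are
 * typically useful in assertions that verify the caller already holds a
 * token before touching the data it protects, e.g.:
 *
 *      KKASSERT(lwkt_havetoken(&md->md_token));
 *
 * The structure and field names above are hypothetical; any lwkt_token
 * embedded in a caller-owned structure works the same way.
 */
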
#ifdef SMP

/*
 * Returns 1 if it is ok to give a token away, 0 if it is not.
 */
static int
lwkt_oktogiveaway_token(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;
    thread_t td;

    for (td = gd->gd_curthread; td; td = td->td_preempted) {
        for (ref = td->td_toks; ref; ref = ref->tr_next) {
            if (ref->tr_tok == tok)
                return(0);
        }
    }
    return(1);
}

#endif

/*
 * Acquire a serializing token.
 */

static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_mb1();		/* order memory / we can be interrupted */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then let the scheduler deal with
     * it.  We are guaranteed to own the tokens on our thread's token
     * list when we are switched back in.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, let the scheduler deal with it.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
        /*
         * Temporarily operate on tokens synchronously.  We have to fix
         * a number of interlocks and especially the softupdates code to
         * be able to properly yield.  ZZZ
         */
#if defined(MAKE_TOKENS_SPIN)
        int x = 40000000;
        crit_enter();
        while (lwkt_chktokens(td) == 0) {
            lwkt_process_ipiq();
            lwkt_drain_token_requests();
            if (--x == 0) {
                x = 40000000;
                printf("CHKTOKEN loop %d\n", gd->gd_cpuid);
#ifdef _KERNEL
                Debugger("x");
#endif
            }
            splz();
        }
        crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
        lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
        KKASSERT(tok->t_cpu == gd);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
        while ((td = td->td_preempted) != NULL) {
            lwkt_tokref_t scan;
            for (scan = td->td_toks; scan; scan = scan->tr_next) {
                if (scan->tr_tok == tok) {
                    lwkt_yield();
                    KKASSERT(tok->t_cpu == gd);
                    goto breakout;
                }
            }
        }
breakout:
        ;	/* null statement: a label must be followed by a statement */
    }
    /* 'td' variable no longer valid due to preempt loop above */
}

/*
 * Attempt to acquire a serializing token.
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_mb1();		/* order memory / we can be interrupted */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then stop now.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, stop.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
        td->td_toks = ref->tr_next;	/* remove ref */
        return(0);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
        while ((td = td->td_preempted) != NULL) {
            lwkt_tokref_t scan;
            for (scan = td->td_toks; scan; scan = scan->tr_next) {
                if (scan->tr_tok == tok) {
                    td = gd->gd_curthread;	/* our thread */
                    td->td_toks = ref->tr_next;	/* remove ref */
                    return(0);
                }
            }
        }
    }
    /* 'td' variable no longer valid */
    return(1);
}

void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    _lwkt_gettokref(ref);
}

void
lwkt_gettokref(lwkt_tokref_t ref)
{
    _lwkt_gettokref(ref);
}

int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    return(_lwkt_trytokref(ref));
}

int
lwkt_trytokref(lwkt_tokref_t ref)
{
    return(_lwkt_trytokref(ref));
}

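/*
 * Illustrative usage sketch (not part of the original file): a caller
 * supplies a stack-allocated lwkt_tokref for each acquisition.  The
 * non-blocking form looks roughly like this; "mytok" is a hypothetical
 * token embedded in some caller-owned structure:
 *
 *      lwkt_tokref ilock;
 *
 *      if (lwkt_trytoken(&ilock, &mytok)) {
 *              ... access the data protected by mytok ...
 *              lwkt_reltoken(&ilock);
 *      } else {
 *              ... fall back, e.g. to the blocking lwkt_gettoken() ...
 *      }
 */
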
/*
 * Release a serializing token.
 */
void
lwkt_reltoken(lwkt_tokref *_ref)
{
    lwkt_tokref *ref;
    lwkt_tokref **pref;
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    /*
     * Guard check and stack check (if in the same stack page).  We must
     * also wait for any action pending on remote cpus which we do by
     * checking the magic number and yielding in a loop.
     */
    ref = _ref;
#ifdef INVARIANTS
    if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
        KKASSERT((char *)ref > (char *)&_ref);
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
             ref->tr_magic == LWKT_TOKREF_MAGIC2);
#endif
    /*
     * Locate and unlink the token.  Interlock with the token's t_reqcpu
     * to give the token away before we release it from our thread list,
     * which allows us to avoid using a critical section.
     */
    gd = mycpu;
    td = gd->gd_curthread;
    for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
        KKASSERT(ref != NULL);
    }
    tok = ref->tr_tok;
    KKASSERT(tok->t_cpu == gd);
    tok->t_cpu = tok->t_reqcpu;	/* we do not own 'tok' after this */
    *pref = ref->tr_next;	/* note: also removes giveaway interlock */

    /*
     * If we had gotten the token opportunistically and it still happens to
     * be queued to a target cpu, we have to wait for the target cpu
     * to finish processing it.  This does not happen very often and does
     * not need to be optimal.
     */
    while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
#if defined(MAKE_TOKENS_SPIN)
        crit_enter();
#ifdef SMP
        lwkt_process_ipiq();
#endif
        splz();
        crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
        lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
    }
}

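/*
 * Illustrative usage sketch (not part of the original file): the blocking
 * acquire/release pair.  The tokref lives on the caller's stack for the
 * duration of the hold, which is what the stack check in lwkt_reltoken()
 * above is sanity-testing.  "mytok" is a hypothetical token:
 *
 *      lwkt_tokref ilock;
 *
 *      lwkt_gettoken(&ilock, &mytok);
 *      ... access the data protected by mytok ...
 *      lwkt_reltoken(&ilock);
 */
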
/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
    int i;

    for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
        lwkt_token_init(&pool_tokens[i]);
}

lwkt_token_t
lwkt_token_pool_get(void *ptraddr)
{
    int i;

    i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
    return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

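/*
 * Illustrative usage sketch (not part of the original file): a pool token
 * is selected by hashing an object's address, giving a type-stable token
 * even if the object itself may be freed while waiters exist.  'obj' below
 * is a hypothetical pointer to any structure:
 *
 *      lwkt_tokref ilock;
 *      lwkt_token_t tok = lwkt_token_pool_get(obj);
 *
 *      lwkt_gettoken(&ilock, tok);
 *      ... examine or update the object ...
 *      lwkt_reltoken(&ilock);
 *
 * Distinct objects may hash to the same pool token; that costs some false
 * contention but not correctness.
 */
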
#ifdef SMP

/*
 * This is the receiving side of a remote IPI requesting a token.  If we
 * cannot immediately hand the token off to another cpu we queue it.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_reqtoken_remote(void *data)
{
    lwkt_tokref_t ref = data;
    globaldata_t gd = mycpu;
    lwkt_token_t tok = ref->tr_tok;

    /*
     * We do not have to queue the token if we can give it away
     * immediately.  Otherwise we queue it to our globaldata structure.
     */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
    if (lwkt_oktogiveaway_token(tok)) {
        if (tok->t_cpu == gd)
            tok->t_cpu = ref->tr_reqgd;
        cpu_mb1();
        ref->tr_magic = LWKT_TOKREF_MAGIC1;
    } else {
        ref->tr_gdreqnext = gd->gd_tokreqbase;
        gd->gd_tokreqbase = ref;
    }
}

/*
 * Must be called from a critical section.  Satisfy all remote token
 * requests that are pending on our globaldata structure.  The request
 * does not have to be satisfied with a successful change of ownership
 * but we do have to acknowledge that we have completed processing the
 * request by setting the magic number back to MAGIC1.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_drain_token_requests(void)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;

    while ((ref = gd->gd_tokreqbase) != NULL) {
        gd->gd_tokreqbase = ref->tr_gdreqnext;
        KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
        if (ref->tr_tok->t_cpu == gd)
            ref->tr_tok->t_cpu = ref->tr_reqgd;
        cpu_mb1();
        ref->tr_magic = LWKT_TOKREF_MAGIC1;
    }
}

#endif

/*
 * Initialize a token's owner and release-to (request) cpu to the
 * current cpu.
 */
void
lwkt_token_init(lwkt_token_t tok)
{
    tok->t_cpu = tok->t_reqcpu = mycpu;
}

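/*
 * Illustrative sketch (not part of the original file): a token is normally
 * embedded in the structure it serializes and initialized once when that
 * structure is set up.  The structure and field names are hypothetical:
 *
 *      struct mydata {
 *              lwkt_token      md_token;
 *              int             md_count;
 *      };
 *
 *      lwkt_token_init(&md->md_token);
 *
 * Tokens taken from the pool above need no per-object initialization;
 * lwkt_token_pool_init() has already initialized them.
 */
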
void
lwkt_token_uninit(lwkt_token_t tok)
{
    /* empty */
}
527