/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_timeout.c,v 1.59.2.1 2001/11/13 18:24:52 archie Exp $
 */
/*
 * DRAGONFLY BGL STATUS
 *
 * All the API functions should be MP safe.
 *
 * The callback functions will be flagged as being MP safe if the
 * timeout structure is initialized with callout_init_mp() instead of
 * callout_init().
 *
 * The helper threads cannot be made preempt-capable until after we
 * clean up all the uses of splsoftclock() and related interlocks (which
 * require the related functions to be MP safe as well).
 */
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 *
 * The per-cpu augmentation was done by Matthew Dillon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/thread.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif

struct softclock_pcpu {
        struct callout_tailq *callwheel;        /* per-cpu timing wheel buckets */
        struct callout * volatile next;         /* scan cursor, interlocks removals */
        struct callout *running;                /* currently running callout */
        int softticks;                          /* softticks index */
        int curticks;                           /* per-cpu ticks counter */
        int isrunning;                          /* helper thread is scheduled */
        struct thread thread;                   /* per-cpu helper thread */
};

typedef struct softclock_pcpu *softclock_pcpu_t;

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
static MALLOC_DEFINE(M_CALLOUT, "callout", "callout structures");
static int callwheelsize;
static int callwheelmask;
static struct softclock_pcpu softclock_pcpu_ary[MAXCPU];

static void softclock_handler(void *arg);

static void
swi_softclock_setup(void *arg)
{
        int cpu;
        int i;
        int target;

        /*
         * Figure out how large a callwheel we need.  It must be a power
         * of 2.
         *
         * ncallout is primarily based on available memory, don't explode
         * the allocations if the system has a lot of cpus.
         */
        target = ncallout / ncpus + 16;

        callwheelsize = 1;
        while (callwheelsize < target)
                callwheelsize <<= 1;
        callwheelmask = callwheelsize - 1;
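
        /*
         * Illustrative sizing example (the numbers are hypothetical; the
         * real values depend on ncallout and ncpus at boot): with
         * ncallout = 16384 and ncpus = 4, target = 16384/4 + 16 = 4112,
         * which rounds up to callwheelsize = 8192 (2^13) and
         * callwheelmask = 0x1fff.  A callout due at c_time = 100000 then
         * hashes into bucket (100000 & 0x1fff) = 1696 of this cpu's wheel.
         */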

        /*
         * Initialize per-cpu data structures.
         */
        for (cpu = 0; cpu < ncpus; ++cpu) {
                softclock_pcpu_t sc;

                sc = &softclock_pcpu_ary[cpu];

                sc->callwheel = kmalloc(sizeof(*sc->callwheel) * callwheelsize,
                                        M_CALLOUT, M_WAITOK|M_ZERO);
                for (i = 0; i < callwheelsize; ++i)
                        TAILQ_INIT(&sc->callwheel[i]);

                /*
                 * Mark the softclock handler as being an interrupt thread
                 * even though it really isn't, but do not allow it to
                 * preempt other threads (do not assign td_preemptable).
                 *
                 * Kernel code now assumes that callouts do not preempt
                 * the cpu they were scheduled on.
                 */
                lwkt_create(softclock_handler, sc, NULL,
                            &sc->thread, TDF_NOSTART | TDF_INTTHREAD,
                            cpu, "softclock %d", cpu);
        }
}

/*
 * Must occur after ncpus has been initialized.
 */
SYSINIT(softclock_setup, SI_BOOT2_SOFTCLOCK, SI_ORDER_SECOND,
        swi_softclock_setup, NULL);

/*
 * This routine is called from the hardclock() (basically a FASTint/IPI) on
 * each cpu in the system.  sc->curticks is this cpu's notion of the timebase.
 * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'!  sc->softticks is where
 * the callwheel is currently indexed.
 *
 * WARNING!  The MP lock is not necessarily held on call, nor can it
 * be safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
 */
void
hardclock_softtick(globaldata_t gd)
{
        softclock_pcpu_t sc;

        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        ++sc->curticks;
        if (sc->isrunning)
                return;
        if (sc->softticks == sc->curticks) {
                /*
                 * in sync, only wakeup the thread if there is something to
                 * do.
                 */
                if (TAILQ_FIRST(&sc->callwheel[sc->softticks & callwheelmask]))
                {
                        sc->isrunning = 1;
                        lwkt_schedule(&sc->thread);
                } else {
                        ++sc->softticks;
                }
        } else {
                /*
                 * out of sync, wakeup the thread unconditionally so it can
                 * catch up.
                 */
                sc->isrunning = 1;
                lwkt_schedule(&sc->thread);
        }
}
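
/*
 * Worked example of the softticks/curticks relationship (the values are
 * hypothetical): if hardclock advanced sc->curticks to 105 while the
 * helper thread lagged with sc->softticks at 102, the indices are out of
 * sync and the thread is woken unconditionally; it scans buckets 102
 * through 105 (and any further ticks that arrive meanwhile) before
 * descheduling itself.  When the indices are equal the wheel is in sync
 * and the wakeup can be skipped whenever the current bucket is empty.
 */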

/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPIs / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
        softclock_pcpu_t sc;
        struct callout *c;
        struct callout_tailq *bucket;
        void (*c_func)(void *);
        void *c_arg;
        int mpsafe = 1;

        /*
         * Run the callout thread at the same priority as other kernel
         * threads so it can be round-robined.
         */
        /*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

        sc = arg;
        crit_enter();
loop:
        while (sc->softticks != (int)(sc->curticks + 1)) {
                bucket = &sc->callwheel[sc->softticks & callwheelmask];

                for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
                        if (c->c_time != sc->softticks) {
                                sc->next = TAILQ_NEXT(c, c_links.tqe);
                                continue;
                        }
                        if (c->c_flags & CALLOUT_MPSAFE) {
                                if (mpsafe == 0) {
                                        mpsafe = 1;
                                        rel_mplock();
                                }
                        } else {
                                /*
                                 * The request might be removed while we
                                 * are waiting to get the MP lock.  If it
                                 * was removed sc->next will point to the
                                 * next valid request or NULL, loop up.
                                 */
                                if (mpsafe) {
                                        mpsafe = 0;
                                        sc->next = c;
                                        get_mplock();
                                        if (c != sc->next)
                                                continue;
                                }
                        }
                        sc->next = TAILQ_NEXT(c, c_links.tqe);
                        TAILQ_REMOVE(bucket, c, c_links.tqe);

                        sc->running = c;
                        c_func = c->c_func;
                        c_arg = c->c_arg;
                        c->c_func = NULL;
                        KKASSERT(c->c_flags & CALLOUT_DID_INIT);
                        c->c_flags &= ~CALLOUT_PENDING;
                        crit_exit();
                        c_func(c_arg);
                        crit_enter();
                        sc->running = NULL;
                        /* NOTE: list may have changed */
                }
                ++sc->softticks;
        }
        sc->isrunning = 0;
        lwkt_deschedule_self(&sc->thread);      /* == curthread */
        lwkt_switch();
        goto loop;
        /* NOT REACHED */
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *		    safely be passed to callout_reset() and callout_stop()
 * callout_init_mp() - same but any installed functions must be MP safe.
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */

/*
 * Start or restart a timeout.  Install the callout structure in the
 * callwheel.  Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed a
 * minimum timeout of 1 tick will be enforced.
 *
 * The callout is installed on and will be processed on the current cpu's
 * callout wheel.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 */
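/*
 * Typical usage, as a sketch (my_softc, my_timeout and my_callout are
 * hypothetical names, not part of this file): a client embeds a struct
 * callout in its own structure, initializes it once, then arms and
 * disarms it as needed:
 *
 *	struct my_softc {
 *		struct callout my_callout;
 *	};
 *
 *	callout_init(&sc->my_callout);	(or callout_init_mp())
 *	callout_reset(&sc->my_callout, hz, my_timeout, sc);
 *	...
 *	callout_stop(&sc->my_callout);
 *
 * where my_timeout(void *arg) may re-arm itself with callout_reset()
 * to run periodically.
 */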
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *),
              void *arg)
{
        softclock_pcpu_t sc;
        globaldata_t gd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_reset(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        gd = mycpu;
        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        crit_enter_gd(gd);

        if (c->c_flags & CALLOUT_ACTIVE)
                callout_stop(c);

        if (to_ticks <= 0)
                to_ticks = 1;

        c->c_arg = arg;
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_func = ftn;
        c->c_time = sc->curticks + to_ticks;
        c->c_gd = gd;

        TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & callwheelmask],
                          c, c_links.tqe);
        crit_exit_gd(gd);
}

struct callout_remote_arg {
        struct callout *c;
        void (*ftn)(void *);
        void *arg;
        int to_ticks;
};

static void
callout_reset_ipi(void *arg)
{
        struct callout_remote_arg *rmt = arg;

        callout_reset(rmt->c, rmt->to_ticks, rmt->ftn, rmt->arg);
}

void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
                    void *arg, int cpuid)
{
        KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d", cpuid));

        if (cpuid == mycpuid) {
                callout_reset(c, to_ticks, ftn, arg);
        } else {
                struct globaldata *target_gd;
                struct callout_remote_arg rmt;
                int seq;

                rmt.c = c;
                rmt.ftn = ftn;
                rmt.arg = arg;
                rmt.to_ticks = to_ticks;

                target_gd = globaldata_find(cpuid);

                seq = lwkt_send_ipiq(target_gd, callout_reset_ipi, &rmt);
                lwkt_wait_ipiq(target_gd, seq);
        }
}
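
/*
 * Note on the remote case above (descriptive only): callouts always live
 * on the wheel of the cpu that armed them, so arming on another cpu ships
 * the request over the IPI queue and waits for it to be processed, which
 * also keeps the stack-allocated callout_remote_arg valid.  For example,
 * a hypothetical caller wanting its timer serviced on cpu 2 would use:
 *
 *	callout_reset_bycpu(&sc->my_callout, hz / 10, my_timeout, sc, 2);
 */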

/*
 * Stop a running timer.  WARNING!  If called on a cpu other than the one
 * the callout was started on, this function will liveloop on its IPI to
 * the target cpu to process the request.  It is possible for the callout
 * to execute in that case.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 *
 * WARNING! This routine may be called from an IPI.
 *
 * WARNING! This function can return while its c_func is still running
 *	    in the callout thread, a secondary check may be needed.
 *	    Use callout_stop_sync() to wait for any callout function to
 *	    complete before returning, being sure that no deadlock is
 *	    possible if you do.
 */
int
callout_stop(struct callout *c)
{
        globaldata_t gd = mycpu;
        globaldata_t tgd;
        softclock_pcpu_t sc;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
                callout_init(c);
                kprintf(
                    "callout_stop(%p) from %p: callout was not initialized\n",
                    c, ((int **)&c)[-1]);
                print_backtrace(-1);
        }
#endif
        crit_enter_gd(gd);

        /*
         * Don't attempt to delete a callout that's not on the queue.  The
         * callout may not have a cpu assigned to it.  Callers do not have
         * to be on the issuing cpu but must still serialize access to the
         * callout structure.
         *
         * We are not cpu-localized here and cannot safely modify the
         * flags field in the callout structure.  Note that most of the
         * time CALLOUT_ACTIVE will be 0 if CALLOUT_PENDING is also 0.
         *
         * If we race another cpu's dispatch of this callout it is possible
         * for CALLOUT_ACTIVE to be set with CALLOUT_PENDING unset.  This
         * will cause us to fall through and synchronize with the other
         * cpu.
         */
        if ((c->c_flags & CALLOUT_PENDING) == 0) {
                if ((c->c_flags & CALLOUT_ACTIVE) == 0) {
                        crit_exit_gd(gd);
                        return (0);
                }
                if (c->c_gd == NULL || c->c_gd == gd) {
                        c->c_flags &= ~CALLOUT_ACTIVE;
                        crit_exit_gd(gd);
                        return (0);
                }
        }
        if ((tgd = c->c_gd) != gd) {
                /*
                 * If the callout is owned by a different CPU we have to
                 * execute the function synchronously on the target cpu.
                 */
                int seq;

                cpu_ccfence();  /* don't let tgd alias c_gd */
                seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c);
                lwkt_wait_ipiq(tgd, seq);
        } else {
                /*
                 * If the callout is owned by the same CPU we can
                 * process it directly, but if we are racing our helper
                 * thread (sc->next), we have to adjust sc->next.  The
                 * race is interlocked by a critical section.
                 */
                sc = &softclock_pcpu_ary[gd->gd_cpuid];

                c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
                if (sc->next == c)
                        sc->next = TAILQ_NEXT(c, c_links.tqe);

                TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask],
                             c, c_links.tqe);
                c->c_func = NULL;
        }
        crit_exit_gd(gd);
        return (1);
}

/*
 * Issue a callout_stop() and ensure that any callout race completes
 * before returning.  Does NOT de-initialize the callout.
 */
void
callout_stop_sync(struct callout *c)
{
        softclock_pcpu_t sc;

        while (c->c_flags & CALLOUT_DID_INIT) {
                callout_stop(c);
                if (c->c_gd) {
                        sc = &softclock_pcpu_ary[c->c_gd->gd_cpuid];
                        if (sc->running == c) {
                                while (sc->running == c)
                                        tsleep(&sc->running, 0, "crace", 1);
                        }
                }
                if ((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 0)
                        break;
                kprintf("Warning: %s: callout race\n", curthread->td_comm);
        }
}

/*
 * Terminate a callout.
 *
 * This function will stop any pending callout and also block while the
 * callout's function is running.  It should only be used in cases where
 * no deadlock is possible (a deadlock would arise if the callout function
 * acquires locks that the caller of callout_terminate() already holds),
 * when the caller is ready to destroy the callout structure.
 *
 * This function clears the CALLOUT_DID_INIT flag.
 *
 * lwkt_token locks are ok.
 */
void
callout_terminate(struct callout *c)
{
        softclock_pcpu_t sc;

        if (c->c_flags & CALLOUT_DID_INIT) {
                callout_stop(c);
                sc = &softclock_pcpu_ary[c->c_gd->gd_cpuid];
                if (sc->running == c) {
                        while (sc->running == c)
                                tsleep(&sc->running, 0, "crace", 1);
                }
                KKASSERT((c->c_flags & (CALLOUT_PENDING|CALLOUT_ACTIVE)) == 0);
                c->c_flags &= ~CALLOUT_DID_INIT;
        }
}
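
/*
 * Summary of the teardown ladder above (descriptive only): callout_stop()
 * merely removes a pending timeout and may return while the callback is
 * still executing; callout_stop_sync() additionally waits out a callback
 * already in progress; callout_terminate() does the same and also clears
 * CALLOUT_DID_INIT, making it the appropriate call just before freeing
 * the structure embedding the callout.
 */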

/*
 * Prepare a callout structure for use by callout_reset() and/or
 * callout_stop().  The MP version of this routine requires that the callback
 * function installed by callout_reset() be MP safe.
 *
 * The init functions can be called from any cpu and do not have to be
 * called from the cpu that the timer will eventually run on.
 */
void
callout_init(struct callout *c)
{
        bzero(c, sizeof *c);
        c->c_flags = CALLOUT_DID_INIT;
}

void
callout_init_mp(struct callout *c)
{
        callout_init(c);
        c->c_flags |= CALLOUT_MPSAFE;
}

/* What, are you joking?  This is nuts! -Matt */
#if 0
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary. - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
        struct callout *p;
        unsigned long delta_ticks;

        /*
         * How many ticks were we asleep?
         * (stolen from tvtohz()).
         */

        /* Don't do anything */
        if (time_change->tv_sec < 0)
                return;
        else if (time_change->tv_sec <= LONG_MAX / 1000000)
                delta_ticks = (time_change->tv_sec * 1000000 +
                               time_change->tv_usec + (tick - 1)) / tick + 1;
        else if (time_change->tv_sec <= LONG_MAX / hz)
                delta_ticks = time_change->tv_sec * hz +
                              (time_change->tv_usec + (tick - 1)) / tick + 1;
        else
                delta_ticks = LONG_MAX;

        if (delta_ticks > INT_MAX)
                delta_ticks = INT_MAX;

        /*
         * Now rip through the timer calltodo list looking for timers
         * to expire.
         */

        /* don't collide with softclock() */
        crit_enter();
        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
                p->c_time -= delta_ticks;

                /* Break if the timer had more time on it than delta_ticks */
                if (p->c_time > 0)
                        break;

                /* take back the ticks the timer didn't use (p->c_time <= 0) */
                delta_ticks = -p->c_time;
        }
        crit_exit();

        return;
}
#endif /* APM_FIXUP_CALLTODO */
#endif