1 /* 2 * Copyright (c) 2014,2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 /* 35 * Copyright (c) 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 
38 * 39 * All or some portions of this file are derived from material licensed 40 * to the University of California by American Telephone and Telegraph 41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 42 * the permission of UNIX System Laboratories, Inc. 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 1. Redistributions of source code must retain the above copyright 48 * notice, this list of conditions and the following disclaimer. 49 * 2. Redistributions in binary form must reproduce the above copyright 50 * notice, this list of conditions and the following disclaimer in the 51 * documentation and/or other materials provided with the distribution. 52 * 3. Neither the name of the University nor the names of its contributors 53 * may be used to endorse or promote products derived from this software 54 * without specific prior written permission. 55 * 56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 66 * SUCH DAMAGE. 
 */

#ifndef _SYS_CALLOUT_H_
#define _SYS_CALLOUT_H_

#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _CPU_ATOMIC_H_
#include <machine/atomic.h>
#endif

SLIST_HEAD(callout_list, callout);
TAILQ_HEAD(callout_tailq, callout);

/*
 * Callwheel linkages are only adjusted on the target cpu.  The target
 * cpu can only be [re]assigned when the IPI_MASK and PENDING bits are
 * clear.
 *
 * callout_reset() and callout_stop() are always synchronous and will
 * interlock against a running callout as well as reassign the callout
 * to the current cpu.  The caller might block, and a deadlock is possible
 * if the caller does not use callout_init_lk() or is not careful with
 * locks acquired in the callout function.
 *
 * Programmers should note that our lockmgr locks have a cancellation
 * feature which can be used to avoid deadlocks.  callout_init_lk() also
 * uses this feature.
 *
 * callout_deactivate() is asynchronous and will not interlock against
 * anything.  Deactivation does not dequeue a callout, it simply prevents
 * its function from being executed.
 */
struct callout {
	union {
		SLIST_ENTRY(callout) sle;	/* timer-wheel linkage */
		TAILQ_ENTRY(callout) tqe;	/* timer-wheel linkage */
	} c_links;
	int	c_time;			/* match tick on event */
	int	c_load;			/* load value for reset ipi */
	void	*c_arg;			/* function argument */
	void	(*c_func) (void *);	/* function to call */
	int	c_flags;		/* state of this entry */
	int	c_lineno;		/* debugging */
	struct lock *c_lk;		/* auto-lock */
	const char *c_ident;		/* debugging */
};

/*
 * ACTIVE	- If cleared, the callout is prevented from issuing its
 *		  callback.  The callout remains on its timer queue.
 *
 * PENDING	- Indicates the callout is on a particular cpu's timer queue.
 *		  Also locks the cpu owning the callout.
 *
 * MPSAFE	- Indicates the callout does not need the MP lock (most
 *		  callouts are flagged this way).
 *
 * DID_INIT	- Safety
 *
 * EXECUTED	- Set prior to function dispatch, cleared by callout_reset(),
 *		  cleared and (prior value) returned by callout_stop_sync().
 *
 * WAITING	- Used for tsleep/wakeup blocking, primarily for
 *		  callout_stop().
 *
 * IPI_MASK	- Counts pending IPIs.  Also locks the cpu owning the callout.
 *
 * CPU_MASK	- Currently assigned cpu.  Only valid when at least one bit
 *		  in ARMED_MASK is set.
 *
 */
#define CALLOUT_ACTIVE		0x80000000 /* quick [de]activation flag */
#define CALLOUT_PENDING		0x40000000 /* callout is on callwheel */
#define CALLOUT_MPSAFE		0x20000000 /* callout does not need the BGL */
#define CALLOUT_DID_INIT	0x10000000 /* safety check */
#define CALLOUT_AUTOLOCK	0x08000000 /* auto locking / cancel feature */
#define CALLOUT_WAITING		0x04000000 /* interlocked waiter */
#define CALLOUT_EXECUTED	0x02000000 /* (generates stop status) */
#define CALLOUT_UNUSED01	0x01000000
#define CALLOUT_IPI_MASK	0x00000FFF /* count operations in prog */
#define CALLOUT_CPU_MASK	0x00FFF000 /* cpu assignment */

/* "Armed": queued on a callwheel and/or reset/stop IPIs are in flight */
#define CALLOUT_ARMED_MASK	(CALLOUT_PENDING | CALLOUT_IPI_MASK)

/* The 12-bit cpu id field sits directly above the 12-bit IPI count */
#define CALLOUT_FLAGS_TO_CPU(flags)	(((flags) & CALLOUT_CPU_MASK) >> 12)
#define CALLOUT_CPU_TO_FLAGS(cpuid)	((cpuid) << 12)

/*
 * WARNING! The caller is responsible for stabilizing the callout state,
 *	    our suggestion is to either manage the callout on the same cpu
 *	    or to use the callout_init_lk() feature and hold the lock while
 *	    making callout_*() calls.  The lock will be held automatically
 *	    by the callout wheel for any call-back and the callout wheel
 *	    will handle any callout_stop() deadlocks properly.
 *
 * active  - Returns activation status.
  This bit is set by callout_reset*()
 *	     and will only be cleared by an explicit callout_deactivate()
 *	     or callout_stop().  A function dispatch does not clear this
 *	     bit.  In addition, a callout_reset() to another cpu is
 *	     asynchronous and may not immediately re-set this bit.
 *
 * deactivate - Disarm the callout, preventing it from being executed if it
 *	     is queued or the queueing operation is in-flight.  Has no
 *	     effect if the callout has already been dispatched.  Does not
 *	     dequeue the callout.  Does not affect the status returned
 *	     by callout_stop().
 *
 *	     Not serialized; the caller must be careful when racing a new
 *	     callout_reset() that might be issued by the callback, which
 *	     will re-arm the callout.
 *
 *	     callout_reset() must be called to reactivate the callout.
 *
 * pending - Only useful for same-cpu callouts, indicates that the callout
 *	     is pending on the callwheel or that a callout_reset() ipi
 *	     is (probably) in-flight.  Can also false-positive on
 *	     callout_stop() IPIs.
 */
#define callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)

#define callout_deactivate(c)	atomic_clear_int(&(c)->c_flags, CALLOUT_ACTIVE)

#define callout_pending(c)	((c)->c_flags & CALLOUT_ARMED_MASK)

#ifdef _KERNEL
extern int ncallout;

struct globaldata;

void	hardclock_softtick(struct globaldata *);

/*
 * Initialization.  The *_mp variants flag the callout MPSAFE and the
 * *_lk variants enable the auto-lock feature with the supplied lockmgr
 * lock.  The *_initd*() forms additionally record a caller identity
 * string and line number in c_ident/c_lineno for debugging (see the
 * CALLOUT_DEBUG wrappers below).
 */
void	callout_init (struct callout *);
void	callout_init_mp (struct callout *);
void	callout_init_lk (struct callout *, struct lock *);
void	callout_initd (struct callout *, const char *, int);
void	callout_initd_mp (struct callout *, const char *, int);
void	callout_initd_lk (struct callout *, struct lock *, const char *, int);

/*
 * Arming and disarming.  The int-returning stop functions report the
 * prior EXECUTED status (see CALLOUT_EXECUTED above).
 * callout_reset_bycpu() queues the callout on an explicit target cpu.
 */
void	callout_reset (struct callout *, int, void (*)(void *), void *);
int	callout_stop (struct callout *);
void	callout_stop_async (struct callout *);
int	callout_stop_sync (struct callout *);
void	callout_terminate (struct callout *);
void	callout_reset_bycpu (struct callout *, int, void (*)(void *), void *,
			int);

/* Compatibility alias: draining is implemented as a synchronous stop */
#define callout_drain(x) callout_stop_sync(x)

/*
 * With CALLOUT_DEBUG defined (currently unconditional), the callout_init*()
 * entry points are routed through the *_initd*() variants so the
 * initialization site (__FILE__, __LINE__) is recorded in the callout.
 */
#define CALLOUT_DEBUG
#ifdef CALLOUT_DEBUG
#define callout_init(co)	callout_initd(co, __FILE__, __LINE__)
#define callout_init_mp(co)	callout_initd_mp(co, __FILE__, __LINE__)
#define callout_init_lk(co, lk)	callout_initd_lk(co, lk, __FILE__, __LINE__)
#endif

#endif	/* _KERNEL */

#endif	/* _SYS_CALLOUT_H_ */