/* xref: /original-bsd/usr.sbin/amd/amd/clock.c (revision 29faa970) */
/*
 * Copyright (c) 1989 Jan-Simon Pendry
 * Copyright (c) 1989 Imperial College of Science, Technology & Medicine
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry at Imperial College, London.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)clock.c	8.1 (Berkeley) 06/06/93
 *
 * $Id: clock.c,v 5.2.2.1 1992/02/09 15:08:20 jsp beta $
 *
 */

/*
 * Callouts.
 *
 * Modelled on the kernel object of the same name.
 * See usual references.
 *
 * Use of a heap-based mechanism was rejected:
 * 1.  it would need a more complex implementation.
 * 2.  it is not obvious that a list is too slow for Amd.
 */

#include "am.h"

typedef struct callout callout;
struct callout {
	callout	*c_next;		/* List of callouts */
	void	(*c_fn)();		/* Function to call */
	voidp	c_closure;		/* Closure to pass to call */
	time_t	c_time;			/* Time of call */
	int	c_id;			/* Unique identifier */
};

static callout callouts;		/* List of pending callouts */
static callout *free_callouts;		/* Cache of free callouts */
static int nfree_callouts;		/* Number on free list */
static int callout_id;			/* Next free callout identifier */
time_t next_softclock;			/* Time of next call to softclock() */

/*
 * Number of callout slots we keep on the free list
 */
#define	CALLOUT_FREE_SLOP	10

/*
 * Global assumption: valid ids are non-zero.
 */
#define	CID_ALLOC()	(++callout_id)
#define	CID_UNDEF	(0)

/*
 * Allocate a callout structure, reusing one from
 * the free list whenever possible.
 */
static callout *alloc_callout(P_void);
static callout *alloc_callout()
{
	callout *cp = free_callouts;
	if (cp) {
		--nfree_callouts;
		free_callouts = free_callouts->c_next;
		return cp;
	}
	return ALLOC(callout);
}

/*
 * Return a callout to the free list, or release it
 * if the cache is already full.
 */
static void free_callout P((callout *cp));
static void free_callout(cp)
callout *cp;
{
	if (nfree_callouts > CALLOUT_FREE_SLOP) {
		free((voidp) cp);
	} else {
		cp->c_next = free_callouts;
		free_callouts = cp;
		nfree_callouts++;
	}
}

/*
 * Schedule a callout.
 *
 * (*fn)(closure) will be called at clocktime() + secs
 */
int timeout P((unsigned int secs, void (*fn)(), voidp closure));
int timeout(secs, fn, closure)
unsigned int secs;
void (*fn)();
voidp closure;
{
	callout *cp, *cp2;
	time_t t = clocktime() + secs;

	/*
	 * Allocate and fill in a new callout structure
	 */
	callout *cpnew = alloc_callout();
	cpnew->c_closure = closure;
	cpnew->c_fn = fn;
	cpnew->c_time = t;
	cpnew->c_id = CID_ALLOC();

	if (t < next_softclock)
		next_softclock = t;

	/*
	 * Find the correct place in the list
	 */
	for (cp = &callouts; cp2 = cp->c_next; cp = cp2)
		if (cp2->c_time >= t)
			break;

	/*
	 * And link it in
	 */
	cp->c_next = cpnew;
	cpnew->c_next = cp2;

	/*
	 * Return callout identifier
	 */
	return cpnew->c_id;
}
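
/*
 * A minimal sketch of how a caller might use the interface above:
 * schedule a function with timeout() and remember the returned id so
 * the callout can still be cancelled with untimeout() before it
 * fires.  The example_* names, the closure and the 30 second delay
 * are hypothetical, not part of amd.
 */
#if 0
static int example_id;

static void example_retry(closure)
voidp closure;
{
	/* ... do the deferred work here, e.g. retry a mount ... */
}

static void example_start()
{
	/* Arrange for example_retry(0) to run roughly 30 seconds from now */
	example_id = timeout(30, example_retry, (voidp) 0);
}

static void example_stop()
{
	/* Cancel the pending callout if it has not fired yet */
	if (example_id) {
		untimeout(example_id);
		example_id = CID_UNDEF;
	}
}
#endif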

/*
 * De-schedule a callout
 */
void untimeout P((int id));
void untimeout(id)
int id;
{
	callout *cp, *cp2;
	for (cp = &callouts; cp2 = cp->c_next; cp = cp2) {
		if (cp2->c_id == id) {
			cp->c_next = cp2->c_next;
			free_callout(cp2);
			break;
		}
	}
}

/*
 * Reschedule after clock changed
 */
void reschedule_timeouts P((time_t now, time_t then));
void reschedule_timeouts(now, then)
time_t now;
time_t then;
{
	callout *cp;

	for (cp = callouts.c_next; cp; cp = cp->c_next) {
		if (cp->c_time >= now && cp->c_time <= then) {
			plog(XLOG_WARNING, "job %d rescheduled to run immediately", cp->c_id);
#ifdef DEBUG
			dlog("rescheduling job %d back %d seconds",
				cp->c_id, cp->c_time - now);
#endif
			next_softclock = cp->c_time = now;
		}
	}
}
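
/*
 * A minimal sketch of the intended use of reschedule_timeouts(): a
 * hypothetical caller that has just noticed the system clock being
 * stepped backwards pulls forward every job that was scheduled
 * against the old clock, so nothing ends up sleeping for the extra
 * wall-clock time.  The function and variable names are assumptions,
 * not amd's.
 */
#if 0
static void example_clock_stepped(old_now, new_now)
time_t old_now;
time_t new_now;
{
	if (new_now < old_now) {
		/* Run anything due in the lost [new_now, old_now] window right away */
		reschedule_timeouts(new_now, old_now);
	}
}
#endif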

/*
 * Clock handler
 */
int softclock(P_void);
int softclock()
{
	time_t now;
	callout *cp;

	do {
		if (task_notify_todo)
			do_task_notify();

		now = clocktime();

		/*
		 * While there are more callouts waiting...
		 */
		while ((cp = callouts.c_next) && cp->c_time <= now) {
			/*
			 * Take the first callout off the list:
			 * save its fn & closure, unlink it and
			 * free it, then call the function.
			 *
			 * The free is done before the call because
			 * it is quite common for the function to
			 * call timeout() and try to allocate a
			 * callout.
			 */
			void (*fn)() = cp->c_fn;
			voidp closure = cp->c_closure;

			callouts.c_next = cp->c_next;
			free_callout(cp);
#ifdef DEBUG
			/*dlog("Calling %#x(%#x)", fn, closure);*/
#endif /* DEBUG */
			(*fn)(closure);
		}

	} while (task_notify_todo);

	/*
	 * Return number of seconds to next event,
	 * or 0 if there is no event.
	 */
	if (cp = callouts.c_next)
		return cp->c_time - now;
	return 0;
}
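
/*
 * A minimal sketch of how softclock()'s return value can drive a main
 * loop: the number of seconds until the next pending callout bounds
 * how long the daemon may sleep (0 meaning nothing is pending), for
 * example as a select() timeout.  This simplified loop and its names
 * are hypothetical, not amd's actual main loop.
 */
#if 0
static void example_run()
{
	struct timeval tv;
	int delay;

	for (;;) {
		/* Run all callouts that are due and learn when the next one is */
		delay = softclock();

		/* Nothing pending: pick an arbitrary long sleep instead of 0 */
		tv.tv_sec = delay ? delay : 60 * 60;
		tv.tv_usec = 0;

		/* Sleep until the next callout is due (no descriptors watched here) */
		(void) select(0, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, &tv);
	}
}
#endif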