/*
 * $Id: clock.c,v 5.2.1.4 91/03/03 20:41:36 jsp Alpha $
 *
 * Copyright (c) 1989 Jan-Simon Pendry
 * Copyright (c) 1989 Imperial College of Science, Technology & Medicine
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry at Imperial College, London.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)clock.c	5.2 (Berkeley) 03/17/91
 */

/*
 * Callouts.
 *
 * Modelled on the kernel object of the same name.
 * See the usual references.
 *
 * Use of a heap-based mechanism was rejected:
 * 1.  a more complex implementation would be needed;
 * 2.  it is not obvious that a list is too slow for Amd.
 */

#include "am.h"

typedef struct callout callout;
struct callout {
	callout	*c_next;		/* List of callouts */
	void	(*c_fn)();		/* Function to call */
	voidp	c_closure;		/* Closure to pass to call */
	time_t	c_time;			/* Time of call */
	int	c_id;			/* Unique identifier */
};

static callout callouts;		/* List of pending callouts */
static callout *free_callouts;		/* Cache of free callouts */
static int nfree_callouts;		/* Number on free list */
static int callout_id;			/* Next free callout identifier */
time_t next_softclock;			/* Time of next call to softclock() */

/*
 * Number of callout slots we keep on the free list
 */
#define	CALLOUT_FREE_SLOP	10

/*
 * Global assumption: valid ids are non-zero.
 */
#define	CID_ALLOC()	(++callout_id)
#define	CID_UNDEF	(0)

/*
 * Allocate a callout structure, reusing one
 * from the free list when possible.
 */
static callout *alloc_callout(P_void);
static callout *alloc_callout()
{
	callout *cp = free_callouts;
	if (cp) {
		--nfree_callouts;
		free_callouts = free_callouts->c_next;
		return cp;
	}
	return ALLOC(callout);
}

/*
 * Return a callout structure to the free list,
 * or really free it if the cache is already full.
 */
static void free_callout P((callout *cp));
static void free_callout(cp)
callout *cp;
{
	if (nfree_callouts > CALLOUT_FREE_SLOP) {
		free((voidp) cp);
	} else {
		cp->c_next = free_callouts;
		free_callouts = cp;
		nfree_callouts++;
	}
}

/*
 * Schedule a callout.
 *
 * (*fn)(closure) will be called at clocktime() + secs
 */
int timeout P((unsigned int secs, void (*fn)(), voidp closure));
int timeout(secs, fn, closure)
unsigned int secs;
void (*fn)();
voidp closure;
{
	callout *cp, *cp2;
	time_t t = clocktime() + secs;

	/*
	 * Allocate and fill in a new callout structure
	 */
	callout *cpnew = alloc_callout();
	cpnew->c_closure = closure;
	cpnew->c_fn = fn;
	cpnew->c_time = t;
	cpnew->c_id = CID_ALLOC();

	if (t < next_softclock)
		next_softclock = t;

	/*
	 * Find the correct place in the list
	 */
	for (cp = &callouts; cp2 = cp->c_next; cp = cp2)
		if (cp2->c_time >= t)
			break;

	/*
	 * And link it in
	 */
	cp->c_next = cpnew;
	cpnew->c_next = cp2;

	/*
	 * Return callout identifier
	 */
	return cpnew->c_id;
}

/*
 * De-schedule a callout
 */
void untimeout P((int id));
void untimeout(id)
int id;
{
	callout *cp, *cp2;
	for (cp = &callouts; cp2 = cp->c_next; cp = cp2) {
		if (cp2->c_id == id) {
			cp->c_next = cp2->c_next;
			free_callout(cp2);
			break;
		}
	}
}
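
/*
 * Example usage (a sketch, not part of the daemon proper): a caller
 * might schedule a retry with timeout() and keep the returned
 * identifier so the callout can later be cancelled with untimeout().
 * The retry handler and its closure below are hypothetical.
 */
#ifdef notdef
static int example_id = CID_UNDEF;	/* Saved callout identifier */

static void example_retry(closure)
voidp closure;
{
	/* ... retry whatever operation `closure' describes ... */
}

static void example_schedule(closure)
voidp closure;
{
	/* Run example_retry(closure) about 30 seconds from now */
	example_id = timeout((unsigned int) 30, example_retry, closure);
}

static void example_cancel()
{
	/* De-schedule the retry if it has not fired yet */
	if (example_id != CID_UNDEF) {
		untimeout(example_id);
		example_id = CID_UNDEF;
	}
}
#endif /* notdef */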

/*
 * Reschedule after clock changed
 */
void reschedule_timeouts P((time_t now, time_t then));
void reschedule_timeouts(now, then)
time_t now;
time_t then;
{
	callout *cp;

	for (cp = callouts.c_next; cp; cp = cp->c_next) {
		if (cp->c_time >= now && cp->c_time <= then) {
			plog(XLOG_WARNING, "job %d rescheduled to run immediately", cp->c_id);
#ifdef DEBUG
			dlog("rescheduling job %d back %d seconds",
				cp->c_id, cp->c_time - now);
#endif
			next_softclock = cp->c_time = now;
		}
	}
}
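
/*
 * Example (a sketch only): a caller that notices the system clock has
 * been stepped backwards might pull forward any callouts scheduled
 * under the old clock, so they are not delayed by the step.  The
 * detection below is illustrative; `example_last_time' is hypothetical.
 */
#ifdef notdef
static time_t example_last_time;	/* Clock value at previous check */

static void example_check_clock()
{
	time_t now = clocktime();

	if (now < example_last_time)
		reschedule_timeouts(now, example_last_time);
	example_last_time = now;
}
#endif /* notdef */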

/*
 * Clock handler
 */
int softclock(P_void);
int softclock()
{
	time_t now;
	callout *cp;

	do {
		if (task_notify_todo)
			do_task_notify();

		now = clocktime();

		/*
		 * While there are more callouts waiting...
		 */
		while ((cp = callouts.c_next) && cp->c_time <= now) {
			/*
			 * Save fn & closure from the first callout,
			 * unlink it from the list and free it,
			 * then call the function.
			 *
			 * The free is done first because it is
			 * quite common for the function to call
			 * timeout() and try to allocate a callout.
			 */
			void (*fn)() = cp->c_fn;
			voidp closure = cp->c_closure;

			callouts.c_next = cp->c_next;
			free_callout(cp);
#ifdef DEBUG
			/*dlog("Calling %#x(%#x)", fn, closure);*/
#endif /* DEBUG */
			(*fn)(closure);
		}

	} while (task_notify_todo);

	/*
	 * Return number of seconds to next event,
	 * or 0 if there is no event.
	 */
	if (cp = callouts.c_next)
		return cp->c_time - now;
	return 0;
}
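
/*
 * Example (a sketch, not the daemon's real main loop): softclock()
 * returns the number of seconds until the next pending callout, or 0
 * if there is none, so a caller might use that value to bound how
 * long it sleeps before checking for work again.
 */
#ifdef notdef
static void example_main_loop()
{
	for (;;) {
		int delay = softclock();	/* Run anything that is due */
		struct timeval tvv;
		struct timeval *tvp;

		if (delay > 0) {
			tvv.tv_sec = delay;
			tvv.tv_usec = 0;
			tvp = &tvv;
		} else {
			tvp = (struct timeval *) 0;	/* Nothing pending - block */
		}

		/*
		 * Sleep until the next callout is due, or forever
		 * if there is none; no descriptors are watched here.
		 */
		(void) select(0, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, tvp);
	}
}
#endif /* notdef */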
215