1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Time-related runtime and pieces of package time.
6
7package time
8
9#include <sys/time.h>
10
11#include "runtime.h"
12#include "defs.h"
13#include "arch.h"
14#include "malloc.h"
15
// Set debug to 1 to dump the timer heap after every
// addtimer/deltimer mutation (see dumptimers).
enum {
	debug = 0,
};
19
// Global timer heap; guarded by runtime_lock(&timers).
static Timers timers;
// Forward declarations for the lock-held helpers defined below.
static void addtimer(Timer*);
static void dumptimers(const char*);
23
24// nacl fake time support.
25int64 runtime_timens;
26
27// Package time APIs.
28// Godoc uses the comments in package time, not these.
29
30// time.now is implemented in assembly.
31
// runtimeNano returns the current value of the runtime clock in nanoseconds.
// This is the monotonic clock (runtime_nanotime), not wall time.
func runtimeNano() (ns int64) {
	ns = runtime_nanotime();
}
36
// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
// Non-positive durations return immediately (checked in runtime_tsleep).
func Sleep(ns int64) {
	runtime_tsleep(ns, "sleep");
}
41
// startTimer adds t to the timer heap.
// Takes the timers lock, so it is safe to call concurrently.
func startTimer(t *Timer) {
	runtime_addtimer(t);
}
46
// stopTimer removes t from the timer heap if it is there.
// It returns true if t was removed, false if t wasn't even there.
// Does not wake timerproc; an early wakeup is harmless (see runtime_deltimer).
func stopTimer(t *Timer) (stopped bool) {
	stopped = runtime_deltimer(t);
}
52
53// C runtime.
54
55int64 runtime_unixnanotime(void)
56{
57	struct time_now_ret r;
58
59	r = now();
60	return r.sec*1000000000 + r.nsec;
61}
62
63static void timerproc(void*);
64static void siftup(int32);
65static void siftdown(int32);
66
// Ready the goroutine e.data.
// Used as the timer callback for runtime_tsleep; the seq argument
// is part of the timer callback signature but unused here.
static void
ready(Eface e, uintptr seq)
{
	USED(seq);

	runtime_ready(e.__object);
}
75
// FuncVal wrapper so ready can be installed as a timer callback (Timer.fv).
static FuncVal readyv = {(void(*)(void))ready};
77
// Put the current goroutine to sleep for ns nanoseconds.
// The Timer lives on this goroutine's stack; that is safe because
// timerproc removes a one-shot timer from the heap before firing it,
// and the callback (ready) only wakes this goroutine again.
void
runtime_tsleep(int64 ns, const char *reason)
{
	G* g;
	Timer t;

	g = runtime_g();

	if(ns <= 0)
		return;

	t.when = runtime_nanotime() + ns;
	t.period = 0;       // one-shot
	t.fv = &readyv;     // wake us via ready()
	t.arg.__object = g;
	t.seq = 0;
	runtime_lock(&timers);
	addtimer(&t);
	// Atomically release the timers lock and park; ready() reschedules us.
	runtime_parkunlock(&timers, reason);
}
99
// Entry point used by Go's startTimer: acquire the timers lock
// and insert t into the heap.
void
runtime_addtimer(Timer *t)
{
	runtime_lock(&timers);
	addtimer(t);
	runtime_unlock(&timers);
}
107
// Add a timer to the heap and start or kick the timer proc
// if the new timer is earlier than any of the others.
// Caller must hold the timers lock.
static void
addtimer(Timer *t)
{
	int32 n;
	Timer **nt;

	// when must never be negative; otherwise timerproc will overflow
	// during its delta calculation and never expire other timers.
	if(t->when < 0)
		t->when = (int64)((1ULL<<63)-1);

	if(timers.len >= timers.cap) {
		// Grow slice: start at 16 slots, then grow by 3/2.
		n = 16;
		if(n <= timers.cap)
			n = timers.cap*3 / 2;
		nt = runtime_malloc(n*sizeof nt[0]);
		runtime_memmove(nt, timers.t, timers.len*sizeof nt[0]);
		runtime_free(timers.t);
		timers.t = nt;
		timers.cap = n;
	}
	t->i = timers.len++;
	timers.t[t->i] = t;
	siftup(t->i);
	if(t->i == 0) {
		// siftup moved to top: new earliest deadline.
		if(timers.sleeping) {
			// timerproc is in notetsleepg; cut its sleep short.
			timers.sleeping = false;
			runtime_notewakeup(&timers.waitnote);
		}
		if(timers.rescheduling) {
			// timerproc parked on an empty heap; restart it.
			timers.rescheduling = false;
			runtime_ready(timers.timerproc);
		}
	}
	if(timers.timerproc == nil) {
		// Lazily start the timer goroutine on first use.
		timers.timerproc = __go_go(timerproc, nil);
		timers.timerproc->issystem = true;
	}
	if(debug)
		dumptimers("addtimer");
}
153
// Used to force a dereference before the lock is acquired.
// Written only by runtime_deltimer so the load cannot be optimized away;
// the stored value itself is meaningless.
static int32 gi;
156
// Delete timer t from the heap.
// Do not need to update the timerproc:
// if it wakes up early, no big deal.
// Returns true if t was removed, false if it was not in the heap.
bool
runtime_deltimer(Timer *t)
{
	int32 i;

	// Dereference t so that any panic happens before the lock is held.
	// Discard result, because t might be moving in the heap.
	i = t->i;
	gi = i;

	runtime_lock(&timers);

	// t may not be registered anymore and may have
	// a bogus i (typically 0, if generated by Go).
	// Verify it before proceeding.
	i = t->i;
	if(i < 0 || i >= timers.len || timers.t[i] != t) {
		runtime_unlock(&timers);
		return false;
	}

	timers.len--;
	if(i == timers.len) {
		// t was the last element: just drop it.
		timers.t[i] = nil;
	} else {
		// Move the last element into t's slot, then restore the
		// heap property in whichever direction it was violated.
		timers.t[i] = timers.t[timers.len];
		timers.t[timers.len] = nil;
		timers.t[i]->i = i;
		siftup(i);
		siftdown(i);
	}
	if(debug)
		dumptimers("deltimer");
	runtime_unlock(&timers);
	return true;
}
196
// Timerproc runs the time-driven events.
// It sleeps until the next event in the timers heap.
// If addtimer inserts a new earlier event, addtimer
// wakes timerproc early.
static void
timerproc(void* dummy __attribute__ ((unused)))
{
	int64 delta, now;
	Timer *t;
	FuncVal *fv;
	void (*f)(Eface, uintptr);
	Eface arg;
	uintptr seq;

	for(;;) {
		runtime_lock(&timers);
		timers.sleeping = false;
		now = runtime_nanotime();
		// Fire every timer whose deadline has already passed.
		for(;;) {
			if(timers.len == 0) {
				delta = -1;  // sentinel: heap empty, park below
				break;
			}
			t = timers.t[0];  // earliest deadline is at the heap root
			delta = t->when - now;
			if(delta > 0)
				break;
			if(t->period > 0) {
				// leave in heap but adjust next time to fire
				t->when += t->period * (1 + -delta/t->period);
				siftdown(0);
			} else {
				// remove from heap
				timers.t[0] = timers.t[--timers.len];
				timers.t[0]->i = 0;
				siftdown(0);
				t->i = -1;  // mark as removed
			}
			// Copy everything needed out of t before unlocking:
			// once the lock is dropped t may be reused or gone
			// (e.g. a stack-allocated Timer from runtime_tsleep).
			fv = t->fv;
			f = (void*)t->fv->fn;
			arg = t->arg;
			seq = t->seq;
			// Run the callback without holding the lock so it may
			// itself add or delete timers.
			runtime_unlock(&timers);
			__builtin_call_with_static_chain(f(arg, seq), fv);

			// clear f and arg to avoid leak while sleeping for next timer
			f = nil;
			USED(f);
			arg.__type_descriptor = nil;
			arg.__object = nil;
			USED(&arg);

			runtime_lock(&timers);
		}
		if(delta < 0) {
			// No timers left - put goroutine to sleep.
			// addtimer will runtime_ready us when a timer arrives.
			timers.rescheduling = true;
			runtime_g()->isbackground = true;
			runtime_parkunlock(&timers, "timer goroutine (idle)");
			runtime_g()->isbackground = false;
			continue;
		}
		// At least one timer pending.  Sleep until then.
		// addtimer may notewakeup us early if an earlier timer is added.
		timers.sleeping = true;
		runtime_noteclear(&timers.waitnote);
		runtime_unlock(&timers);
		runtime_notetsleepg(&timers.waitnote, delta);
	}
}
266
267// heap maintenance algorithms.
268
269static void
270siftup(int32 i)
271{
272	int32 p;
273	int64 when;
274	Timer **t, *tmp;
275
276	t = timers.t;
277	when = t[i]->when;
278	tmp = t[i];
279	while(i > 0) {
280		p = (i-1)/4;  // parent
281		if(when >= t[p]->when)
282			break;
283		t[i] = t[p];
284		t[i]->i = i;
285		t[p] = tmp;
286		tmp->i = p;
287		i = p;
288	}
289}
290
291static void
292siftdown(int32 i)
293{
294	int32 c, c3, len;
295	int64 when, w, w3;
296	Timer **t, *tmp;
297
298	t = timers.t;
299	len = timers.len;
300	when = t[i]->when;
301	tmp = t[i];
302	for(;;) {
303		c = i*4 + 1;  // left child
304		c3 = c + 2;  // mid child
305		if(c >= len) {
306			break;
307		}
308		w = t[c]->when;
309		if(c+1 < len && t[c+1]->when < w) {
310			w = t[c+1]->when;
311			c++;
312		}
313		if(c3 < len) {
314			w3 = t[c3]->when;
315			if(c3+1 < len && t[c3+1]->when < w3) {
316				w3 = t[c3+1]->when;
317				c3++;
318			}
319			if(w3 < w) {
320				w = w3;
321				c = c3;
322			}
323		}
324		if(w >= when)
325			break;
326		t[i] = t[c];
327		t[i]->i = i;
328		t[c] = tmp;
329		tmp->i = c;
330		i = c;
331	}
332}
333
334static void
335dumptimers(const char *msg)
336{
337	Timer *t;
338	int32 i;
339
340	runtime_printf("timers: %s\n", msg);
341	for(i = 0; i < timers.len; i++) {
342		t = timers.t[i];
343		runtime_printf("\t%d\t%p:\ti %d when %D period %D fn %p\n",
344				i, t, t->i, t->when, t->period, t->fv->fn);
345	}
346	runtime_printf("\n");
347}
348
349void
350runtime_time_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
351{
352	enqueue1(wbufp, (Obj){(byte*)&timers, sizeof timers, 0});
353}
354