/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

static int
sul_compare(const lws_dll2_t *d, const lws_dll2_t *i)
{
	lws_usec_t a = ((lws_sorted_usec_list_t *)d)->us;
	lws_usec_t b = ((lws_sorted_usec_list_t *)i)->us;

	/*
	 * Simply returning (a - b) in an int
	 * may lead to an integer overflow bug
	 */

	if (a > b)
		return 1;
	if (a < b)
		return -1;

	return 0;
}

/*
 * notice owner was chosen already, and sul->us was already computed
 */

int
__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul)
{
	lws_dll2_remove(&sul->list);

	assert(sul->cb);

	/*
	 * we sort the pt's list of sequencers with pending timeouts, so it's
	 * cheap to check it every poll wait
	 */

	lws_dll2_add_sorted(&sul->list, own, sul_compare);

	return 0;
}

void
lws_sul_cancel(lws_sorted_usec_list_t *sul)
{
	lws_dll2_remove(&sul->list);

	/* we are clearing the timeout and leaving ourselves detached */
	sul->us = 0;
}

void
lws_sul2_schedule(struct lws_context *context, int tsi, int flags,
	          lws_sorted_usec_list_t *sul)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

	lws_pt_assert_lock_held(pt);

	__lws_sul_insert(
		&pt->pt_sul_owner[!!(flags & LWSSULLI_WAKE_IF_SUSPENDED)], sul);
}

/*
 * own points to the first in an array of length own_len
 *
 * While any sul list owner has a "ripe" sul, ie, one that is ready to be
 * handled, we handle them strictly in order of sul time.  When nobody has a
 * ripe sul, we return 0 if actually nobody has any sul at all, or else the
 * interval between usnow and the next earliest scheduled event on any list.
 */

lws_usec_t
__lws_sul_service_ripe(lws_dll2_owner_t *own, int own_len, lws_usec_t usnow)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
			lws_container_of(own, struct lws_context_per_thread,
					 pt_sul_owner);

	if (pt->attach_owner.count)
		lws_system_do_attach(pt);

	lws_pt_assert_lock_held(pt);

	/* must be at least 1 */
	assert(own_len > 0);

	/*
	 * Of the own_len sul owning lists, the earliest next sul could be on
	 * any of them.  We have to find it and handle each in turn until no
	 * ripe sul left on any owning list, and we can exit.
	 *
	 * This ensures the ripe sul are handled strictly in the right order no
	 * matter which owning list they are on.
	 */

	do {
		lws_sorted_usec_list_t *hit = NULL;
		lws_usec_t lowest = 0;
		int n = 0;

		for (n = 0; n < own_len; n++) {
			lws_sorted_usec_list_t *sul;

			if (!own[n].count)
				continue;
			sul = (lws_sorted_usec_list_t *)
						     lws_dll2_get_head(&own[n]);

			if (!hit || sul->us <= lowest) {
				hit = sul;
				lowest = sul->us;
			}
		}

		if (!hit)
			return 0;

		if (lowest > usnow)
			return lowest - usnow;

		/* his moment has come... remove him from his owning list */

		lws_dll2_remove(&hit->list);
		hit->us = 0;

		// lwsl_notice("%s: sul: %p\n", __func__, hit->cb);

		pt->inside_lws_service = 1;
		hit->cb(hit);
		pt->inside_lws_service = 0;

	} while (1);

	/* unreachable */

	return 0;
}
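
/*
 * Illustrative sketch (not part of this file): an event loop integration
 * would typically call __lws_sul_service_ripe() with the current monotonic
 * time and bound its poll wait by the return value.  Names below other than
 * the lws symbols already used in this file are hypothetical.
 *
 *	lws_usec_t us = __lws_sul_service_ripe(pt->pt_sul_owner,
 *					       LWS_COUNT_PT_SUL_OWNERS,
 *					       lws_now_usecs());
 *
 *	// 0 means nothing is scheduled; otherwise wait at most us usec
 *	timeout_ms = us ? (int)(us / 1000) : default_timeout_ms;
 */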

/*
 * Normally we use the OS monotonic time, which does not step when the
 * gettimeofday() time is adjusted by, eg, ntpclient.  But on some OSes,
 * high resolution monotonic time doesn't exist; sul time is computed from and
 * compared against gettimeofday() time and breaks when that steps.
 *
 * For those cases, this allows us to retrospectively adjust existing suls on
 * all owning lists by the step amount, at the same time we adjust the
 * nonmonotonic clock.  Then nothing breaks so long as we do this when the
 * gettimeofday() clock is stepped.
 *
 * Linux and so on offer POSIX CLOCK_MONOTONIC, which lws uses.  FreeRTOS
 * doesn't have a high-resolution monotonic clock and has to use
 * gettimeofday(), which requires this adjustment when it is stepped.
 */

lws_usec_t
lws_sul_nonmonotonic_adjust(struct lws_context *ctx, int64_t step_us)
{
	struct lws_context_per_thread *pt = &ctx->pt[0];
	int n, m;

	/*
	 * for each pt
	 */

	for (m = 0; m < ctx->count_threads; m++) {

		/*
		 * For each owning list...
		 */

		lws_pt_lock(pt, __func__);

		for (n = 0; n < LWS_COUNT_PT_SUL_OWNERS; n++) {

			if (!pt->pt_sul_owner[n].count)
				continue;

			/* ... and for every existing sul on a list... */

			lws_start_foreach_dll(struct lws_dll2 *, p,
					      lws_dll2_get_head(
							&pt->pt_sul_owner[n])) {
				lws_sorted_usec_list_t *sul = lws_container_of(
					       p, lws_sorted_usec_list_t, list);

				/*
				 * ... retrospectively step its ripe time by the
				 * step we will adjust the gettimeofday() clock
				 * with
				 */

				sul->us += step_us;

			} lws_end_foreach_dll(p);
		}

		lws_pt_unlock(pt);

		pt++;
	}

	return 0;
}
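
/*
 * Illustrative sketch (not part of this file): on a platform without a
 * usable monotonic clock, the code that is about to step the wallclock would
 * first adjust the scheduled suls by the same signed amount, so their
 * relative timing survives the step.  The settimeofday() call and the
 * variable names below are illustrative only.
 *
 *	int64_t step_us = new_wallclock_us - old_wallclock_us;
 *
 *	lws_sul_nonmonotonic_adjust(context, step_us);
 *	settimeofday(&new_tv, NULL);	// then actually step the clock
 */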

/*
 * Earliest wakeable event on any pt
 */

int
lws_sul_earliest_wakeable_event(struct lws_context *ctx, lws_usec_t *pearliest)
{
	struct lws_context_per_thread *pt;
	int n = 0, hit = -1;
	lws_usec_t lowest = 0;

	for (n = 0; n < ctx->count_threads; n++) {
		pt = &ctx->pt[n];

		lws_pt_lock(pt, __func__);

		if (pt->pt_sul_owner[LWSSULLI_WAKE_IF_SUSPENDED].count) {
			lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
					lws_dll2_get_head(&pt->pt_sul_owner[
					           LWSSULLI_WAKE_IF_SUSPENDED]);

			if (hit == -1 || sul->us < lowest) {
				hit = n;
				lowest = sul->us;
			}
		}

		lws_pt_unlock(pt);
	}

	if (hit == -1)
		/* there is no pending event */
		return 1;

	*pearliest = lowest;

	return 0;
}
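
/*
 * Illustrative sketch (not part of this file): before entering a low power
 * suspend, a platform layer can ask for the earliest sul that must wake the
 * system and program a wake timer from it.  program_wake_timer() is
 * hypothetical; a nonzero return means nothing needs to wake us.
 *
 *	lws_usec_t us;
 *
 *	if (!lws_sul_earliest_wakeable_event(context, &us))
 *		program_wake_timer(us - lws_now_usecs());
 */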

void
lws_sul_schedule(struct lws_context *ctx, int tsi, lws_sorted_usec_list_t *sul,
		 sul_cb_t _cb, lws_usec_t _us)
{
	struct lws_context_per_thread *_pt = &ctx->pt[tsi];

	lws_pt_lock(_pt, __func__);

	if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
		lws_sul_cancel(sul);
	else {
		sul->cb = _cb;
		sul->us = lws_now_usecs() + _us;
		lws_sul2_schedule(ctx, tsi, LWSSULLI_MISS_IF_SUSPENDED, sul);
	}

	lws_pt_unlock(_pt);
}
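
/*
 * Illustrative sketch (not part of this file): typical usage embeds the sul
 * in the caller's own struct and recovers that struct in the callback with
 * lws_container_of().  struct my_object and my_cb() are hypothetical.
 *
 *	struct my_object {
 *		lws_sorted_usec_list_t	sul;
 *		int			state;
 *	};
 *
 *	static void
 *	my_cb(lws_sorted_usec_list_t *sul)
 *	{
 *		struct my_object *o = lws_container_of(sul, struct my_object,
 *						       sul);
 *		o->state++;	// do the deferred work
 *	}
 *
 *	// fire my_cb() in 3s; LWS_SET_TIMER_USEC_CANCEL instead cancels it
 *	lws_sul_schedule(context, 0, &o->sul, my_cb, 3 * LWS_US_PER_SEC);
 */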

void
lws_sul_schedule_wakesuspend(struct lws_context *ctx, int tsi,
			     lws_sorted_usec_list_t *sul, sul_cb_t _cb,
			     lws_usec_t _us)
{
	struct lws_context_per_thread *_pt = &ctx->pt[tsi];

	lws_pt_lock(_pt, __func__);

	if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
		lws_sul_cancel(sul);
	else {
		sul->cb = _cb;
		sul->us = lws_now_usecs() + _us;
		lws_sul2_schedule(ctx, tsi, LWSSULLI_WAKE_IF_SUSPENDED, sul);
	}

	lws_pt_unlock(_pt);
}
#if defined(LWS_WITH_SUL_DEBUGGING)

/*
 * Sanity checker for any sul left scheduled when its containing object is
 * freed... code scheduling suls must take care to cancel them when destroying
 * their object.  This optional debugging helper checks that when an object is
 * being destroyed, there is no live sul scheduled from inside the object.
 */
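
/*
 * Illustrative sketch (not part of this file): the pattern that avoids
 * zombie suls is to cancel any sul an object owns before freeing the object.
 * struct my_object and my_object_destroy() are hypothetical.
 *
 *	static void
 *	my_object_destroy(struct my_object *o)
 *	{
 *		lws_sul_cancel(&o->sul);	// detach any pending timeout
 *		lws_free(o);			// now safe to free the object
 *	}
 */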

void
lws_sul_debug_zombies(struct lws_context *ctx, void *po, size_t len,
		      const char *destroy_description)
{
	struct lws_context_per_thread *pt;
	int n, m;

	for (n = 0; n < ctx->count_threads; n++) {
		pt = &ctx->pt[n];

		lws_pt_lock(pt, __func__);

		for (m = 0; m < LWS_COUNT_PT_SUL_OWNERS; m++) {

			lws_start_foreach_dll(struct lws_dll2 *, p,
				      lws_dll2_get_head(&pt->pt_sul_owner[m])) {
				lws_sorted_usec_list_t *sul =
					lws_container_of(p,
						lws_sorted_usec_list_t, list);

				/*
				 * Is the sul resident inside the object that is
				 * indicated as being deleted?
				 */

				if ((void *)sul >= po &&
				    (size_t)lws_ptr_diff(sul, po) < len) {
					lwsl_err("%s: ERROR: Zombie Sul "
						 "(on list %d) %s, cb %p\n",
						 __func__, m,
						 destroy_description, sul->cb);
					/*
					 * This assert fires if you have left
					 * a sul scheduled to fire later, but
					 * are about to destroy the object the
					 * sul lives in.  You must take care to
					 * do lws_sul_cancel(&sul) on any suls
					 * that may be scheduled before
					 * destroying the object the sul lives
					 * inside.
					 *
					 * You can look up the cb pointer in
					 * your mapfile to find out which
					 * callback function the sul was using,
					 * which usually tells you which sul
					 * it is.
					 */
					assert(0);
				}

			} lws_end_foreach_dll(p);
		}

		lws_pt_unlock(pt);
	}
}

#endif