/* /freebsd/contrib/ntp/libntp/recvbuff.c (revision 4e8d558c) */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>

#include "ntp_assert.h"
#include "ntp_syslog.h"
#include "ntp_stdlib.h"
#include "ntp_lists.h"
#include "recvbuff.h"
#include "iosignal.h"

#if (RECV_INC & (RECV_INC-1))
# error RECV_INC not a power of 2!
#endif
#if (RECV_BATCH & (RECV_BATCH - 1))
#error RECV_BATCH not a power of 2!
#endif
#if (RECV_BATCH < RECV_INC)
#error RECV_BATCH must be >= RECV_INC!
#endif

/*
 * Memory allocation
 */
static u_long volatile full_recvbufs;	/* recvbufs on full_recv_fifo */
static u_long volatile free_recvbufs;	/* recvbufs on free_recv_list */
static u_long volatile total_recvbufs;	/* total recvbufs currently in use */
static u_long volatile lowater_adds;	/* number of times we have added memory */
static u_long volatile buffer_shortfall;/* number of missed free receive buffers
					   between replenishments */
static u_long limit_recvbufs;		/* maximum total of receive buffers */
static u_long emerg_recvbufs;		/* emergency/urgent buffers to keep */

static DECL_FIFO_ANCHOR(recvbuf_t) full_recv_fifo;
static recvbuf_t *		   free_recv_list;

#if defined(SYS_WINNT)

/*
 * For Windows we need locks to protect the receive buffer lists
 * from corruption by concurrent access.  Each lock is held for as
 * short a time as possible.
 */
static CRITICAL_SECTION RecvLock;
static CRITICAL_SECTION FreeLock;
# define LOCK_R()	EnterCriticalSection(&RecvLock)
# define UNLOCK_R()	LeaveCriticalSection(&RecvLock)
# define LOCK_F()	EnterCriticalSection(&FreeLock)
# define UNLOCK_F()	LeaveCriticalSection(&FreeLock)
#else
# define LOCK_R()	do {} while (FALSE)
# define UNLOCK_R()	do {} while (FALSE)
# define LOCK_F()	do {} while (FALSE)
# define UNLOCK_F()	do {} while (FALSE)
#endif

#ifdef DEBUG
static void uninit_recvbuff(void);
#endif


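/*
 * Counter accessors - read-only views of the buffer pool statistics.
 */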
u_long
free_recvbuffs (void)
{
	return free_recvbufs;
}

u_long
full_recvbuffs (void)
{
	return full_recvbufs;
}

u_long
total_recvbuffs (void)
{
	return total_recvbufs;
}

u_long
lowater_additions(void)
{
	return lowater_adds;
}

static inline void
initialise_buffer(recvbuf_t *buff)
{
	ZERO(*buff);
}

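/*
 * create_buffers - allocate more receive buffers and link them onto
 *		    free_recv_list.  The request is grown by any recorded
 *		    shortfall, rounded up to a multiple of RECV_INC, and
 *		    clamped to RECV_BATCH and the limit_recvbufs ceiling.
 *		    On allocation failure the ceiling is lowered to the
 *		    current total.
 */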
static void
create_buffers(
	size_t		nbufs)
{
#   ifndef DEBUG
	static const u_int chunk = RECV_INC;
#   else
	/* Allocate each buffer individually so they can be free()d
	 * during ntpd shutdown on DEBUG builds to keep them out of heap
	 * leak reports.
	 */
	static const u_int chunk = 1;
#   endif

	register recvbuf_t *bufp;
	u_int i;
	size_t abuf;

	/*[bug 3666]: followup -- reset shortfalls in all cases */
	abuf = nbufs + buffer_shortfall;
	buffer_shortfall = 0;

	if (limit_recvbufs <= total_recvbufs)
		return;

	if (abuf < nbufs || abuf > RECV_BATCH)
		abuf = RECV_BATCH;	/* clamp on overflow */
	else
		abuf += (~abuf + 1) & (RECV_INC - 1);	/* round up */

	if (abuf > (limit_recvbufs - total_recvbufs))
		abuf = limit_recvbufs - total_recvbufs;
	abuf += (~abuf + 1) & (chunk - 1);		/* round up */

	while (abuf) {
		bufp = calloc(chunk, sizeof(*bufp));
		if (!bufp) {
			limit_recvbufs = total_recvbufs;
			break;
		}
		for (i = chunk; i; --i,++bufp) {
			LINK_SLIST(free_recv_list, bufp, link);
		}
		free_recvbufs += chunk;
		total_recvbufs += chunk;
		abuf -= chunk;
	}
	++lowater_adds;
}

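/*
 * init_recvbuff - initialize the buffer pool, statistics counters and
 *		   (on Windows) the locks protecting the lists.
 */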
void
init_recvbuff(int nbufs)
{

	/*
	 * Init buffer free list and stat counters
	 */
	free_recvbufs = total_recvbufs = 0;
	full_recvbufs = lowater_adds = 0;

	limit_recvbufs = RECV_TOOMANY;
	emerg_recvbufs = RECV_CLOCK;

	create_buffers(nbufs);

#   if defined(SYS_WINNT)
	InitializeCriticalSection(&RecvLock);
	InitializeCriticalSection(&FreeLock);
#   endif

#   ifdef DEBUG
	atexit(&uninit_recvbuff);
#   endif
}


#ifdef DEBUG
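/*
 * uninit_recvbuff - free all buffers at ntpd shutdown (DEBUG builds
 *		     only) so they stay out of heap leak reports.
 */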
static void
uninit_recvbuff(void)
{
	recvbuf_t *rbunlinked;

	for (;;) {
		UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}

	for (;;) {
		UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}
#   if defined(SYS_WINNT)
	DeleteCriticalSection(&FreeLock);
	DeleteCriticalSection(&RecvLock);
#   endif
}
#endif	/* DEBUG */


/*
 * freerecvbuf - make a single recvbuf available for reuse
 */
void
freerecvbuf(recvbuf_t *rb)
{
	if (rb) {
		if (--rb->used != 0) {
			msyslog(LOG_ERR, "******** freerecvbuf non-zero usage: %d *******", rb->used);
			rb->used = 0;
		}
		LOCK_F();
		LINK_SLIST(free_recv_list, rb, link);
		++free_recvbufs;
		UNLOCK_F();
	}
}


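/*
 * add_full_recv_buffer - queue a filled buffer on the full FIFO for
 *			  later processing.
 */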
void
add_full_recv_buffer(recvbuf_t *rb)
{
	if (rb == NULL) {
		msyslog(LOG_ERR, "add_full_recv_buffer received NULL buffer");
		return;
	}
	LOCK_R();
	LINK_FIFO(full_recv_fifo, rb, link);
	++full_recvbufs;
	UNLOCK_R();
}


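/*
 * get_free_recv_buffer - pull a buffer from the free list, or return
 *			  NULL and record the shortfall if none can be
 *			  spared.  Only urgent callers may dip into the
 *			  emerg_recvbufs reserve.
 */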
recvbuf_t *
get_free_recv_buffer(
    int /*BOOL*/ urgent
    )
{
	recvbuf_t *buffer = NULL;

	LOCK_F();
	if (free_recvbufs > (urgent ? 0 : emerg_recvbufs)) {
		UNLINK_HEAD_SLIST(buffer, free_recv_list, link);
	}

	if (buffer != NULL) {
		if (free_recvbufs)
			--free_recvbufs;
		initialise_buffer(buffer);
		++buffer->used;
	} else {
		++buffer_shortfall;
	}
	UNLOCK_F();

	return buffer;
}


#ifdef HAVE_IO_COMPLETION_PORT
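/*
 * get_free_recv_buffer_alloc - as get_free_recv_buffer(), but first
 *				replenish the free list if it has dropped
 *				to the emergency reserve or a shortfall
 *				has been recorded.
 */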
recvbuf_t *
get_free_recv_buffer_alloc(
    int /*BOOL*/ urgent
    )
{
	LOCK_F();
	if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
		create_buffers(RECV_INC);
	UNLOCK_F();
	return get_free_recv_buffer(urgent);
}
#endif


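/*
 * get_full_recv_buffer - dequeue the next filled buffer, or return
 *			  NULL if the FIFO is empty, topping up the free
 *			  list first if it is running low.
 */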
recvbuf_t *
get_full_recv_buffer(void)
{
	recvbuf_t *	rbuf;

	/*
	 * make sure there are free buffers when we wander off to do
	 * lengthy packet processing with any buffer we grab from the
	 * full list.
	 *
	 * fixes malloc() interrupted by SIGIO risk (Bug 889)
	 */
	LOCK_F();
	if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
		create_buffers(RECV_INC);
	UNLOCK_F();

	/*
	 * try to grab a full buffer
	 */
	LOCK_R();
	UNLINK_FIFO(rbuf, full_recv_fifo, link);
	if (rbuf != NULL && full_recvbufs)
		--full_recvbufs;
	UNLOCK_R();

	return rbuf;
}


/*
 * purge_recv_buffers_for_fd() - purges any previously-received input
 *				 from a given file descriptor.
 */
void
purge_recv_buffers_for_fd(
	int	fd
	)
{
	recvbuf_t *rbufp;
	recvbuf_t *next;
	recvbuf_t *punlinked;
	recvbuf_t *freelist = NULL;

	/* We want to hold only one lock at a time.  So we scan the
	 * full buffer queue, collecting matching items as we go, and
	 * when done we hand the collected items to freerecvbuf().
	 */
	LOCK_R();

	for (rbufp = HEAD_FIFO(full_recv_fifo);
	     rbufp != NULL;
	     rbufp = next)
	{
		next = rbufp->link;
#	    ifdef HAVE_IO_COMPLETION_PORT
		if (rbufp->dstadr == NULL && rbufp->fd == fd)
#	    else
		if (rbufp->fd == fd)
#	    endif
		{
			UNLINK_MID_FIFO(punlinked, full_recv_fifo,
					rbufp, link, recvbuf_t);
			INSIST(punlinked == rbufp);
			if (full_recvbufs)
				--full_recvbufs;
			rbufp->link = freelist;
			freelist = rbufp;
		}
	}

	UNLOCK_R();

	while (freelist) {
		next = freelist->link;
		freerecvbuf(freelist);
		freelist = next;
	}
}


/*
 * Checks to see if there are buffers to process
 */
isc_boolean_t has_full_recv_buffer(void)
{
	if (HEAD_FIFO(full_recv_fifo) != NULL)
		return (ISC_TRUE);
	else
		return (ISC_FALSE);
}


#ifdef NTP_DEBUG_LISTS_H
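/*
 * check_gen_fifo_consistency - walk a generic FIFO and verify that the
 *				head and tail pointers agree with the
 *				chain of linked nodes.
 */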
void
check_gen_fifo_consistency(void *fifo)
{
	gen_fifo *pf;
	gen_node *pthis;
	gen_node **pptail;

	pf = fifo;
	REQUIRE((NULL == pf->phead && NULL == pf->pptail) ||
		(NULL != pf->phead && NULL != pf->pptail));

	pptail = &pf->phead;
	for (pthis = pf->phead;
	     pthis != NULL;
	     pthis = pthis->link)
		if (NULL != pthis->link)
			pptail = &pthis->link;

	REQUIRE(NULL == pf->pptail || pptail == pf->pptail);
}
#endif	/* NTP_DEBUG_LISTS_H */
386