xref: /minix/external/bsd/libevent/dist/buffer.c (revision 0a6a1f1d)
1 /*	$NetBSD: buffer.c,v 1.3 2015/01/29 07:26:02 spz Exp $	*/
2 /*
3  * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
4  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "event2/event-config.h"
30 #include <sys/cdefs.h>
31 __RCSID("$NetBSD: buffer.c,v 1.3 2015/01/29 07:26:02 spz Exp $");
32 
33 #ifdef WIN32
34 #include <winsock2.h>
35 #include <windows.h>
36 #include <io.h>
37 #endif
38 
39 #ifdef _EVENT_HAVE_VASPRINTF
40 /* If we have vasprintf, we need to define this before we include stdio.h. */
41 #define _GNU_SOURCE
42 #endif
43 
44 #include <sys/types.h>
45 
46 #ifdef _EVENT_HAVE_SYS_TIME_H
47 #include <sys/time.h>
48 #endif
49 
50 #ifdef _EVENT_HAVE_SYS_SOCKET_H
51 #include <sys/socket.h>
52 #endif
53 
54 #ifdef _EVENT_HAVE_SYS_UIO_H
55 #include <sys/uio.h>
56 #endif
57 
58 #ifdef _EVENT_HAVE_SYS_IOCTL_H
59 #include <sys/ioctl.h>
60 #endif
61 
62 #ifdef _EVENT_HAVE_SYS_MMAN_H
63 #include <sys/mman.h>
64 #endif
65 
66 #ifdef _EVENT_HAVE_SYS_SENDFILE_H
67 #include <sys/sendfile.h>
68 #endif
69 
70 #include <errno.h>
71 #include <stdio.h>
72 #include <stdlib.h>
73 #include <string.h>
74 #ifdef _EVENT_HAVE_STDARG_H
75 #include <stdarg.h>
76 #endif
77 #ifdef _EVENT_HAVE_UNISTD_H
78 #include <unistd.h>
79 #endif
80 #include <limits.h>
81 
82 #include "event2/event.h"
83 #include "event2/buffer.h"
84 #include "event2/buffer_compat.h"
85 #include "event2/bufferevent.h"
86 #include "event2/bufferevent_compat.h"
87 #include "event2/bufferevent_struct.h"
88 #include "event2/thread.h"
89 #include "event2/event-config.h"
90 #include <sys/cdefs.h>
91 __RCSID("$NetBSD: buffer.c,v 1.3 2015/01/29 07:26:02 spz Exp $");
92 #include "log-internal.h"
93 #include "mm-internal.h"
94 #include "util-internal.h"
95 #include "evthread-internal.h"
96 #include "evbuffer-internal.h"
97 #include "bufferevent-internal.h"
98 
99 /* some systems do not have MAP_FAILED */
100 #ifndef MAP_FAILED
101 #define MAP_FAILED	((void *)-1)
102 #endif
103 
104 /* send file support */
105 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
106 #define USE_SENDFILE		1
107 #define SENDFILE_IS_LINUX	1
108 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
109 #define USE_SENDFILE		1
110 #define SENDFILE_IS_FREEBSD	1
111 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
112 #define USE_SENDFILE		1
113 #define SENDFILE_IS_MACOSX	1
114 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
115 #define USE_SENDFILE		1
116 #define SENDFILE_IS_SOLARIS	1
117 #endif
118 
119 #ifdef USE_SENDFILE
120 static int use_sendfile = 1;
121 #endif
122 #ifdef _EVENT_HAVE_MMAP
123 static int use_mmap = 1;
124 #endif
125 
126 
127 /* Mask of user-selectable callback flags. */
128 #define EVBUFFER_CB_USER_FLAGS	    0xffff
129 /* Mask of all internal-use-only flags. */
130 #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
131 
132 /* Flag set if the callback is using the cb_obsolete function pointer  */
133 #define EVBUFFER_CB_OBSOLETE	       0x00040000
134 
135 /* evbuffer_chain support */
136 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
137 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
138 	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
139 
140 #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
141 #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
142 
143 static void evbuffer_chain_align(struct evbuffer_chain *chain);
144 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
145     size_t datalen);
146 static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
147 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
148     const struct evbuffer_ptr *pos, const char *mem, size_t len);
149 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
150     size_t datlen);
151 
152 #ifdef WIN32
153 static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
154     ev_ssize_t howmuch);
155 #else
156 #define evbuffer_readfile evbuffer_read
157 #endif
158 
/* Allocate a new evbuffer_chain able to hold at least 'size' bytes of
 * payload.  The chain header and its payload share one mm_malloc()
 * allocation.  Returns NULL on overflow or allocation failure. */
static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	/* Reject sizes that would overflow once the header is added. */
	if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
		return (NULL);

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	if (size < EVBUFFER_CHAIN_MAX / 2) {
		/* Round up to the next power-of-two multiple of
		 * MIN_BUFFER_SIZE; the guard above keeps the shift from
		 * overflowing. */
		to_alloc = MIN_BUFFER_SIZE;
		while (to_alloc < size) {
			to_alloc <<= 1;
		}
	} else {
		/* Too large to double safely; allocate the exact size. */
		to_alloc = size;
	}

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	/* Only the header needs zeroing; payload stays uninitialized. */
	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);

	return (chain);
}
195 
/* Release one chain, honoring pins and special memory types.  A chain
 * that is still pinned is only marked EVBUFFER_DANGLING; the actual
 * free happens later in _evbuffer_chain_unpin().  Reference, mmap and
 * sendfile chains run type-specific cleanup before the header is
 * freed. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		/* Still pinned: defer the free until the pin is dropped. */
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			/* User-supplied memory: invoke its cleanup hook. */
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			/* Unmap the region and close its backing fd. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			/* Sendfile chains own only the file descriptor. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}

	mm_free(chain);
}
241 
242 static void
evbuffer_free_all_chains(struct evbuffer_chain * chain)243 evbuffer_free_all_chains(struct evbuffer_chain *chain)
244 {
245 	struct evbuffer_chain *next;
246 	for (; chain; chain = next) {
247 		next = chain->next;
248 		evbuffer_chain_free(chain);
249 	}
250 }
251 
#ifndef NDEBUG
/* Debug helper: return 1 iff no chain from 'chain' to the end of the
 * list holds any data (used only inside EVUTIL_ASSERT). */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
"unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
269 
270 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
271  * to replacing them all with a new chain.  Return a pointer to the place
272  * where the new chain will go.
273  *
274  * Internal; requires lock.  The caller must fix up buf->last and buf->first
275  * as needed; they might have been freed.
276  */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		/* Everything from *ch onward must be empty and unpinned;
		 * free the whole tail and sever the link. */
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	/* 'ch' now points at the (NULL) link where a new chain can go. */
	return ch;
}
291 
292 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
293  * chains as necessary.  Requires lock.  Does not schedule callbacks.
294  */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	/* The new chain may already carry data (e.g. mmap or reference
	 * chains), so account for it. */
	buf->total_len += chain->off;
}
325 
326 static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer * buf,size_t datlen)327 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
328 {
329 	struct evbuffer_chain *chain;
330 	if ((chain = evbuffer_chain_new(datlen)) == NULL)
331 		return NULL;
332 	evbuffer_chain_insert(buf, chain);
333 	return chain;
334 }
335 
336 void
_evbuffer_chain_pin(struct evbuffer_chain * chain,unsigned flag)337 _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
338 {
339 	EVUTIL_ASSERT((chain->flags & flag) == 0);
340 	chain->flags |= flag;
341 }
342 
343 void
_evbuffer_chain_unpin(struct evbuffer_chain * chain,unsigned flag)344 _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
345 {
346 	EVUTIL_ASSERT((chain->flags & flag) != 0);
347 	chain->flags &= ~flag;
348 	if (chain->flags & EVBUFFER_DANGLING)
349 		evbuffer_chain_free(chain);
350 }
351 
352 struct evbuffer *
evbuffer_new(void)353 evbuffer_new(void)
354 {
355 	struct evbuffer *buffer;
356 
357 	buffer = mm_calloc(1, sizeof(struct evbuffer));
358 	if (buffer == NULL)
359 		return (NULL);
360 
361 	TAILQ_INIT(&buffer->callbacks);
362 	buffer->refcnt = 1;
363 	buffer->last_with_datap = &buffer->first;
364 
365 	return (buffer);
366 }
367 
368 int
evbuffer_set_flags(struct evbuffer * buf,ev_uint64_t flags)369 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
370 {
371 	EVBUFFER_LOCK(buf);
372 	buf->flags |= (ev_uint32_t)flags;
373 	EVBUFFER_UNLOCK(buf);
374 	return 0;
375 }
376 
377 int
evbuffer_clear_flags(struct evbuffer * buf,ev_uint64_t flags)378 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
379 {
380 	EVBUFFER_LOCK(buf);
381 	buf->flags &= ~(ev_uint32_t)flags;
382 	EVBUFFER_UNLOCK(buf);
383 	return 0;
384 }
385 
386 void
_evbuffer_incref(struct evbuffer * buf)387 _evbuffer_incref(struct evbuffer *buf)
388 {
389 	EVBUFFER_LOCK(buf);
390 	++buf->refcnt;
391 	EVBUFFER_UNLOCK(buf);
392 }
393 
394 void
_evbuffer_incref_and_lock(struct evbuffer * buf)395 _evbuffer_incref_and_lock(struct evbuffer *buf)
396 {
397 	EVBUFFER_LOCK(buf);
398 	++buf->refcnt;
399 }
400 
401 int
evbuffer_defer_callbacks(struct evbuffer * buffer,struct event_base * base)402 evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
403 {
404 	EVBUFFER_LOCK(buffer);
405 	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
406 	buffer->deferred_cbs = 1;
407 	event_deferred_cb_init(&buffer->deferred,
408 	    evbuffer_deferred_callback, buffer);
409 	EVBUFFER_UNLOCK(buffer);
410 	return 0;
411 }
412 
413 int
evbuffer_enable_locking(struct evbuffer * buf,void * lock)414 evbuffer_enable_locking(struct evbuffer *buf, void *lock)
415 {
416 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
417 	return -1;
418 #else
419 	if (buf->lock)
420 		return -1;
421 
422 	if (!lock) {
423 		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
424 		if (!lock)
425 			return -1;
426 		buf->lock = lock;
427 		buf->own_lock = 1;
428 	} else {
429 		buf->lock = lock;
430 		buf->own_lock = 0;
431 	}
432 
433 	return 0;
434 #endif
435 }
436 
/* Record 'bev' as the bufferevent owning this buffer (NULL detaches).
 * The parent is referenced when deferred callbacks are scheduled. */
void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}
444 
/* Invoke every enabled callback appropriate for this pass.
 * 'running_deferred' is set when called from the deferred-cb queue, in
 * which case NODEFER callbacks are skipped (they already ran inline).
 * On the inline pass with deferral configured, only NODEFER callbacks
 * run and the add/del counters are preserved for the deferred pass.
 * Requires lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* Nothing changed since the last run: nothing to report. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	/* Reconstruct the length the buffer had before the changes. */
	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		/* Obsolete callbacks take (old, new) sizes, not an info
		 * struct. */
		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
502 
/* Run (or schedule) this buffer's callbacks after its contents changed.
 * Requires lock.  With deferred callbacks enabled, a deferred run is
 * scheduled and the inline evbuffer_run_callbacks() pass below only
 * fires the NODEFER callbacks. */
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		/* Already queued: the pending run will see the new state. */
		if (buffer->deferred.queued)
			return;
		/* Hold references so the buffer and its parent outlive the
		 * deferred callback; released in
		 * evbuffer_deferred_callback(). */
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}
523 
/* Trampoline executed from the deferred-callback queue: runs the
 * buffer's callbacks, then drops the references taken when the run was
 * scheduled in evbuffer_invoke_callbacks(). */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	/* Save the parent pointer before the decref may free 'buffer'. */
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}
539 
540 static void
evbuffer_remove_all_callbacks(struct evbuffer * buffer)541 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
542 {
543 	struct evbuffer_cb_entry *cbent;
544 
545 	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
546 	    TAILQ_REMOVE(&buffer->callbacks, cbent, next);
547 	    mm_free(cbent);
548 	}
549 }
550 
/* Drop one reference to 'buffer' (which must be locked) and unlock it.
 * When the count reaches zero, free all chains, callbacks, any pending
 * deferred callback, the lock (if owned) and the buffer itself. */
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	/* refcnt hit zero, so no one else holds a reference; reading
	 * own_lock after unlocking is safe. */
	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
577 
/* Public destructor: drop the caller's reference.  The buffer is
 * actually freed only when no other references remain. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}
584 
/* Public wrapper: acquire the buffer's lock via EVBUFFER_LOCK. */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
590 
/* Public wrapper: release the buffer's lock via EVBUFFER_UNLOCK. */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
596 
597 size_t
evbuffer_get_length(const struct evbuffer * buffer)598 evbuffer_get_length(const struct evbuffer *buffer)
599 {
600 	size_t result;
601 
602 	EVBUFFER_LOCK(buffer);
603 
604 	result = (buffer->total_len);
605 
606 	EVBUFFER_UNLOCK(buffer);
607 
608 	return result;
609 }
610 
611 size_t
evbuffer_get_contiguous_space(const struct evbuffer * buf)612 evbuffer_get_contiguous_space(const struct evbuffer *buf)
613 {
614 	struct evbuffer_chain *chain;
615 	size_t result;
616 
617 	EVBUFFER_LOCK(buf);
618 	chain = buf->first;
619 	result = (chain != NULL ? chain->off : 0);
620 	EVBUFFER_UNLOCK(buf);
621 
622 	return result;
623 }
624 
/* Reserve at least 'size' bytes of writable space, described by up to
 * 'n_vecs' iovecs written into 'vec'.  The space does not count as data
 * until evbuffer_commit_space().  Returns the number of vectors used,
 * or -1 if the buffer's end is frozen or expansion fails. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		/* Single vector: ensure one chain has the full amount. */
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		/* Multiple vectors: the space may span several chains. */
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
				&chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
657 
/* Advance buf->last_with_datap past any following chains that hold
 * data so it again points at the link to the last chain with data.
 * Returns the number of positions advanced.  Requires lock. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	/* Empty buffer: nothing to advance over. */
	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}
673 
/* Commit space previously handed out by evbuffer_reserve_space():
 * 'vec' must describe a prefix of the reserved space.  On success the
 * committed bytes become buffer data and callbacks are invoked.
 * Returns 0 on success, -1 if the vectors do not match the reserved
 * space or the buffer's end is frozen. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			/* This chain now holds the last data we know of. */
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
743 
744 static inline int
HAS_PINNED_R(struct evbuffer * buf)745 HAS_PINNED_R(struct evbuffer *buf)
746 {
747 	return (buf->last && CHAIN_PINNED_R(buf->last));
748 }
749 
750 static inline void
ZERO_CHAIN(struct evbuffer * dst)751 ZERO_CHAIN(struct evbuffer *dst)
752 {
753 	ASSERT_EVBUFFER_LOCKED(dst);
754 	dst->first = NULL;
755 	dst->last = NULL;
756 	dst->last_with_datap = &(dst)->first;
757 	dst->total_len = 0;
758 }
759 
760 /* Prepares the contents of src to be moved to another buffer by removing
761  * read-pinned chains. The first pinned chain is saved in first, and the
762  * last in last. If src has no read-pinned chains, first and last are set
763  * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
		struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* The pinned run begins at *last_with_datap or the chain after
	 * it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain in src's list; the
		 * pinned original is emptied but kept for the caller. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* No data to rescue: simply detach the pinned run. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
807 
808 static inline void
RESTORE_PINNED(struct evbuffer * src,struct evbuffer_chain * pinned,struct evbuffer_chain * last)809 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
810 		struct evbuffer_chain *last)
811 {
812 	ASSERT_EVBUFFER_LOCKED(src);
813 
814 	if (!pinned) {
815 		ZERO_CHAIN(src);
816 		return;
817 	}
818 
819 	src->first = pinned;
820 	src->last = last;
821 	src->last_with_datap = &src->first;
822 	src->total_len = 0;
823 }
824 
825 static inline void
COPY_CHAIN(struct evbuffer * dst,struct evbuffer * src)826 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
827 {
828 	ASSERT_EVBUFFER_LOCKED(dst);
829 	ASSERT_EVBUFFER_LOCKED(src);
830 	dst->first = src->first;
831 	if (src->last_with_datap == &src->first)
832 		dst->last_with_datap = &dst->first;
833 	else
834 		dst->last_with_datap = src->last_with_datap;
835 	dst->last = src->last;
836 	dst->total_len = src->total_len;
837 }
838 
/* Append all of src's chains after dst's last chain and update dst's
 * bookkeeping.  Both buffers must be locked; dst must be non-empty. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	/* If src's datap was anchored at its head, it must now point at
	 * the link we just rewrote inside dst. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}
852 
/* Insert all of src's chains before dst's first chain and update dst's
 * bookkeeping.  Both buffers must be locked; src must be non-empty. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst had no data: the last data now lives in src's part. */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		/* dst's data began in its first chain, which now follows
		 * src's last chain. */
		dst->last_with_datap = &src->last->next;
	}
}
870 
/* Move all data from 'inbuf' to the end of 'outbuf', transferring
 * chains instead of copying where possible.  Read-pinned chains are
 * kept in inbuf.  Returns 0 on success, -1 on freeze or allocation
 * failure. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Nothing to move, or self-append: trivially succeed. */
	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach inbuf's read-pinned chains so they are not moved. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	/* Re-install the pinned chains as inbuf's (empty) contents. */
	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
916 
/* Move all data from 'inbuf' to the front of 'outbuf', transferring
 * chains instead of copying where possible.  Read-pinned chains are
 * kept in inbuf.  Returns 0 on success, -1 on freeze or allocation
 * failure. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Nothing to move, or self-prepend: trivially succeed. */
	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach inbuf's read-pinned chains so they are not moved. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	/* Re-install the pinned chains as inbuf's (empty) contents. */
	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
962 
/* Discard the first 'len' bytes of 'buf' (clamped to the buffer's
 * length).  Returns 0 on success, -1 if the front of the buffer is
 * frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Draining everything with nothing pinned: free it all. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		/* Free each chain that is drained completely. */
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* Keep last_with_datap valid if it pointed at (or
			 * into) a chain we are about to free. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* A read-pinned chain may only be emptied,
				 * never freed. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			/* Partially drained chain: advance its start. */
			EVUTIL_ASSERT(remaining <= chain->off);
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1032 
1033 /* Reads data from an event buffer and drains the bytes read */
1034 int
evbuffer_remove(struct evbuffer * buf,void * data_out,size_t datlen)1035 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
1036 {
1037 	ev_ssize_t n;
1038 	EVBUFFER_LOCK(buf);
1039 	n = evbuffer_copyout(buf, data_out, datlen);
1040 	if (n > 0) {
1041 		if (evbuffer_drain(buf, n)<0)
1042 			n = -1;
1043 	}
1044 	EVBUFFER_UNLOCK(buf);
1045 	return (int)n;
1046 }
1047 
/* Copy up to 'datlen' bytes from the front of 'buf' into 'data_out'
 * without draining them.  Returns the number of bytes copied, or -1 if
 * the front of the buffer is frozen. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	/* Clamp the request to what is actually buffered. */
	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy out whole chains first... */
	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* ...then the partial tail, if any. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		EVUTIL_ASSERT(datlen <= chain->off);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1094 
1095 /* reads data from the src buffer to the dst buffer, avoids memcpy as
1096  * possible. */
1097 /*  XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/* Move up to datlen bytes from the front of src to the end of dst,
	 * transferring whole chains without copying where possible.
	 * Returns the number of bytes moved, or -1 on error. */
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	/* Adding to a frozen-end dst or removing from a frozen-start src
	 * is forbidden. */
	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		/* Don't leave last_with_datap pointing into a chain we are
		 * about to hand to dst. */
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		/* Splice the removed run [src->first .. previous] onto the
		 * tail of dst. */
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
1187 
/* Linearize the first 'size' bytes of buf into one contiguous region and
 * return a pointer to it (size < 0 means the whole buffer).  Returns NULL
 * if size exceeds the buffered data, a needed chain is pinned, or
 * allocation fails. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	/* Pick 'tmp', the chain that will hold the linearized bytes, and
	 * 'buffer', the position within it where the copy begins. */
	if (CHAIN_PINNED(chain)) {
		/* A pinned first chain can only grow in place, at its end. */
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		/* Need a fresh chain big enough for the whole span. */
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		/* Remember whether we are freeing the chain(s) that
		 * last_with_datap referenced, so we can repair it below. */
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		/* Partially consume the chain that straddles the boundary. */
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	/* Re-point last_with_datap at a live chain if it referenced one of
	 * the chains freed above. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1299 
1300 /*
1301  * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
1302  * The returned buffer needs to be freed by the called.
1303  */
1304 char *
evbuffer_readline(struct evbuffer * buffer)1305 evbuffer_readline(struct evbuffer *buffer)
1306 {
1307 	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
1308 }
1309 
1310 static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr * it,const char chr)1311 evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
1312 {
1313 	struct evbuffer_chain *chain = it->_internal.chain;
1314 	size_t i = it->_internal.pos_in_chain;
1315 	while (chain != NULL) {
1316 		char *buffer = (char *)chain->buffer + chain->misalign;
1317 		char *cp = memchr(buffer+i, chr, chain->off-i);
1318 		if (cp) {
1319 			it->_internal.chain = chain;
1320 			it->_internal.pos_in_chain = cp - buffer;
1321 			it->pos += (cp - buffer - i);
1322 			return it->pos;
1323 		}
1324 		it->pos += chain->off - i;
1325 		i = 0;
1326 		chain = chain->next;
1327 	}
1328 
1329 	return (-1);
1330 }
1331 
/* Return a pointer to the first '\r' or '\n' in the len bytes at s, or
 * NULL if neither occurs. */
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Scan in fixed-size chunks: benchmarking found this faster in
	 * practice than two whole-buffer memchrs, a per-character memchr,
	 * or a hand-rolled scan. */
	char *limit = s + len;

	while (s < limit) {
		size_t span = (size_t)(limit - s);
		char *cr, *lf;

		if (span > CHUNK_SZ)
			span = CHUNK_SZ;
		cr = memchr(s, '\r', span);
		lf = memchr(s, '\n', span);
		if (cr != NULL && lf != NULL)
			return (lf < cr) ? lf : cr;
		if (cr != NULL)
			return cr;
		if (lf != NULL)
			return lf;
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}
1358 
1359 static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr * it)1360 evbuffer_find_eol_char(struct evbuffer_ptr *it)
1361 {
1362 	struct evbuffer_chain *chain = it->_internal.chain;
1363 	size_t i = it->_internal.pos_in_chain;
1364 	while (chain != NULL) {
1365 		char *buffer = (char *)chain->buffer + chain->misalign;
1366 		char *cp = find_eol_char(buffer+i, chain->off-i);
1367 		if (cp) {
1368 			it->_internal.chain = chain;
1369 			it->_internal.pos_in_chain = cp - buffer;
1370 			it->pos += (cp - buffer) - i;
1371 			return it->pos;
1372 		}
1373 		it->pos += chain->off - i;
1374 		i = 0;
1375 		chain = chain->next;
1376 	}
1377 
1378 	return (-1);
1379 }
1380 
/* Count how many consecutive bytes starting at *ptr are members of chrset
 * (like strspn()); advance *ptr past them and return the count, or -1 if
 * the buffer is empty. */
static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	int count = 0;
	struct evbuffer_chain *chain = ptr->_internal.chain;
	size_t i = ptr->_internal.pos_in_chain;

	if (!chain)
		return -1;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			/* buffer[i] is not in chrset: stop here. */
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (! chain->next) {
			/* Ran off the end of the buffer: every byte matched.
			 * Leave *ptr at the end of the last chain. */
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}
1419 
1420 
1421 static inline char
evbuffer_getchr(struct evbuffer_ptr * it)1422 evbuffer_getchr(struct evbuffer_ptr *it)
1423 {
1424 	struct evbuffer_chain *chain = it->_internal.chain;
1425 	size_t off = it->_internal.pos_in_chain;
1426 
1427 	return chain->buffer[chain->misalign + off];
1428 }
1429 
/* Locate the next end-of-line marker at or after *start (or the beginning
 * of the buffer if start is NULL), according to eol_style.  On success the
 * returned ptr addresses the first EOL byte and *eol_len_out holds the
 * number of bytes the terminator occupies; on failure the returned ptr has
 * pos == -1. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* Any CR or LF ends the line; the terminator is the whole
		 * following run of CR/LF bytes. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		/* Only a literal "\r\n" counts. */
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		/* Accept either "\r\n" or a bare "\n". */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				/* A lone '\r': skip it and keep scanning. */
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
1504 
1505 char *
evbuffer_readln(struct evbuffer * buffer,size_t * n_read_out,enum evbuffer_eol_style eol_style)1506 evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
1507 		enum evbuffer_eol_style eol_style)
1508 {
1509 	struct evbuffer_ptr it;
1510 	char *line;
1511 	size_t n_to_copy=0, extra_drain=0;
1512 	char *result = NULL;
1513 
1514 	EVBUFFER_LOCK(buffer);
1515 
1516 	if (buffer->freeze_start) {
1517 		goto done;
1518 	}
1519 
1520 	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
1521 	if (it.pos < 0)
1522 		goto done;
1523 	n_to_copy = it.pos;
1524 
1525 	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
1526 		event_warn("%s: out of memory", __func__);
1527 		goto done;
1528 	}
1529 
1530 	evbuffer_remove(buffer, line, n_to_copy);
1531 	line[n_to_copy] = '\0';
1532 
1533 	evbuffer_drain(buffer, extra_drain);
1534 	result = line;
1535 done:
1536 	EVBUFFER_UNLOCK(buffer);
1537 
1538 	if (n_read_out)
1539 		*n_read_out = result ? n_to_copy : 0;
1540 
1541 	return result;
1542 }
1543 
1544 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
1545 
1546 /* Adds data to an event buffer */
1547 
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	/* Append datlen bytes of data_in to the end of buf.  Returns 0 on
	 * success, -1 if the buffer end is frozen, the total length would
	 * overflow, or allocation fails. */
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	/* Grow geometrically (double the last chain's size) up to half the
	 * auto-size cap, but never allocate less than datlen. */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		/* Top off the old last chain with as much as fits there. */
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* The rest goes into the new chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1640 
1641 int
evbuffer_prepend(struct evbuffer * buf,const void * data,size_t datlen)1642 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
1643 {
1644 	struct evbuffer_chain *chain, *tmp;
1645 	int result = -1;
1646 
1647 	EVBUFFER_LOCK(buf);
1648 
1649 	if (buf->freeze_start) {
1650 		goto done;
1651 	}
1652 	if (datlen > EV_SIZE_MAX - buf->total_len) {
1653 		goto done;
1654 	}
1655 
1656 	chain = buf->first;
1657 
1658 	if (chain == NULL) {
1659 		chain = evbuffer_chain_new(datlen);
1660 		if (!chain)
1661 			goto done;
1662 		evbuffer_chain_insert(buf, chain);
1663 	}
1664 
1665 	/* we cannot touch immutable buffers */
1666 	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1667 		/* Always true for mutable buffers */
1668 		EVUTIL_ASSERT(chain->misalign >= 0 &&
1669 		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
1670 
1671 		/* If this chain is empty, we can treat it as
1672 		 * 'empty at the beginning' rather than 'empty at the end' */
1673 		if (chain->off == 0)
1674 			chain->misalign = chain->buffer_len;
1675 
1676 		if ((size_t)chain->misalign >= datlen) {
1677 			/* we have enough space to fit everything */
1678 			memcpy(chain->buffer + chain->misalign - datlen,
1679 			    data, datlen);
1680 			chain->off += datlen;
1681 			chain->misalign -= datlen;
1682 			buf->total_len += datlen;
1683 			buf->n_add_for_cb += datlen;
1684 			goto out;
1685 		} else if (chain->misalign) {
1686 			/* we can only fit some of the data. */
1687 			memcpy(chain->buffer,
1688 			    (const char*)data + datlen - chain->misalign,
1689 			    (size_t)chain->misalign);
1690 			chain->off += (size_t)chain->misalign;
1691 			buf->total_len += (size_t)chain->misalign;
1692 			buf->n_add_for_cb += (size_t)chain->misalign;
1693 			datlen -= (size_t)chain->misalign;
1694 			chain->misalign = 0;
1695 		}
1696 	}
1697 
1698 	/* we need to add another chain */
1699 	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
1700 		goto done;
1701 	buf->first = tmp;
1702 	if (buf->last_with_datap == &buf->first)
1703 		buf->last_with_datap = &tmp->next;
1704 
1705 	tmp->next = chain;
1706 
1707 	tmp->off = datlen;
1708 	EVUTIL_ASSERT(datlen <= tmp->buffer_len);
1709 	tmp->misalign = tmp->buffer_len - datlen;
1710 
1711 	memcpy(tmp->buffer + tmp->misalign, data, datlen);
1712 	buf->total_len += datlen;
1713 	buf->n_add_for_cb += (size_t)chain->misalign;
1714 
1715 out:
1716 	evbuffer_invoke_callbacks(buf);
1717 	result = 0;
1718 done:
1719 	EVBUFFER_UNLOCK(buf);
1720 	return result;
1721 }
1722 
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* Only legal on chains we fully own: neither immutable nor pinned. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* Slide the live bytes down to the start of the allocation;
	 * memmove because source and destination may overlap. */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
1732 
1733 #define MAX_TO_COPY_IN_EXPAND 4096
1734 #define MAX_TO_REALIGN_IN_EXPAND 2048
1735 
1736 /** Helper: return true iff we should realign chain to fit datalen bytes of
1737     data in it. */
1738 static int
evbuffer_chain_should_realign(struct evbuffer_chain * chain,size_t datlen)1739 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
1740     size_t datlen)
1741 {
1742 	return chain->buffer_len - chain->off >= datlen &&
1743 	    (chain->off < chain->buffer_len / 2) &&
1744 	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
1745 }
1746 
1747 /* Expands the available space in the event buffer to at least datlen, all in
1748  * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	/* Ensure at least datlen contiguous writable bytes exist in some
	 * chain of buf, and return that chain; NULL on allocation failure. */
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND ||
	    (datlen < EVBUFFER_CHAIN_MAX &&
		EVBUFFER_CHAIN_MAX - datlen >= chain->off)) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
1859 
1860 /* Make sure that datlen bytes are available for writing in the last n
1861  * chains.  Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	/* Guarantee datlen bytes of writable space spread over at most the
	 * last n chains, without copying or moving any existing data.
	 * Returns 0 on success, -1 on allocation failure. */
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* can't overflow, since only mutable chains have
			 * huge misaligns. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}

		/* Free every trailing empty chain. */
		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		EVUTIL_ASSERT(datlen >= avail);
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Allocation failed: the empty chains are already
			 * gone, so restore a consistent list before
			 * reporting the error. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}
1971 
1972 int
evbuffer_expand(struct evbuffer * buf,size_t datlen)1973 evbuffer_expand(struct evbuffer *buf, size_t datlen)
1974 {
1975 	struct evbuffer_chain *chain;
1976 
1977 	EVBUFFER_LOCK(buf);
1978 	chain = evbuffer_expand_singlechain(buf, datlen);
1979 	EVBUFFER_UNLOCK(buf);
1980 	return chain ? 0 : -1;
1981 }
1982 
1983 /*
1984  * Reads data from a file descriptor into a buffer.
1985  */
1986 
1987 #if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
1988 #define USE_IOVEC_IMPL
1989 #endif
1990 
1991 #ifdef USE_IOVEC_IMPL
1992 
1993 #ifdef _EVENT_HAVE_SYS_UIO_H
1994 /* number of iovec we use for writev, fragmentation is going to determine
1995  * how much we end up writing */
1996 
1997 #define DEFAULT_WRITE_IOVEC 128
1998 
1999 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
2000 #define NUM_WRITE_IOVEC UIO_MAXIOV
2001 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
2002 #define NUM_WRITE_IOVEC IOV_MAX
2003 #else
2004 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
2005 #endif
2006 
2007 #define IOV_TYPE struct iovec
2008 #define IOV_PTR_FIELD iov_base
2009 #define IOV_LEN_FIELD iov_len
2010 #define IOV_LEN_TYPE size_t
2011 #else
2012 #define NUM_WRITE_IOVEC 16
2013 #define IOV_TYPE WSABUF
2014 #define IOV_PTR_FIELD buf
2015 #define IOV_LEN_FIELD len
2016 #define IOV_LEN_TYPE unsigned long
2017 #endif
2018 #endif
2019 #define NUM_READ_IOVEC 4
2020 
2021 #define EVBUFFER_MAX_READ	4096
2022 
2023 /** Helper function to figure out which space to use for reading data into
2024     an evbuffer.  Internal use only.
2025 
2026     @param buf The buffer to read into
2027     @param howmuch How much we want to read.
2028     @param vecs An array of two or more iovecs or WSABUFs.
2029     @param n_vecs_avail The length of vecs
2030     @param chainp A pointer to a variable to hold the first chain we're
2031       reading into.
2032     @param exact Boolean: if true, we do not provide more than 'howmuch'
2033       space in the vectors, even if more space is available.
2034     @return The number of buffers we're using.
2035  */
2036 int
_evbuffer_read_setup_vecs(struct evbuffer * buf,ev_ssize_t howmuch,struct evbuffer_iovec * vecs,int n_vecs_avail,struct evbuffer_chain *** chainp,int exact)2037 _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
2038     struct evbuffer_iovec *vecs, int n_vecs_avail,
2039     struct evbuffer_chain ***chainp, int exact)
2040 {
2041 	struct evbuffer_chain *chain;
2042 	struct evbuffer_chain **firstchainp;
2043 	size_t so_far;
2044 	int i;
2045 	ASSERT_EVBUFFER_LOCKED(buf);
2046 
2047 	if (howmuch < 0)
2048 		return -1;
2049 
2050 	so_far = 0;
2051 	/* Let firstchain be the first chain with any space on it */
2052 	firstchainp = buf->last_with_datap;
2053 	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
2054 		firstchainp = &(*firstchainp)->next;
2055 	}
2056 
2057 	chain = *firstchainp;
2058 	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
2059 		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
2060 		if (avail > (howmuch - so_far) && exact)
2061 			avail = howmuch - so_far;
2062 		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
2063 		vecs[i].iov_len = avail;
2064 		so_far += avail;
2065 		chain = chain->next;
2066 	}
2067 
2068 	*chainp = firstchainp;
2069 	return i;
2070 }
2071 
/* Return how many bytes are waiting to be read on fd, or -1 on ioctl
 * failure.  Where the platform cannot tell us, fall back to
 * EVBUFFER_MAX_READ. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	/* Windows: FIONREAD via ioctlsocket, result in an unsigned long. */
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	/* Can overflow, but mostly harmlessly. XXXX */
	return (int)lng;
#elif defined(FIONREAD)
	/* POSIX: FIONREAD via ioctl. */
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	/* No FIONREAD available: use the fixed cap. */
	return EVBUFFER_MAX_READ;
#endif
}
2090 
2091 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
2092  * as howmuch? */
2093 int
evbuffer_read(struct evbuffer * buf,evutil_socket_t fd,int howmuch)2094 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
2095 {
2096 	struct evbuffer_chain **chainp;
2097 	int n;
2098 	int result;
2099 
2100 #ifdef USE_IOVEC_IMPL
2101 	int nvecs, i, remaining;
2102 #else
2103 	struct evbuffer_chain *chain;
2104 	unsigned char *p;
2105 #endif
2106 
2107 	EVBUFFER_LOCK(buf);
2108 
2109 	if (buf->freeze_end) {
2110 		result = -1;
2111 		goto done;
2112 	}
2113 
2114 	n = get_n_bytes_readable_on_socket(fd);
2115 	if (n <= 0 || n > EVBUFFER_MAX_READ)
2116 		n = EVBUFFER_MAX_READ;
2117 	if (howmuch < 0 || howmuch > n)
2118 		howmuch = n;
2119 
2120 #ifdef USE_IOVEC_IMPL
2121 	/* Since we can use iovecs, we're willing to use the last
2122 	 * NUM_READ_IOVEC chains. */
2123 	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
2124 		result = -1;
2125 		goto done;
2126 	} else {
2127 		IOV_TYPE vecs[NUM_READ_IOVEC];
2128 #ifdef _EVBUFFER_IOVEC_IS_NATIVE
2129 		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
2130 		    NUM_READ_IOVEC, &chainp, 1);
2131 #else
2132 		/* We aren't using the native struct iovec.  Therefore,
2133 		   we are on win32. */
2134 		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
2135 		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
2136 		    &chainp, 1);
2137 
2138 		for (i=0; i < nvecs; ++i)
2139 			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
2140 #endif
2141 
2142 #ifdef WIN32
2143 		{
2144 			DWORD bytesRead;
2145 			DWORD flags=0;
2146 			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
2147 				/* The read failed. It might be a close,
2148 				 * or it might be an error. */
2149 				if (WSAGetLastError() == WSAECONNABORTED)
2150 					n = 0;
2151 				else
2152 					n = -1;
2153 			} else
2154 				n = bytesRead;
2155 		}
2156 #else
2157 		n = readv(fd, vecs, nvecs);
2158 #endif
2159 	}
2160 
2161 #else /*!USE_IOVEC_IMPL*/
2162 	/* If we don't have FIONREAD, we might waste some space here */
2163 	/* XXX we _will_ waste some space here if there is any space left
2164 	 * over on buf->last. */
2165 	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
2166 		result = -1;
2167 		goto done;
2168 	}
2169 
2170 	/* We can append new data at this point */
2171 	p = chain->buffer + chain->misalign + chain->off;
2172 
2173 #ifndef WIN32
2174 	n = read(fd, p, howmuch);
2175 #else
2176 	n = recv(fd, p, howmuch, 0);
2177 #endif
2178 #endif /* USE_IOVEC_IMPL */
2179 
2180 	if (n == -1) {
2181 		result = -1;
2182 		goto done;
2183 	}
2184 	if (n == 0) {
2185 		result = 0;
2186 		goto done;
2187 	}
2188 
2189 #ifdef USE_IOVEC_IMPL
2190 	remaining = n;
2191 	for (i=0; i < nvecs; ++i) {
2192 		/* can't overflow, since only mutable chains have
2193 		 * huge misaligns. */
2194 		size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
2195 		/* XXXX This is a kludge that can waste space in perverse
2196 		 * situations. */
2197 		if (space > EVBUFFER_CHAIN_MAX)
2198 			space = EVBUFFER_CHAIN_MAX;
2199 		if ((ev_ssize_t)space < remaining) {
2200 			(*chainp)->off += space;
2201 			remaining -= (int)space;
2202 		} else {
2203 			(*chainp)->off += remaining;
2204 			buf->last_with_datap = chainp;
2205 			break;
2206 		}
2207 		chainp = &(*chainp)->next;
2208 	}
2209 #else
2210 	chain->off += n;
2211 	advance_last_with_data(buf);
2212 #endif
2213 	buf->total_len += n;
2214 	buf->n_add_for_cb += n;
2215 
2216 	/* Tell someone about changes in this buffer */
2217 	evbuffer_invoke_callbacks(buf);
2218 	result = n;
2219 done:
2220 	EVBUFFER_UNLOCK(buf);
2221 	return result;
2222 }
2223 
2224 #ifdef WIN32
/* Win32-only: read up to 'howmuch' bytes from the file 'fd' into 'buf'
 * using read(), which (unlike recv/WSARecv) works on file handles.
 * Uses at most two reserved chains.  Returns <= 0 on EOF/error. */
static int
evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
{
	int result;
	int nchains, n;
	struct evbuffer_iovec v[2];

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	if (howmuch < 0)
		howmuch = 16384;	/* default read size when unspecified */


	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
	if (nchains < 1 || nchains > 2) {
		result = -1;
		goto done;
	}
	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
	if (n <= 0) {
		result = n;
		goto done;
	}
	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
	if (nchains > 1) {
		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
		if (n <= 0) {
			/* Second read failed: commit only the first chunk
			 * and report its size. */
			result = (unsigned long) v[0].iov_len;
			evbuffer_commit_space(buf, v, 1);
			goto done;
		}
		v[1].iov_len = n;
	}
	evbuffer_commit_space(buf, v, nchains);

	/* NOTE(review): when both reads succeed, only the size of the
	 * second read is returned, not the total committed -- confirm the
	 * caller (evbuffer_add_file's while loop) tolerates this. */
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2272 #endif
2273 
2274 #ifdef USE_IOVEC_IMPL
/* Helper for evbuffer_write_atmost(): flush up to 'howmuch' bytes from
 * 'buffer' to 'fd' with a single writev()/WSASend() call, gathering at
 * most NUM_WRITE_IOVEC chains.  Returns bytes written or -1 on error.
 * Caller must hold the buffer lock. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* The whole chain fits within the remaining budget. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* Partial chain: this is the final iovec we set up. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
2323 #endif
2324 
2325 #ifdef USE_SENDFILE
/* Helper for evbuffer_write_atmost(): send the first chain of 'buffer'
 * (a sendfile chain) to 'fd' with the platform's sendfile primitive.
 * Returns bytes sent, 0 on a retriable condition (EAGAIN/EINTR with no
 * progress), or -1 on hard error.  Caller must hold the buffer lock.
 * NOTE(review): 'howmuch' is currently unused; the whole first chain is
 * always offered -- confirm that is intentional. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	/* OS X: 'len' is value-result and reports bytes actually sent,
	 * even when the call returns -1 with a retriable errno. */
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	/* FreeBSD: bytes sent come back through 'len'. */
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Partial progress before EAGAIN/EINTR still counts
			 * as bytes written. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
2378 #endif
2379 
/* Write at most 'howmuch' bytes from 'buffer' to 'fd' (everything when
 * howmuch is negative or exceeds the buffer length), draining whatever
 * was actually sent.  Returns bytes written, or -1 on error or when the
 * front of the buffer is frozen. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	/* Draining a buffer whose front is frozen is forbidden. */
	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		/* A sendfile chain at the front must go out via sendfile;
		 * its data cannot be written with writev/send. */
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		EVUTIL_ASSERT(p || !howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		EVUTIL_ASSERT(p || !howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Only drain the bytes that actually went out. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}
2427 
2428 int
evbuffer_write(struct evbuffer * buffer,evutil_socket_t fd)2429 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2430 {
2431 	return evbuffer_write_atmost(buffer, fd, -1);
2432 }
2433 
2434 unsigned char *
evbuffer_find(struct evbuffer * buffer,const unsigned char * what,size_t len)2435 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2436 {
2437 	unsigned char *search;
2438 	struct evbuffer_ptr ptr;
2439 
2440 	EVBUFFER_LOCK(buffer);
2441 
2442 	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2443 	if (ptr.pos < 0) {
2444 		search = NULL;
2445 	} else {
2446 		search = evbuffer_pullup(buffer, ptr.pos + len);
2447 		if (search)
2448 			search += ptr.pos;
2449 	}
2450 	EVBUFFER_UNLOCK(buffer);
2451 	return search;
2452 }
2453 
/* Position the cursor 'pos' within 'buf'.  With EVBUFFER_PTR_SET,
 * 'position' is an absolute offset from the buffer start; with
 * EVBUFFER_PTR_ADD it is added to the current position.  Returns 0 on
 * success, or -1 when the target lies past the end of the buffer or the
 * addition would overflow (pos is marked invalid when we walk off the
 * end). */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		/* Absolute positioning: restart from the first chain. */
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
			/* Cursor already invalid, or the add would
			 * overflow size_t. */
			EVBUFFER_UNLOCK(buf);
			return -1;
		}
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Walk forward until the remaining distance 'left' lands inside
	 * the current chain ('position' is the offset within it). */
	EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		/* Walked past the last chain: invalidate the cursor. */
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}
2500 
2501 /**
2502    Compare the bytes in buf at position pos to the len bytes in mem.  Return
2503    less than 0, 0, or greater than 0 as memcmp.
2504  */
2505 static int
evbuffer_ptr_memcmp(const struct evbuffer * buf,const struct evbuffer_ptr * pos,const char * mem,size_t len)2506 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
2507     const char *mem, size_t len)
2508 {
2509 	struct evbuffer_chain *chain;
2510 	size_t position;
2511 	int r;
2512 
2513 	ASSERT_EVBUFFER_LOCKED(buf);
2514 
2515 	if (pos->pos < 0 ||
2516 	    EV_SIZE_MAX - len < (size_t)pos->pos ||
2517 	    pos->pos + len > buf->total_len)
2518 		return -1;
2519 
2520 	chain = pos->_internal.chain;
2521 	position = pos->_internal.pos_in_chain;
2522 	while (len && chain) {
2523 		size_t n_comparable;
2524 		if (len + position > chain->off)
2525 			n_comparable = chain->off - position;
2526 		else
2527 			n_comparable = len;
2528 		r = memcmp(chain->buffer + chain->misalign + position, mem,
2529 		    n_comparable);
2530 		if (r)
2531 			return r;
2532 		mem += n_comparable;
2533 		len -= n_comparable;
2534 		position = 0;
2535 		chain = chain->next;
2536 	}
2537 
2538 	return 0;
2539 }
2540 
2541 struct evbuffer_ptr
evbuffer_search(struct evbuffer * buffer,const char * what,size_t len,const struct evbuffer_ptr * start)2542 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
2543 {
2544 	return evbuffer_search_range(buffer, what, len, start, NULL);
2545 }
2546 
/* Find the first occurrence of the 'len'-byte string 'what' within
 * 'buffer', between 'start' and 'end' (either may be NULL for the
 * buffer's natural bounds).  The returned pointer's pos is the match
 * offset, or -1 if no match was found. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	/* Place the search cursor at 'start', or at the beginning. */
	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	/* Empty or absurdly large needles never match. */
	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		/* Scan this chain for the needle's first byte... */
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			/* ...then verify the full needle here (it may
			 * span multiple chains). */
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			/* False alarm: step past this byte and retry. */
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			/* First byte absent from this chain: skip ahead. */
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
2611 
/* Fill up to 'n_vec' entries of 'vec' with pointers into 'buffer',
 * describing up to 'len' bytes (all readable bytes when len is
 * negative), starting at 'start_at' (or the buffer's start when NULL).
 * Returns the number of vectors a full peek would need, which may be
 * larger than n_vec; no data is copied or drained. */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* The first vector covers the tail of the start chain. */
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len;
		if (start_at) {
			len -= start_at->pos;
		}
	}

	/* One vector per chain until 'len' bytes are described or the
	 * chains run out.  Note idx keeps counting past n_vec (for len>=0)
	 * so the caller learns how many vectors it really needs. */
	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}
2665 
2666 
/* Append a vsnprintf-formatted string to 'buf'.  Returns the number of
 * bytes appended (excluding the NUL), or -1 on formatting/allocation
 * failure or if the buffer's end is frozen. */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;


	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	/* Format into the free space of the last chain; when the output
	 * is truncated, grow the chain to the reported size and retry. */
	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		/* Copy the arg list: we may need to format twice. */
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		/* Refuse output that could never fit in a single chain. */
		if (INT_MAX >= EVBUFFER_CHAIN_MAX &&
		    (size_t)sz >= EVBUFFER_CHAIN_MAX)
			goto done;
		if ((size_t)sz < space) {
			/* It fit ('sz' excludes the trailing NUL). */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		/* Truncated: make room for sz bytes plus the NUL. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2730 
/* Append a printf-formatted string to 'buf'; returns bytes appended or
 * -1 on failure.  Thin varargs shim over evbuffer_add_vprintf(). */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	va_list args;
	int nwritten;

	va_start(args, fmt);
	nwritten = evbuffer_add_vprintf(buf, fmt, args);
	va_end(args);

	return nwritten;
}
2743 
/* Append 'datlen' bytes at 'data' to 'outbuf' by reference -- no copy
 * is made; the caller's memory must stay valid until the chain is
 * released, at which point 'cleanupfn' (if non-NULL) is called with
 * (data, datlen, extra).  Returns 0 on success, -1 on allocation
 * failure or a frozen buffer end. */
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	/* The chain points at caller memory: mark it so we never try to
	 * write into or reallocate it. */
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = __UNCONST(data);
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}
2783 
2784 /* TODO(niels): maybe we don't want to own the fd, however, in that
2785  * case, we should dup it - dup is cheap.  Perhaps, we should use a
2786  * callback instead?
2787  */
2788 /* TODO(niels): we may want to add to automagically convert to mmap, in
2789  * case evbuffer_remove() or evbuffer_pullup() are being used.
2790  */
/* Append 'length' bytes of the file 'fd', starting at 'offset', to
 * 'outbuf'.  Uses a sendfile chain or an mmap chain when those paths
 * are compiled in and enabled; otherwise reads the range into memory.
 * Returns 0 on success, -1 on error.  Ownership of 'fd' passes to the
 * buffer on the sendfile/mmap paths; the linear path closes it itself
 * on success (see the TODO comment above). */
int
evbuffer_add_file(struct evbuffer *outbuf, int fd,
    ev_off_t offset, ev_off_t length)
{
#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
	struct evbuffer_chain *chain;
	struct evbuffer_chain_fd *info;
#endif
#if defined(USE_SENDFILE)
	int sendfile_okay = 1;
#endif
	int ok = 1;

	/* Reject negative ranges and ranges whose end would overflow a
	 * single chain (guards the length+offset arithmetic below). */
	if (offset < 0 || length < 0 ||
	    ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
	    (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
		return (-1);

#if defined(USE_SENDFILE)
	/* sendfile chains hold no readable memory, so they are only
	 * allowed on buffers that drain straight to an fd. */
	if (use_sendfile) {
		EVBUFFER_LOCK(outbuf);
		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
		EVBUFFER_UNLOCK(outbuf);
	}

	if (use_sendfile && sendfile_okay) {
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			return (-1);
		}

		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
		chain->buffer = NULL;	/* no reading possible */
		chain->buffer_len = length + offset;
		chain->off = length;
		chain->misalign = offset;	/* skip 'offset' file bytes */

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			mm_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
		}
	} else
#endif
#if defined(_EVENT_HAVE_MMAP)
	if (use_mmap) {
		void *mapped = mmap(NULL, length + offset, PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE |
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, 0);
		/* some mmap implementations require offset to be a multiple of
		 * the page size.  most users of this api, are likely to use 0
		 * so mapping everything is not likely to be a problem.
		 * TODO(niels): determine page size and round offset to that
		 * page size to avoid mapping too much memory.
		 */
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
			return (-1);
		}
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			/* NOTE(review): length+offset bytes were mapped but
			 * only 'length' is unmapped here -- looks like a
			 * partial unmap whenever offset != 0; confirm
			 * against upstream libevent. */
			munmap(mapped, length);
			return (-1);
		}

		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
		chain->buffer = mapped;
		chain->buffer_len = length + offset;
		chain->off = length + offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			/* Clear fd first so evbuffer_chain_free() will not
			 * close a descriptor we never took ownership of. */
			info->fd = -1;
			evbuffer_chain_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;

			evbuffer_chain_insert(outbuf, chain);

			/* we need to subtract whatever we don't need */
			evbuffer_drain(outbuf, offset);
		}
	} else
#endif
	{
		/* the default implementation */
		struct evbuffer *tmp = evbuffer_new();
		ev_ssize_t read;	/* bytes added per iteration; shadows read(2) */

		if (tmp == NULL)
			return (-1);

#ifdef WIN32
#define lseek _lseeki64
#endif
		if (lseek(fd, offset, SEEK_SET) == -1) {
			evbuffer_free(tmp);
			return (-1);
		}

		/* we add everything to a temporary buffer, so that we
		 * can abort without side effects if the read fails.
		 */
		while (length) {
			ev_ssize_t to_read = length > EV_SSIZE_MAX ? EV_SSIZE_MAX : (ev_ssize_t)length;
			read = evbuffer_readfile(tmp, fd, to_read);
			if (read == -1) {
				evbuffer_free(tmp);
				return (-1);
			}

			length -= read;
		}

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			evbuffer_free(tmp);
			ok = 0;
		} else {
			evbuffer_add_buffer(outbuf, tmp);
			evbuffer_free(tmp);

#ifdef WIN32
#define close _close
#endif
			close(fd);
		}
	}

	if (ok)
		evbuffer_invoke_callbacks(outbuf);
	EVBUFFER_UNLOCK(outbuf);

	return ok ? 0 : -1;
}
2945 
2946 
/* Obsolete single-callback interface: replace all callbacks on 'buffer'
 * with the one old-style callback 'cb' (or remove them all when cb is
 * NULL). */
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	/* Only one callback may exist under the old interface. */
	if (!TAILQ_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		/* NOTE(review): evbuffer_add_cb() returns NULL on
		 * allocation failure, which would crash the next line --
		 * confirm whether a guard is wanted here. */
		ent->cb.cb_obsolete = cb;
		ent->flags |= EVBUFFER_CB_OBSOLETE;
	}
	EVBUFFER_UNLOCK(buffer);
}
2963 
2964 struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer * buffer,evbuffer_cb_func cb,void * cbarg)2965 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
2966 {
2967 	struct evbuffer_cb_entry *e;
2968 	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
2969 		return NULL;
2970 	EVBUFFER_LOCK(buffer);
2971 	e->cb.cb_func = cb;
2972 	e->cbarg = cbarg;
2973 	e->flags = EVBUFFER_CB_ENABLED;
2974 	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
2975 	EVBUFFER_UNLOCK(buffer);
2976 	return e;
2977 }
2978 
2979 int
evbuffer_remove_cb_entry(struct evbuffer * buffer,struct evbuffer_cb_entry * ent)2980 evbuffer_remove_cb_entry(struct evbuffer *buffer,
2981 			 struct evbuffer_cb_entry *ent)
2982 {
2983 	EVBUFFER_LOCK(buffer);
2984 	TAILQ_REMOVE(&buffer->callbacks, ent, next);
2985 	EVBUFFER_UNLOCK(buffer);
2986 	mm_free(ent);
2987 	return 0;
2988 }
2989 
2990 int
evbuffer_remove_cb(struct evbuffer * buffer,evbuffer_cb_func cb,void * cbarg)2991 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
2992 {
2993 	struct evbuffer_cb_entry *cbent;
2994 	int result = -1;
2995 	EVBUFFER_LOCK(buffer);
2996 	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
2997 		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
2998 			result = evbuffer_remove_cb_entry(buffer, cbent);
2999 			goto done;
3000 		}
3001 	}
3002 done:
3003 	EVBUFFER_UNLOCK(buffer);
3004 	return result;
3005 }
3006 
3007 int
evbuffer_cb_set_flags(struct evbuffer * buffer,struct evbuffer_cb_entry * cb,ev_uint32_t flags)3008 evbuffer_cb_set_flags(struct evbuffer *buffer,
3009 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3010 {
3011 	/* the user isn't allowed to mess with these. */
3012 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3013 	EVBUFFER_LOCK(buffer);
3014 	cb->flags |= flags;
3015 	EVBUFFER_UNLOCK(buffer);
3016 	return 0;
3017 }
3018 
3019 int
evbuffer_cb_clear_flags(struct evbuffer * buffer,struct evbuffer_cb_entry * cb,ev_uint32_t flags)3020 evbuffer_cb_clear_flags(struct evbuffer *buffer,
3021 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3022 {
3023 	/* the user isn't allowed to mess with these. */
3024 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3025 	EVBUFFER_LOCK(buffer);
3026 	cb->flags &= ~flags;
3027 	EVBUFFER_UNLOCK(buffer);
3028 	return 0;
3029 }
3030 
3031 int
evbuffer_freeze(struct evbuffer * buffer,int start)3032 evbuffer_freeze(struct evbuffer *buffer, int start)
3033 {
3034 	EVBUFFER_LOCK(buffer);
3035 	if (start)
3036 		buffer->freeze_start = 1;
3037 	else
3038 		buffer->freeze_end = 1;
3039 	EVBUFFER_UNLOCK(buffer);
3040 	return 0;
3041 }
3042 
3043 int
evbuffer_unfreeze(struct evbuffer * buffer,int start)3044 evbuffer_unfreeze(struct evbuffer *buffer, int start)
3045 {
3046 	EVBUFFER_LOCK(buffer);
3047 	if (start)
3048 		buffer->freeze_start = 0;
3049 	else
3050 		buffer->freeze_end = 0;
3051 	EVBUFFER_UNLOCK(buffer);
3052 	return 0;
3053 }
3054 
3055 #if 0
3056 void
3057 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
3058 {
3059 	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
3060 		cb->size_before_suspend = evbuffer_get_length(buffer);
3061 		cb->flags |= EVBUFFER_CB_SUSPENDED;
3062 	}
3063 }
3064 
3065 void
3066 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
3067 {
3068 	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
3069 		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
3070 		size_t sz = cb->size_before_suspend;
3071 		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
3072 			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
3073 		cb->size_before_suspend = 0;
3074 		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
3075 			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
3076 		}
3077 	}
3078 }
3079 #endif
3080 
3081 /* These hooks are exposed so that the unit tests can temporarily disable
3082  * sendfile support in order to test mmap, or both to test linear
3083  * access. Don't use it; if we need to add a way to disable sendfile support
3084  * in the future, it will probably be via an alternate version of
3085  * evbuffer_add_file() with a 'flags' argument.
3086  */
3087 int _evbuffer_testing_use_sendfile(void);
3088 int _evbuffer_testing_use_mmap(void);
3089 int _evbuffer_testing_use_linear_file_access(void);
3090 
/* Test hook: force the sendfile path on (and mmap off).  Returns 1 if
 * sendfile support is compiled in, else 0. */
int
_evbuffer_testing_use_sendfile(void)
{
	int sendfile_available = 0;
#ifdef USE_SENDFILE
	use_sendfile = 1;
	sendfile_available = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return sendfile_available;
}
/* Test hook: force the mmap path on (and sendfile off).  Returns 1 if
 * mmap support is compiled in, else 0. */
int
_evbuffer_testing_use_mmap(void)
{
	int mmap_available = 0;
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 1;
	mmap_available = 1;
#endif
	return mmap_available;
}
/* Test hook: disable both accelerated file paths so evbuffer_add_file()
 * falls back to linear reads.  Always succeeds. */
int
_evbuffer_testing_use_linear_file_access(void)
{
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	/* Plain read() always works, so this cannot fail. */
	return 1;
}
3128