1 /*
2 * include/haproxy/dynbuf.h
3 * Buffer management functions.
4 *
5 * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation, version 2.1
10 * exclusively.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #ifndef _HAPROXY_DYNBUF_H
23 #define _HAPROXY_DYNBUF_H
24
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28
29 #include <import/ist.h>
30 #include <haproxy/activity.h>
31 #include <haproxy/api.h>
32 #include <haproxy/buf.h>
33 #include <haproxy/chunk.h>
34 #include <haproxy/dynbuf-t.h>
35 #include <haproxy/pool.h>
36
37 extern struct pool_head *pool_head_buffer;
38
39 int init_buffer();
40 void buffer_dump(FILE *o, struct buffer *b, int from, int to);
41
42 /*****************************************************************/
43 /* These functions are used to compute various buffer area sizes */
44 /*****************************************************************/
45
/* Tells whether buffer <buf> has less than a quarter of its capacity left.
 * Returns 1 in that case, 0 otherwise. A null buffer is never considered
 * almost full.
 */
static inline int buffer_almost_full(const struct buffer *buf)
{
	return b_is_null(buf) ? 0 : b_almost_full(buf);
}
54
55 /**************************************************/
56 /* Functions below are used for buffer allocation */
57 /**************************************************/
58
59 /* Allocates a buffer and assigns it to *buf. If no memory is available,
60 * ((char *)1) is assigned instead with a zero size. No control is made to
61 * check if *buf already pointed to another buffer. The allocated buffer is
62 * returned, or NULL in case no memory is available.
63 */
b_alloc(struct buffer * buf)64 static inline struct buffer *b_alloc(struct buffer *buf)
65 {
66 char *area;
67
68 *buf = BUF_WANTED;
69 area = pool_alloc_dirty(pool_head_buffer);
70 if (unlikely(!area)) {
71 activity[tid].buf_wait++;
72 return NULL;
73 }
74
75 buf->area = area;
76 buf->size = pool_head_buffer->size;
77 return buf;
78 }
79
80 /* Allocates a buffer and assigns it to *buf. If no memory is available,
81 * ((char *)1) is assigned instead with a zero size. No control is made to
82 * check if *buf already pointed to another buffer. The allocated buffer is
83 * returned, or NULL in case no memory is available. The difference with
84 * b_alloc() is that this function only picks from the pool and never calls
85 * malloc(), so it can fail even if some memory is available.
86 */
b_alloc_fast(struct buffer * buf)87 static inline struct buffer *b_alloc_fast(struct buffer *buf)
88 {
89 char *area;
90
91 *buf = BUF_WANTED;
92 area = pool_get_first(pool_head_buffer);
93 if (unlikely(!area))
94 return NULL;
95
96 buf->area = area;
97 buf->size = pool_head_buffer->size;
98 return buf;
99 }
100
101 /* Releases buffer <buf> (no check of emptiness). The buffer's head is marked
102 * empty.
103 */
__b_free(struct buffer * buf)104 static inline void __b_free(struct buffer *buf)
105 {
106 char *area = buf->area;
107
108 /* let's first clear the area to save an occasional "show sess all"
109 * glancing over our shoulder from getting a dangling pointer.
110 */
111 *buf = BUF_NULL;
112 __ha_barrier_store();
113 pool_free(pool_head_buffer, area);
114 }
115
116 /* Releases buffer <buf> if allocated, and marks it empty. */
b_free(struct buffer * buf)117 static inline void b_free(struct buffer *buf)
118 {
119 if (buf->size)
120 __b_free(buf);
121 }
122
/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
 * there are still at least <margin> buffers available in the pool after this
 * allocation so that we don't leave the pool in a condition where a session or
 * a response buffer could not be allocated anymore, resulting in a deadlock.
 * This means that we sometimes need to try to allocate extra entries even if
 * only one buffer is needed.
 *
 * We need to lock the pool here to be sure to have <margin> buffers available
 * after the allocation, regardless how many threads that doing it in the same
 * time. So, we use internal and lockless memory functions (prefixed with '__').
 *
 * Returns <buf> on success (possibly untouched if it was already allocated),
 * or NULL when no memory could be obtained while respecting the margin.
 */
static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
{
	char *area;
	ssize_t idx __maybe_unused;
	unsigned int cached;

	/* already allocated: nothing to do */
	if (buf->size)
		return buf;

	/* <cached> counts entries sitting in this thread's local cache; they
	 * still count as "available" when checking the margin below.
	 */
	cached = 0;
#ifdef CONFIG_HAP_LOCAL_POOLS
	/* cheapest path: grab an entry from the thread-local cache, which
	 * requires neither lock nor atomic op.
	 */
	if (likely(area = __pool_get_from_cache(pool_head_buffer)))
		goto done;

	idx = pool_get_index(pool_head_buffer);
	if (idx >= 0)
		cached = pool_cache[tid][idx].count;
#endif

	/* mark the allocation as pending in case we fail below */
	*buf = BUF_WANTED;

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	/* fast path */
	if ((pool_head_buffer->allocated - pool_head_buffer->used + cached) > margin) {
		area = __pool_get_first(pool_head_buffer);
		if (likely(area)) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
			goto done;
		}
	}

	/* slow path, uses malloc() */
	area = __pool_refill_alloc(pool_head_buffer, margin);

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	if (unlikely(!area)) {
		/* allocation failed: account for it in the activity stats */
		activity[tid].buf_wait++;
		return NULL;
	}

 done:
	buf->area = area;
	buf->size = pool_head_buffer->size;
	return buf;
}
187
188
189 /* Offer one or multiple buffer currently belonging to target <from> to whoever
190 * needs one. Any pointer is valid for <from>, including NULL. Its purpose is
191 * to avoid passing a buffer to oneself in case of failed allocations (e.g.
192 * need two buffers, get one, fail, release it and wake up self again). In case
193 * of normal buffer release where it is expected that the caller is not waiting
194 * for a buffer, NULL is fine. It will wake waiters on the current thread only.
195 */
196 void __offer_buffers(void *from, unsigned int count);
197
offer_buffers(void * from,unsigned int count)198 static inline void offer_buffers(void *from, unsigned int count)
199 {
200 if (!LIST_ISEMPTY(&ti->buffer_wq))
201 __offer_buffers(from, count);
202 }
203
204
205 #endif /* _HAPROXY_DYNBUF_H */
206
207 /*
208 * Local variables:
209 * c-indent-level: 8
210 * c-basic-offset: 8
211 * End:
212 */
213